예제 #1
0
    def set_up(self):
        """Construct a randomly rotated Gaussian target and sample from it.

        Sets self.dlogq / self.logq (gradient and value of the target
        log-density), self.q_sample (single-draw sampler used for the
        starting state) and self.Z (N samples from the target).
        """
        # gamma-distributed eigenvalues give the covariance its spectrum
        eigenvalues = np.random.gamma(shape=1, size=self.D)

        # rotate the diagonal spectrum with a random orthogonal matrix
        rotation = qmult(np.eye(self.D))
        covariance = rotation.T.dot(np.diag(eigenvalues)).dot(rotation)

        # everything below works with the Cholesky factor
        chol = np.linalg.cholesky(covariance)

        # target log-density and its gradient, closing over the factor
        def dlogq(x):
            return log_gaussian_pdf(x, Sigma=chol, is_cholesky=True,
                                    compute_grad=True)

        def logq(x):
            return log_gaussian_pdf(x, Sigma=chol, is_cholesky=True,
                                    compute_grad=False)

        self.dlogq = dlogq
        self.logq = logq

        # single draw from the target, used as the starting state
        self.q_sample = lambda: sample_gaussian(N=1,
                                                mu=np.zeros(self.D),
                                                Sigma=chol,
                                                is_cholesky=True)[0]

        logger.info("N=%d, D=%d" % (self.N, self.D))
        self.Z = sample_gaussian(self.N,
                                 mu=np.zeros(self.D),
                                 Sigma=chol,
                                 is_cholesky=True)
예제 #2
0
    def set_up(self):
        """Construct an isotropic Gaussian target with variance sigma_q.

        Sets self.dlogq / self.logq (gradient and value of the target
        log-density), self.q_sample (single-draw sampler used for the
        starting state) and self.Z (N samples from the target).
        """
        # Cholesky factor of the diagonal covariance sigma_q * I
        chol = np.linalg.cholesky(np.eye(self.D) * self.sigma_q)

        # target log-density and its gradient, closing over the factor
        def dlogq(x):
            return log_gaussian_pdf(x, Sigma=chol, is_cholesky=True,
                                    compute_grad=True)

        def logq(x):
            return log_gaussian_pdf(x, Sigma=chol, is_cholesky=True,
                                    compute_grad=False)

        self.dlogq = dlogq
        self.logq = logq

        # single draw from the target, used as the starting state
        self.q_sample = lambda: sample_gaussian(N=1,
                                                mu=np.zeros(self.D),
                                                Sigma=chol,
                                                is_cholesky=True)[0]

        logger.info("N=%d, D=%d" % (self.N, self.D))
        self.Z = sample_gaussian(self.N,
                                 mu=np.zeros(self.D),
                                 Sigma=chol,
                                 is_cholesky=True)
예제 #3
0
def leapfrog_friction_habc_no_storing(c,
                                      V,
                                      q,
                                      dlogq,
                                      p,
                                      dlogp,
                                      step_size=0.3,
                                      num_steps=1):
    """
    Leapfrog-style integrator with friction for SGHMC/HABC: alternates a
    full momentum update (gradient step plus friction and injected Gaussian
    noise) with a full position update, without storing the trajectory.

    Parameters:
    c         -- friction scale; total friction matrix is C = c*I + V
    V         -- estimated gradient-noise covariance matrix (D x D)
    q         -- initial position (length-D array); not mutated
    dlogq     -- gradient of the target log-density
    p         -- initial momentum (length-D array); not mutated
    dlogp     -- gradient of the momentum log-density
    step_size -- integrator step size
    num_steps -- number of full momentum/position update sweeps

    Returns the final (q, p) pair.

    MATLAB code by Chen:

function [ newx ] = sghmc( U, gradU, m, dt, nstep, x, C, V )
%% SGHMC using gradU, for nstep, starting at position x

p = randn( size(x) ) * sqrt( m );
B = 0.5 * V * dt; 
D = sqrt( 2 * (C-B) * dt );

for i = 1 : nstep
    p = p - gradU( x ) * dt  - p * C * dt  + randn(1)*D;
    x = x + p./m * dt;
end
newx = x;
end
    """
    # NOTE: the docstring above was previously placed after the first
    # statement, making it a dead string expression rather than a docstring.
    logger.debug("Entering")

    # friction term (as in HABC): B is the noise-estimate contribution,
    # C the total friction; injected noise has covariance 2*dt*(C - B)
    D = len(q)
    B = 0.5 * V * step_size
    C = np.eye(D) * c + V
    L_friction = np.linalg.cholesky(2 * step_size * (C - B))
    zeros_D = np.zeros(D)

    # create copy of state so the caller's arrays are not mutated
    p = np.array(p.copy())
    q = np.array(q.copy())

    # alternate full momentum and variable updates
    for _ in range(num_steps):
        friction = sample_gaussian(N=1,
                                   mu=zeros_D,
                                   Sigma=L_friction,
                                   is_cholesky=True)[0]

        # just like normal momentum update but with friction;
        # -dlogp(p) plays the role of p/m in the MATLAB reference
        p = p - step_size * -dlogq(q) - step_size * C.dot(-dlogp(p)) + friction

        # normal position update
        q = q + step_size * -dlogp(p)

    logger.debug("Leaving")
    return q, p
예제 #4
0
    # NOTE(review): fragment of an enclosing function — its `def` is outside
    # this view; N, sigma, lmbda, J, J_xval, a, Z and D are presumably
    # defined above. Code left untouched; comments only.
    print("N=%d, sigma: %.2f, lambda: %.2f, J(a)=%.2f, XJ(a)=%.2f" % \
            (N, sigma, lmbda, J, J_xval))

    # Gaussian kernel and its gradient at the selected bandwidth; the
    # estimated log-density/gradient combine coefficients a with basis Z
    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=sigma)
    kernel_grad = lambda x, X=None: gaussian_kernel_grad(x, X, sigma)
    logq_est = lambda x: log_pdf_estimate(x, a, Z, kernel)
    dlogq_est = lambda x: log_pdf_estimate_grad(x, a, Z, kernel_grad)

    # momentum: zero-mean isotropic Gaussian with variance 0.1
    Sigma_p = np.eye(D) * .1
    L_p = np.linalg.cholesky(Sigma_p)
    logp = lambda x: log_gaussian_pdf(
        x, Sigma=L_p, compute_grad=False, is_cholesky=True)
    dlogp = lambda x: log_gaussian_pdf(
        x, Sigma=L_p, compute_grad=True, is_cholesky=True)
    p_sample = lambda: sample_gaussian(
        N=1, mu=np.zeros(D), Sigma=L_p, is_cholesky=True)[0]

    # starting state: random momentum, position fixed at (0, -3) in the
    # first two coordinates (remaining coordinates stay zero)
    p0 = p_sample()
    q0 = np.zeros(D)
    q0[:2] = np.array([0, -3])

    # integrator parameters
    num_steps = 1500
    step_size = .1

    # plotting grids (default 50 points each) for position (q) and
    # momentum (p) marginals
    Xs_q = np.linspace(-20, 20)
    Ys_q = np.linspace(-10, 10)
    Xs_p = np.linspace(-1, 1)
    Ys_p = np.linspace(-1, 1)
예제 #5
0
    def compute_trajectory(self, random_start_state=None):
        """Run leapfrog with the exact and with the estimated target gradient
        from the same start, and compare acceptance probabilities and
        phase-space volumes of the two trajectories.

        random_start_state -- optional numpy RNG state; if given it is
        restored first so the trajectory is reproducible, otherwise the
        current state is recorded and returned.

        Returns (acc_mean, acc_est_mean, log_det, log_det_est, steps_taken,
        random_start_state).
        """
        logger.debug("Entering")

        # restore a given RNG state, or remember the current one
        if random_start_state is None:
            random_start_state = np.random.get_state()
        else:
            np.random.set_state(random_start_state)

        # momentum: zero-mean isotropic Gaussian with variance sigma_p
        chol_p = np.linalg.cholesky(np.eye(self.D) * self.sigma_p)

        def logp(x):
            return log_gaussian_pdf(x, Sigma=chol_p, compute_grad=False,
                                    is_cholesky=True)

        def dlogp(x):
            return log_gaussian_pdf(x, Sigma=chol_p, compute_grad=True,
                                    is_cholesky=True)

        self.logp = logp
        self.dlogp = dlogp
        self.p_sample = lambda: sample_gaussian(N=1,
                                                mu=np.zeros(self.D),
                                                Sigma=chol_p,
                                                is_cholesky=True)[0]

        # set up target and momentum densities and gradients
        self.set_up()

        dlogq_est = self.update_density_estimate()

        # optionally randomise the trajectory length in
        # [num_steps, max_steps]
        if self.max_steps is None:
            steps = self.num_steps
        else:
            steps = np.random.randint(self.num_steps, self.max_steps + 1)

        logger.info("Simulating trajectory for at least L=%d steps of size %.2f" % \
                     (self.num_steps, self.step_size))
        # starting state
        p0 = self.p_sample()
        q0 = self.q_sample()

        Qs, Ps = leapfrog(q0, self.dlogq, p0, self.dlogp, self.step_size,
                          steps)

        # second integrator with the estimated gradient, same step count
        steps_taken = len(Qs)
        Qs_est, Ps_est = leapfrog(q0, dlogq_est, p0, self.dlogp,
                                  self.step_size, steps_taken)
        logger.info("%d steps taken" % steps_taken)

        logger.info("Computing average acceptance probabilities")
        log_acc = compute_log_accept_pr(q0, p0, Qs, Ps, self.logq, self.logp)
        log_acc_est = compute_log_accept_pr(q0, p0, Qs_est, Ps_est, self.logq,
                                            self.logp)
        acc_mean = np.mean(np.exp(log_acc))
        acc_est_mean = np.mean(np.exp(log_acc_est))

        # acceptance over the final 10 percent of the trajectory only
        idx09 = int(len(log_acc) * 0.9)
        acc_mean10 = np.mean(np.exp(log_acc[idx09:]))
        acc_est_mean10 = np.mean(np.exp(log_acc_est[idx09:]))

        logger.info("Computing average volumes")
        log_det = compute_log_det_trajectory(Qs, Ps)
        log_det_est = compute_log_det_trajectory(Qs_est, Ps_est)

        logger.info("Average acceptance prob: %.2f, %.2f" %
                    (acc_mean, acc_est_mean))
        logger.info("Average acceptance prob (last 10 percent): %.2f, %.2f" %
                    (acc_mean10, acc_est_mean10))
        logger.info("Log-determinant: %.2f, %.2f" % (log_det, log_det_est))

        logger.debug("Leaving")
        return acc_mean, acc_est_mean, log_det, log_det_est, steps_taken, random_start_state
예제 #6
0
    def compute_trajectory(self, random_start_state=None):
        """Fit an RKHS score-matching density estimate to self.Z, then run
        leapfrog with the exact and with the estimated target gradient from
        the same start, comparing acceptance probabilities and phase-space
        volumes.

        random_start_state -- optional numpy RNG state; if given it is
        restored first so the trajectory is reproducible, otherwise the
        current state is recorded and returned.

        Returns (acc_mean, acc_est_mean, log_det, log_det_est, steps_taken,
        random_start_state).
        """
        logger.debug("Entering")

        if random_start_state is not None:
            np.random.set_state(random_start_state)
        else:
            random_start_state = np.random.get_state()

        # momentum: zero-mean isotropic Gaussian with variance sigma_p
        L_p = np.linalg.cholesky(np.eye(self.D) * self.sigma_p)
        self.logp = lambda x: log_gaussian_pdf(
            x, Sigma=L_p, compute_grad=False, is_cholesky=True)
        self.dlogp = lambda x: log_gaussian_pdf(
            x, Sigma=L_p, compute_grad=True, is_cholesky=True)
        # (an identical duplicate assignment of self.p_sample was removed)
        self.p_sample = lambda: sample_gaussian(
            N=1, mu=np.zeros(self.D), Sigma=L_p, is_cholesky=True)[0]

        # set up target and momentum densities and gradients
        self.set_up()

        logger.info("Learning kernel bandwidth")
        sigma = select_sigma_grid(self.Z, lmbda=self.lmbda, log2_sigma_max=15)
        logger.info("Using lmbda=%.2f, sigma: %.2f" % (self.lmbda, sigma))

        logger.info("Computing kernel matrix")
        K = gaussian_kernel(self.Z, sigma=sigma)

        logger.info("Estimate density in RKHS")
        b = _compute_b_sym(self.Z, K, sigma)
        C = _compute_C_sym(self.Z, K, sigma)
        a = score_matching_sym(self.Z, sigma, self.lmbda, K, b, C)

        # estimated gradient of the target log-density in the RKHS
        kernel_grad = lambda x, X=None: gaussian_kernel_grad(x, X, sigma)
        dlogq_est = lambda x: log_pdf_estimate_grad(x, a, self.Z, kernel_grad)

        logger.info("Simulating trajectory for L=%d steps of size %.2f" % \
                     (self.num_steps, self.step_size))
        # starting state
        p0 = self.p_sample()
        q0 = self.q_sample()

        Qs, Ps = leapfrog(q0, self.dlogq, p0, self.dlogp, self.step_size,
                          self.num_steps, self.max_steps)

        # run second integrator for same amount of steps
        steps_taken = len(Qs)
        logger.info("%d steps taken" % steps_taken)
        Qs_est, Ps_est = leapfrog(q0, dlogq_est, p0, self.dlogp,
                                  self.step_size, steps_taken)

        logger.info("Computing average acceptance probabilities")
        log_acc = compute_log_accept_pr(q0, p0, Qs, Ps, self.logq, self.logp)
        log_acc_est = compute_log_accept_pr(q0, p0, Qs_est, Ps_est, self.logq,
                                            self.logp)
        acc_mean = np.mean(np.exp(log_acc))
        acc_est_mean = np.mean(np.exp(log_acc_est))

        logger.info("Computing average volumes")
        log_det = compute_log_det_trajectory(Qs, Ps)
        log_det_est = compute_log_det_trajectory(Qs_est, Ps_est)

        logger.info("Average acceptance prob: %.2f, %.2f" %
                    (acc_mean, acc_est_mean))
        logger.info("Log-determinant: %.2f, %.2f" % (log_det, log_det_est))

        logger.debug("Leaving")
        return acc_mean, acc_est_mean, log_det, log_det_est, steps_taken, random_start_state
예제 #7
0

if __name__ == "__main__":
    D = 2

    # true target log density
    # NOTE(review): the first two Sigma assignments are dead code — Sigma is
    # overwritten with the identity two lines below; confirm which target
    # was intended before removing.
    Sigma = np.diag(np.linspace(0.01, 1, D))
    Sigma[:2, :2] = np.array([[1, .95], [.95, 1]])
    Sigma = np.eye(D)
    L = np.linalg.cholesky(Sigma)

    # estimate density in rkhs: draw N samples from the target,
    # seeded for reproducibility
    N = 200
    mu = np.zeros(D)
    np.random.seed(0)
    Z = sample_gaussian(N, mu, Sigma=L, is_cholesky=True)
    #     print np.sum(Z) * np.std(Z) * np.sum(Z**2) * np.std(Z**2)

    # grid of log2 bandwidths / regularisers to evaluate
    resolution = 10
    log2_sigmas = np.linspace(-3, 3, resolution)
    log2_lambdas = np.linspace(-15, 3, resolution)

    # evaluate `fun` for every (log2_sigma, log2_lambda) pair in parallel;
    # enumerate starts counting at `chunksize` (1), hence the `ind - 1`
    # flat index into the result grids
    Js = np.zeros((len(log2_sigmas), len(log2_lambdas)))
    Js_std = np.zeros(Js.shape)
    pool = Pool()
    chunksize = 1
    for ind, res in enumerate(
            pool.imap(fun, product(log2_sigmas, log2_lambdas)), chunksize):
        Js.flat[ind - 1], Js_std.flat[ind - 1] = res

    # heat map of the (shifted) log objective over the parameter grid
    im = plt.pcolor(log2_lambdas, log2_sigmas, np.log2(Js - Js.min() + 1))