Example #1
def update_plot(val=None):
    sigma = 2**s_sigma.val
    lmbda = 2**s_lmbda.val

    # fit the RKHS log-density model via score matching
    K = gaussian_kernel(Z, sigma=sigma)
    b = _compute_b_sym(Z, K, sigma)
    C = _compute_C_sym(Z, K, sigma)
    a = score_matching_sym(Z, sigma, lmbda, K, b, C)

    # training objective and 5-fold cross-validated objective (3 repetitions)
    J = _objective_sym(Z, sigma, lmbda, a, K, b, C)
    J_xval = np.mean(xvalidate(Z, 5, sigma, lmbda, K, num_repetitions=3))

    # inspect the first few fitted coefficients
    print(a[:5])

    # closures for the kernel, its gradient, and the resulting
    # log-pdf estimate and its gradient
    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=sigma)
    kernel_grad = lambda x, X=None: gaussian_kernel_grad(x, X, sigma)
    logq_est = lambda x: log_pdf_estimate(x, a, Z, kernel)
    dlogq_est = lambda x: log_pdf_estimate_grad(x, a, Z, kernel_grad)

    description = "N=%d, sigma: %.2f, lambda: %.2f, J(a)=%.2f, XJ(a)=%.2f" % \
        (N, sigma, lmbda, J, J_xval)

    if plot_pdf:
        D = evaluate_density_grid(Xs, Ys, logq_est)
        description = "log-pdf: " + description
    else:
        D = evaluate_density_grad_grid(Xs, Ys, dlogq_est)
        description = "norm-grad-log-pdf: " + description

    ax.clear()
    ax.plot(Z[:, 0], Z[:, 1], 'bx')
    plot_array(Xs, Ys, D, ax, plot_contour=True)

    ax.set_title(description)

    fig.canvas.draw_idle()
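
The update_plot above is a slider callback: the widgets (s_sigma, s_lmbda) and the figure state (fig, ax, Xs, Ys, Z, N, plot_pdf) are created outside the snippet. A minimal sketch of that wiring, assuming matplotlib's Slider widget and log2-scaled parameter ranges; every name and range below is illustrative, not taken from the original:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider

# hypothetical setup for the update_plot callback above
N = 200
Z = np.random.randn(N, 2)    # stand-in sample; the original likely uses a banana target
Xs = np.linspace(-5, 5, 50)  # evaluation grid for the density plots
Ys = np.linspace(-5, 5, 50)
plot_pdf = True              # toggle between log-pdf and gradient-norm plots

fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.25)

# sliders hold log2(sigma) and log2(lambda), matching the 2**val reads above
s_sigma = Slider(fig.add_axes([0.15, 0.10, 0.7, 0.03]), "log2 sigma", -5, 10, valinit=1)
s_lmbda = Slider(fig.add_axes([0.15, 0.05, 0.7, 0.03]), "log2 lambda", -10, 3, valinit=0)

# re-fit the estimator and redraw whenever either slider moves
s_sigma.on_changed(update_plot)
s_lmbda.on_changed(update_plot)

update_plot()
plt.show()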
Example #2
    D = 2

    # true target log density
    logq = lambda x: log_banana_pdf(x, compute_grad=False)
    dlogq = lambda x: log_banana_pdf(x, compute_grad=True)

    # estimate density in the RKHS
    N = 200
    mu = np.zeros(D)
    Z = sample_banana(N, D)
    lmbda = 1.
    sigma = select_sigma_grid(Z, lmbda=lmbda)

    K = gaussian_kernel(Z, sigma=sigma)
    b = _compute_b_sym(Z, K, sigma)
    C = _compute_C_sym(Z, K, sigma)
    a = score_matching_sym(Z, sigma, lmbda, K, b, C)
    # training objective and 5-fold cross-validated objective
    J = _objective_sym(Z, sigma, lmbda, a, K, b, C)
    J_xval = np.mean(xvalidate(Z, 5, sigma, lmbda, K))
    print("N=%d, sigma: %.2f, lambda: %.2f, J(a)=%.2f, XJ(a)=%.2f" % \
            (N, sigma, lmbda, J, J_xval))

    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=sigma)
    kernel_grad = lambda x, X=None: gaussian_kernel_grad(x, X, sigma)
    logq_est = lambda x: log_pdf_estimate(x, a, Z, kernel)
    dlogq_est = lambda x: log_pdf_estimate_grad(x, a, Z, kernel_grad)

    # momentum
    Sigma_p = np.eye(D) * .1
    L_p = np.linalg.cholesky(Sigma_p)
    logp = lambda x: log_gaussian_pdf(
        x, Sigma=L_p, compute_grad=False, is_cholesky=True)
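
The kernel helpers (gaussian_kernel, gaussian_kernel_grad) and the estimator readout (log_pdf_estimate) used in all three examples are not shown. A minimal sketch of what they plausibly compute, assuming the parameterisation k(x, y) = exp(-||x - y||^2 / sigma); the exact bandwidth convention of the original library is an assumption:

import numpy as np

def gaussian_kernel_sketch(X, Y=None, sigma=1.0):
    # Gram matrix of k(x, y) = exp(-||x - y||^2 / sigma); convention assumed
    if Y is None:
        Y = X
    sq_dists = np.sum(X ** 2, 1)[:, None] + np.sum(Y ** 2, 1)[None, :] \
        - 2 * X.dot(Y.T)
    return np.exp(-sq_dists / sigma)

def gaussian_kernel_grad_sketch(x, X, sigma=1.0):
    # gradient of k(x, y) with respect to x, one row per y in X
    k = gaussian_kernel_sketch(x[None, :], X, sigma)[0]
    return 2.0 / sigma * (X - x) * k[:, None]

def log_pdf_estimate_sketch(x, a, Z, kernel):
    # unnormalised log-density estimate: log q(x) ~ sum_i a_i k(z_i, x)
    return kernel(Z, x[None, :])[:, 0].dot(a)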
Example #3
    def compute_trajectory(self, random_start_state=None):
        logger.debug("Entering")

        if random_start_state is not None:
            np.random.set_state(random_start_state)
        else:
            random_start_state = np.random.get_state()

        # momentum
        L_p = np.linalg.cholesky(np.eye(self.D) * self.sigma_p)
        self.logp = lambda x: log_gaussian_pdf(
            x, Sigma=L_p, compute_grad=False, is_cholesky=True)
        self.dlogp = lambda x: log_gaussian_pdf(
            x, Sigma=L_p, compute_grad=True, is_cholesky=True)
        self.p_sample = lambda: sample_gaussian(
            N=1, mu=np.zeros(self.D), Sigma=L_p, is_cholesky=True)[0]

        # set up target and momentum densities and gradients
        self.set_up()

        logger.info("Learning kernel bandwidth")
        sigma = select_sigma_grid(self.Z, lmbda=self.lmbda, log2_sigma_max=15)
        logger.info("Using lmbda=%.2f, sigma: %.2f" % (self.lmbda, sigma))

        logger.info("Computing kernel matrix")
        K = gaussian_kernel(self.Z, sigma=sigma)

        logger.info("Estimate density in RKHS")
        b = _compute_b_sym(self.Z, K, sigma)
        C = _compute_C_sym(self.Z, K, sigma)
        a = score_matching_sym(self.Z, sigma, self.lmbda, K, b, C)

        #         logger.info("Computing objective function")
        #         J = _objective_sym(Z, sigma, self.lmbda, a, K, b, C)
        #         J_xval = np.mean(xvalidate(Z, 5, sigma, self.lmbda, K))
        #         logger.info("N=%d, sigma: %.2f, lambda: %.2f, J(a)=%.2f, XJ(a)=%.2f" % \
        #                 (self.N, sigma, self.lmbda, J, J_xval))

        kernel_grad = lambda x, X=None: gaussian_kernel_grad(x, X, sigma)
        dlogq_est = lambda x: log_pdf_estimate_grad(x, a, self.Z, kernel_grad)

        logger.info("Simulating trajectory for L=%d steps of size %.2f" % \
                     (self.num_steps, self.step_size))
        # starting state
        p0 = self.p_sample()
        q0 = self.q_sample()

        Qs, Ps = leapfrog(q0, self.dlogq, p0, self.dlogp, self.step_size,
                          self.num_steps, self.max_steps)

        # run second integrator for same amount of steps
        steps_taken = len(Qs)
        logger.info("%d steps taken" % steps_taken)
        Qs_est, Ps_est = leapfrog(q0, dlogq_est, p0, self.dlogp,
                                  self.step_size, steps_taken)

        logger.info("Computing average acceptance probabilities")
        log_acc = compute_log_accept_pr(q0, p0, Qs, Ps, self.logq, self.logp)
        log_acc_est = compute_log_accept_pr(q0, p0, Qs_est, Ps_est, self.logq,
                                            self.logp)
        acc_mean = np.mean(np.exp(log_acc))
        acc_est_mean = np.mean(np.exp(log_acc_est))

        logger.info("Computing average volumes")
        log_det = compute_log_det_trajectory(Qs, Ps)
        log_det_est = compute_log_det_trajectory(Qs_est, Ps_est)

        logger.info("Average acceptance prob: %.2f, %.2f" %
                    (acc_mean, acc_est_mean))
        logger.info("Log-determinant: %.2f, %.2f" % (log_det, log_det_est))

        logger.debug("Leaving")
        return acc_mean, acc_est_mean, log_det, log_det_est, steps_taken, random_start_state
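
leapfrog is the standard HMC integrator. A minimal sketch of the variant these call sites suggest, using the convention H(q, p) = -logq(q) - logp(p); it returns the whole trajectory, and the original's max_steps argument and any early-termination logic are omitted, so treat the signature as an assumption:

import numpy as np

def leapfrog_sketch(q0, dlogq, p0, dlogp, step_size, num_steps):
    # store positions and momenta along the trajectory
    Qs = np.zeros((num_steps + 1, len(q0)))
    Ps = np.zeros((num_steps + 1, len(p0)))
    Qs[0], Ps[0] = q0, p0

    q, p = q0.copy(), p0.copy()
    p = p + 0.5 * step_size * dlogq(q)              # initial momentum half-step
    for i in range(num_steps):
        q = q - step_size * dlogp(p)                # full position step
        grad = dlogq(q)
        Qs[i + 1] = q
        Ps[i + 1] = p + 0.5 * step_size * grad      # synchronised momentum
        if i < num_steps - 1:
            p = p + step_size * grad                # two half-steps combined
    return Qs, Ps

Given synchronised trajectories, compute_log_accept_pr presumably evaluates the Metropolis log-acceptance ratio min(0, H(q0, p0) - H(q_t, p_t)) at every step, which is what the averaged exp(log_acc) values above report.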