Example 1
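Likely a method on a density-estimator class: it loads or learns (sigma, lmbda), samples a random Fourier basis, fits theta by symmetric score matching, and returns a closure estimating the gradient of the log-density.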
    def update_density_estimate(self):
        # load or learn parameters
        if self.learn_parameters:
            sigma, lmbda = self.determine_sigma_lmbda()
        else:
            sigma = self.sigma0
            lmbda = self.lmbda0

        logger.info("Using sigma: %.2f, lmbda=%.6f" % (sigma, lmbda))

        D = self.Z.shape[1]
        gamma = 0.5 / (sigma**2)
        omega, u = sample_basis(D, self.m, gamma)

        logger.info("Estimate density in RKHS, N=%d, m=%d" % (self.N, self.m))
        theta = score_matching_sym(self.Z, lmbda, omega, u)

        #         logger.info("Computing objective function")
        #         J = _objective_sym(Z, sigma, lmbda, a, K, b, C)
        #         J_xval = np.mean(xvalidate(Z, 5, sigma, self.lmbda, K))
        #         logger.info("N=%d, sigma: %.2f, lambda: %.2f, J(a)=%.2f, XJ(a)=%.2f" % \
        #                 (self.N, sigma, self.lmbda, J, J_xval))

        dlogq_est = lambda x: log_pdf_estimate_grad(
            feature_map_grad_single(x, omega, u), theta)

        return dlogq_est
Example 2
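Appears to be a Bayesian-optimisation callback: it decodes the current best (sigma, lmbda) from the optimiser's info record, refits the estimator on the global data Z, and plots the resulting density estimate over the samples.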
def callback_sigma_lmbda(model, bounds, info, x, index, ftrue):
    global D
    fig = pl.figure(1)
    fig.clf()

    sigma = 2**info[-1]['xbest'][0]
    lmbda = 2**info[-1]['xbest'][1]
    pl.title("sigma=%.2f, lmbda=%.6f" % (sigma, lmbda))
    gamma = 0.5 * (sigma**2)
    omega, u = sample_basis(D, m, gamma)
    theta = score_matching_sym(Z, lmbda, omega, u)
    logq_est = lambda x: np.dot(theta, feature_map_single(x, omega, u))
    dlogq_est = lambda x: np.dot(theta, feature_map_grad_single(x, omega, u))
    Xs = np.linspace(-3, 3)
    Ys = np.linspace(-3, 3)
    Q = evaluate_density_grid(Xs, Ys, logq_est)
    plot_array(Xs, Ys, Q, pl.gca(), plot_contour=True)
    pl.plot(Z[:, 0], Z[:, 1], 'bx')

    for ax in fig.axes:  # remove tick labels
        ax.set_xticklabels([])
        ax.set_yticklabels([])

    pl.draw()
    pl.show(block=False)
Example 3
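A unit test checking that, with a single feature (m=1), the theta returned by score_matching_sym minimises the objective over a 1-D grid around it.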
def test_score_matching_sym_returns_min_1d_grid():
    N = 100
    D = 3
    m = 1
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)

    C = compute_C_memory(X, omega, u)
    b = compute_b_memory(X, omega, u)
    lmbda = .001
    theta = score_matching_sym(X, lmbda, omega, u)
    J = objective(X, theta, lmbda, omega, u, b, C)

    thetas_test = np.linspace(theta[0] - 3, theta[0] + 3)
    Js = np.zeros(len(thetas_test))

    for i, theta_test in enumerate(thetas_test):
        Js[i] = objective(X, np.array([theta_test]), lmbda, omega, u, b, C)


    # plt.plot(thetas_test, Js)
    # plt.plot([theta, theta], [Js.min(), Js.max()])
    # plt.title(str(theta))
    # plt.show()

    assert_almost_equal(Js.min(), J, delta=thetas_test[1] - thetas_test[0])
    assert_almost_equal(thetas_test[Js.argmin()],
                        theta[0],
                        delta=thetas_test[1] - thetas_test[0])
Example 4
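A slider callback that refits the estimator for the selected (sigma, lmbda), computes the objective and its cross-validated value, and redraws either the estimated log-pdf or the norm of its gradient.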
def update_plot(val=None):
    global omega, u
    print("Updating plot")

    lmbda = 2**s_lmbda.val
    sigma = 2**s_sigma.val

    b = compute_b(Z, omega, u)
    C = compute_C(Z, omega, u)
    theta = score_matching_sym(Z, lmbda, omega, u, b, C)
    J = objective(Z, theta, lmbda, omega, u, b, C)
    J_xval = np.mean(
        xvalidate(Z, lmbda, omega, u, n_folds=5, num_repetitions=3))

    logq_est = lambda x: np.dot(theta, feature_map_single(x, omega, u))
    dlogq_est = lambda x: np.dot(theta, feature_map_grad_single(x, omega, u))

    description = "N=%d, sigma: %.2f, lambda: %.2f, m=%.d, J=%.2f, J_xval=%.2f" % \
        (N, sigma, lmbda, m, J, J_xval)

    if plot_pdf:
        D = evaluate_density_grid(Xs, Ys, logq_est)
        description = "log-pdf: " + description
    else:
        D = evaluate_density_grad_grid(Xs, Ys, dlogq_est)
        description = "norm-grad-log-pdf: " + description

    ax.clear()
    ax.plot(Z[:, 0], Z[:, 1], 'bx')
    plot_array(Xs, Ys, D, ax, plot_contour=True)

    ax.set_title(description)

    fig.canvas.draw_idle()
Example 5
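A unit test verifying that score_matching_sym solves the regularised linear system (C + lmbda * I) theta = b.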
def test_score_matching_sym():
    N = 100
    D = 3
    m = 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)

    C = compute_C_memory(X, omega, u)
    b = compute_b_memory(X, omega, u)
    lmbda = 1.
    theta = score_matching_sym(X, lmbda, omega, u)
    theta_manual = np.linalg.solve(C + np.eye(m) * lmbda, b)
    assert_allclose(theta, theta_manual)
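
Example 5 makes the closed form explicit: theta solves the ridge system (C + lmbda * I) theta = b. Below is a minimal self-contained sketch of that fit, assuming the standard random-Fourier cosine features phi_j(x) = sqrt(2/m) * cos(omega_j'x + u_j) and the usual score-matching moments (b from negated feature Laplacians, C from outer products of feature gradients). The library's compute_C_memory / compute_b_memory may differ in details, so score_matching_sym_sketch is an illustration, not the package's implementation.

import numpy as np


def rff_phi(X, omega, u):
    # Rahimi-Recht cosine features: phi_j(x) = sqrt(2/m) * cos(omega_j . x + u_j)
    m = omega.shape[1]
    return np.sqrt(2.0 / m) * np.cos(X.dot(omega) + u)


def score_matching_sym_sketch(X, lmbda, omega, u):
    # ASSUMED construction: for the model log q(x) = theta . phi(x), score
    # matching minimises 0.5 theta' C theta - theta' b + 0.5 * lmbda * ||theta||^2,
    # where b is the negated average feature Laplacian and C the average outer
    # product of feature gradients over data points and dimensions.
    N, D = X.shape
    m = omega.shape[1]
    proj = X.dot(omega) + u                  # (N, m)
    scale = np.sqrt(2.0 / m)
    cos_p, sin_p = np.cos(proj), np.sin(proj)
    # b_j = -(1/N) sum_{n,d} d^2 phi_j / dx_d^2 (x_n)
    #     = scale * ||omega_j||^2 * mean_n cos(proj[n, j])
    b = scale * (omega ** 2).sum(axis=0) * cos_p.mean(axis=0)
    # C = (1/N) sum_{n,d} grad_d phi(x_n) grad_d phi(x_n)'
    C = np.zeros((m, m))
    for d in range(D):
        G = -scale * sin_p * omega[d]        # rows: d phi / dx_d at each x_n
        C += G.T.dot(G) / N
    # the ridge solve that Example 5 checks against np.linalg.solve
    return np.linalg.solve(C + np.eye(m) * lmbda, b)


# Toy usage (names and construction here are illustrative assumptions)
rng = np.random.RandomState(0)
X = rng.randn(100, 3)
omega = rng.randn(3, 10)
u = rng.uniform(0, 2 * np.pi, 10)
theta = score_matching_sym_sketch(X, 1., omega, u)
log_pdf_est = lambda x: np.dot(theta, rff_phi(x[None, :], omega, u)[0])

If the library builds C and b the same way, score_matching_sym_sketch(X, 1., omega, u) should agree with the theta_manual check in Example 5.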
Example 6
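A unit test checking that random perturbations of the fitted theta, across several noise scales, never attain a lower objective value.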
def test_score_matching_sym_returns_min_random_search():
    N = 100
    D = 3
    m = 10
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X = np.random.randn(N, D)

    C = compute_C_memory(X, omega, u)
    b = compute_b_memory(X, omega, u)
    lmbda = 1.
    theta = score_matching_sym(X, lmbda, omega, u)
    J = objective(X, theta, lmbda, omega, u, b, C)

    for noise in [0.0001, 0.001, 0.1, 1, 10, 100]:
        for _ in range(10):
            theta_test = np.random.randn(m) * noise + theta
            J_test = objective(X, theta_test, lmbda, omega, u, b, C)

            assert_less_equal(J, J_test)
Example 7
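A set-up method for an HMC job: it fits a random-feature estimator on Z, swaps it in as the sampling target while keeping the original target for acceptance probabilities, and optionally plots the density estimate and its gradient field.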
    def set_up(self):
        if self.learn_parameters or self.force_relearn_parameters:
            self.sigma, self.lmbda = self.determine_sigma_lmbda()

        logger.info("Using sigma=%.2f, lmbda=%.6f" % (self.sigma, self.lmbda))

        gamma = 0.5 * (self.sigma**2)
        logger.info("Sampling random basis")
        omega, u = sample_basis(self.D, self.m, gamma)

        logger.info("Estimating density in RKHS, N=%d, m=%d, D=%d" %
                    (len(self.Z), self.m, self.D))
        theta = score_matching_sym(self.Z, self.lmbda, omega, u)

        # replace target by kernel estimator to simulate trajectories on
        # but keep original target for computing acceptance probability
        self.orig_target = self.target
        self.target = RandomFeatsEstimator(theta, omega, u)

        HMCJob.set_up(self)

        # plot density estimate
        if self.plot:
            import matplotlib.pyplot as plt
            from scripts.tools.plotting import evaluate_density_grid, evaluate_gradient_grid, plot_array

            Xs = np.linspace(-15, 15)
            Ys = np.linspace(-7, 3)
            Xs_grad = np.linspace(-40, 40, 40)
            Ys_grad = np.linspace(-15, 25, 40)
            G = evaluate_density_grid(Xs, Ys, self.target.log_pdf)
            G_norm, quiver_U, quiver_V, _, _ = evaluate_gradient_grid(
                Xs_grad, Ys_grad, self.target.grad)
            plt.subplot(211)
            plt.plot(self.Z[:, 0], self.Z[:, 1], 'bx')
            plot_array(Xs, Ys, np.exp(G), plot_contour=True)
            plt.subplot(212)
            plot_array(Xs_grad, Ys_grad, G_norm, plot_contour=True)
            plt.quiver(Xs_grad, Ys_grad, quiver_U, quiver_V, color='m')
            plt.ioff()
            plt.show()
Example 8
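A script fragment using what appears to be a Gram-matrix variant of the estimator: it fits coefficients a on banana-distribution samples, reports the objective and its cross-validated value, and builds log-density and gradient estimators plus a Gaussian momentum for HMC.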
    # true target log density
    logq = lambda x: log_banana_pdf(x, compute_grad=False)
    dlogq = lambda x: log_banana_pdf(x, compute_grad=True)

    # estimate density in rkhs
    N = 200
    mu = np.zeros(D)
    Z = sample_banana(N, D)
    lmbda = 1.
    sigma = select_sigma_grid(Z, lmbda=lmbda)

    K = gaussian_kernel(Z, sigma=sigma)
    b = _compute_b_sym(Z, K, sigma)
    C = _compute_C_sym(Z, K, sigma)
    a = score_matching_sym(Z, sigma, lmbda, K, b, C)
    J = _objective_sym(Z, sigma, lmbda, a, K, b, C)
    J_xval = np.mean(xvalidate(Z, 5, sigma, lmbda, K))
    print("N=%d, sigma: %.2f, lambda: %.2f, J(a)=%.2f, XJ(a)=%.2f" % \
            (N, sigma, lmbda, J, J_xval))

    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=sigma)
    kernel_grad = lambda x, X=None: gaussian_kernel_grad(x, X, sigma)
    logq_est = lambda x: log_pdf_estimate(x, a, Z, kernel)
    dlogq_est = lambda x: log_pdf_estimate_grad(x, a, Z, kernel_grad)

    # momentum
    Sigma_p = np.eye(D) * .1
    L_p = np.linalg.cholesky(Sigma_p)
    logp = lambda x: log_gaussian_pdf(
        x, Sigma=L_p, compute_grad=False, is_cholesky=True)
Example 9
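A script fragment fitting the random-feature estimator to Student-t samples and setting up the Gaussian momentum distribution for HMC.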
    dlogq = lambda x: log_student_pdf(x, nu, True)
    logq = lambda x: log_student_pdf(x, nu, False)

    # estimate density in rkhs
    N = 800
    mu = np.zeros(D)
    Z = np.random.standard_t(df=nu, size=(N, D))
    lmbda = 0.0001
    sigma = 0.5
    gamma = 0.5 * (sigma**2)
    m = N

    omega = gamma * np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    logger.info("Estimating density")
    theta = score_matching_sym(Z, lmbda, omega, u)

    logq_est = lambda x: log_pdf_estimate(feature_map(x, omega, u), theta)
    dlogq_est = lambda x: log_pdf_estimate_grad(
        feature_map_grad_single(x, omega, u), theta)

    # momentum
    Sigma_p = np.eye(D)
    L_p = np.linalg.cholesky(Sigma_p)
    logp = lambda x: log_gaussian_pdf(
        x, Sigma=L_p, compute_grad=False, is_cholesky=True)
    dlogp = lambda x: log_gaussian_pdf(
        x, Sigma=L_p, compute_grad=True, is_cholesky=True)
    p_sample = lambda: sample_gaussian(
        N=1, mu=np.zeros(D), Sigma=L_p, is_cholesky=True)[0]
Example 10
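Another apparent Bayesian-optimisation callback, here over lmbda alone: it plots the GP posterior with uncertainty bands, the acquisition function, and the evaluation history, then refits and plots the density estimate at the current best lmbda.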
def callback_lmbda(model, bounds, info, x, index, ftrue):
    """
    Plot the current posterior, the index, and the value of the current
    recommendation.
    """
    global D
    xmin, xmax = bounds[0]
    xx_ = np.linspace(xmin, xmax, 500)  # define grid
    xx = xx_[:, None]

    #     ff = ftrue(xx)                                      # compute true function
    acq = index(xx)  # compute acquisition

    mu, s2 = model.posterior(xx)  # compute posterior and
    lo = mu - 2 * np.sqrt(s2)  # quantiles
    hi = mu + 2 * np.sqrt(s2)

    #     ymin, ymax = ff.min(), ff.max()                     # get plotting ranges
    #     ymin -= 0.2 * (ymax - ymin)
    #     ymax += 0.2 * (ymax - ymin)

    kwplot = {'lw': 2, 'alpha': 0.5}  # common plotting kwargs

    fig = pl.figure(1)
    fig.clf()

    pl.subplot(221)
    #     pl.plot(xx, ff, 'k:', **kwplot)                     # plot true function
    pl.plot(xx, mu, 'b-', **kwplot)  # plot the posterior and
    pl.fill_between(xx_, lo, hi, color='b', alpha=0.1)  # uncertainty bands
    pl.scatter(
        info['x'],
        info['y'],  # plot data
        marker='o',
        facecolor='none',
        zorder=3)
    pl.axvline(x, color='r', **kwplot)  # latest selection
    pl.axvline(info[-1]['xbest'], color='g',
               **kwplot)  # current recommendation
    #     pl.axis((xmin, xmax, ymin, ymax))
    pl.ylabel('posterior')

    pl.subplot(223)
    pl.fill_between(
        xx_,
        acq.min(),
        acq,  # plot acquisition
        color='r',
        alpha=0.1)
    pl.axis('tight')
    pl.axvline(x, color='r', **kwplot)  # plot latest selection
    pl.xlabel('input')
    pl.ylabel('acquisition')

    pl.subplot(224)
    pl.plot(info['x'], 'g')

    pl.subplot(222)
    lmbda = 2**info[-1]['xbest']
    gamma = 0.5 * (sigma**2)
    omega, u = sample_basis(D, m, gamma)
    theta = score_matching_sym(Z, lmbda, omega, u)
    logq_est = lambda x: np.dot(theta, feature_map_single(x, omega, u))
    dlogq_est = lambda x: np.dot(theta, feature_map_grad_single(x, omega, u))
    Xs = np.linspace(-3, 3)
    Ys = np.linspace(-3, 3)
    Q = evaluate_density_grid(Xs, Ys, logq_est)
    plot_array(Xs, Ys, Q, pl.gca(), plot_contour=True)
    pl.plot(Z[:, 0], Z[:, 1], 'bx')

    for ax in fig.axes:  # remove tick labels
        ax.set_xticklabels([])
        ax.set_yticklabels([])

    pl.draw()
    pl.show(block=False)