Example #1
def update_plot(val=None):
    sigma = 2**s_sigma.val
    lmbda = 2**s_lmbda.val

    K = gaussian_kernel(Z, sigma=sigma)
    b = _compute_b_sym(Z, K, sigma)
    C = _compute_C_sym(Z, K, sigma)
    a = score_matching_sym(Z, sigma, lmbda, K, b, C)
    J = _objective_sym(Z, sigma, lmbda, a, K, b, C)
    J_xval = np.mean(xvalidate(Z, 5, sigma, lmbda, K, num_repetitions=3))

    print(a[:5])  # debug: inspect the first few fitted coefficients

    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=sigma)
    kernel_grad = lambda x, X=None: gaussian_kernel_grad(x, X, sigma)
    logq_est = lambda x: log_pdf_estimate(x, a, Z, kernel)
    dlogq_est = lambda x: log_pdf_estimate_grad(x, a, Z, kernel_grad)

    description = "N=%d, sigma: %.2f, lambda: %.2f, J(a)=%.2f, XJ(a)=%.2f" % \
        (N, sigma, lmbda, J, J_xval)

    if plot_pdf:
        D = evaluate_density_grid(Xs, Ys, logq_est)
        description = "log-pdf: " + description
    else:
        D = evaluate_density_grad_grid(Xs, Ys, dlogq_est)
        description = "norm-grad-log-pdf: " + description

    ax.clear()
    ax.plot(Z[:, 0], Z[:, 1], 'bx')
    plot_array(Xs, Ys, D, ax, plot_contour=True)

    ax.set_title(description)

    fig.canvas.draw_idle()
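
The callback above reads s_sigma.val and s_lmbda.val, so it is presumably wired to two matplotlib.widgets.Slider controls. A minimal sketch of that wiring, assuming log2-scale slider ranges (the axes positions, ranges and initial values are illustrative, not taken from the original script):

from matplotlib.widgets import Slider

# hypothetical slider setup on the existing figure; ranges are assumptions
ax_sigma = fig.add_axes([0.15, 0.02, 0.3, 0.03])
ax_lmbda = fig.add_axes([0.60, 0.02, 0.3, 0.03])
s_sigma = Slider(ax_sigma, "log2 sigma", -5, 10, valinit=1)
s_lmbda = Slider(ax_lmbda, "log2 lambda", -15, 5, valinit=-5)

# re-draw whenever either slider moves
s_sigma.on_changed(update_plot)
s_lmbda.on_changed(update_plot)
update_plot()
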
Example #2
def callback_sigma_lmbda(model, bounds, info, x, index, ftrue):
    global D
    fig = pl.figure(1)
    fig.clf()

    sigma = 2**info[-1]['xbest'][0]
    lmbda = 2**info[-1]['xbest'][1]
    pl.title("sigma=%.2f, lmbda=%.6f" % (sigma, lmbda))
    gamma = 0.5 * (sigma**2)
    omega, u = sample_basis(D, m, gamma)
    theta = score_matching_sym(Z, lmbda, omega, u)
    logq_est = lambda x: np.dot(theta, feature_map_single(x, omega, u))
    dlogq_est = lambda x: np.dot(theta, feature_map_grad_single(x, omega, u))
    Xs = np.linspace(-3, 3)
    Ys = np.linspace(-3, 3)
    Q = evaluate_density_grid(Xs, Ys, logq_est)
    plot_array(Xs, Ys, Q, pl.gca(), plot_contour=True)
    pl.plot(Z[:, 0], Z[:, 1], 'bx')

    for ax in fig.axes:  # remove tick labels
        ax.set_xticklabels([])
        ax.set_yticklabels([])

    pl.draw()
    pl.show(block=False)
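
Examples #2, #3, #5 and #6 all build the estimator from sample_basis and feature_map_single. A minimal sketch of these helpers under the standard random Fourier feature construction for a Gaussian kernel (Rahimi and Recht); the library's actual parameterisation of gamma may differ:

import numpy as np

def sample_basis(D, m, gamma):
    # frequencies for k(x, y) = exp(-gamma * ||x - y||^2); this convention is an assumption
    omega = np.sqrt(2.0 * gamma) * np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    return omega, u

def feature_map_single(x, omega, u):
    m = len(u)
    return np.cos(np.dot(x, omega) + u) * np.sqrt(2.0 / m)

def feature_map_grad_single(x, omega, u):
    # shape (m, D), so np.dot(theta, feature_map_grad_single(x, omega, u)) is the gradient in x
    m = len(u)
    return -np.sin(np.dot(x, omega) + u)[:, np.newaxis] * omega.T * np.sqrt(2.0 / m)
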
Example #3
def update_plot(val=None):
    global omega, u
    print("Updating plot")

    lmbda = 2**s_lmbda.val
    sigma = 2**s_sigma.val

    b = compute_b(Z, omega, u)
    C = compute_C(Z, omega, u)
    theta = score_matching_sym(Z, lmbda, omega, u, b, C)
    J = objective(Z, theta, lmbda, omega, u, b, C)
    J_xval = np.mean(
        xvalidate(Z, lmbda, omega, u, n_folds=5, num_repetitions=3))

    logq_est = lambda x: np.dot(theta, feature_map_single(x, omega, u))
    dlogq_est = lambda x: np.dot(theta, feature_map_grad_single(x, omega, u))

    description = "N=%d, sigma: %.2f, lambda: %.2f, m=%.d, J=%.2f, J_xval=%.2f" % \
        (N, sigma, lmbda, m, J, J_xval)

    if plot_pdf:
        D = evaluate_density_grid(Xs, Ys, logq_est)
        description = "log-pdf: " + description
    else:
        D = evaluate_density_grad_grid(Xs, Ys, dlogq_est)
        description = "norm-grad-log-pdf: " + description

    ax.clear()
    ax.plot(Z[:, 0], Z[:, 1], 'bx')
    plot_array(Xs, Ys, D, ax, plot_contour=True)

    ax.set_title(description)

    fig.canvas.draw_idle()
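
With finite features, score_matching_sym plausibly reduces to a ridge-regularised linear solve in b and C; a sketch under that assumption (the sign convention and the exact regulariser are guesses, not taken from the library):

def score_matching_sym(Z, lmbda, omega, u, b=None, C=None):
    # hypothetical: solve (C + lmbda * I) theta = b for the feature weights
    if b is None:
        b = compute_b(Z, omega, u)
    if C is None:
        C = compute_C(Z, omega, u)
    return np.linalg.solve(C + lmbda * np.eye(len(b)), b)
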
Example #4
def plot_true():
    G = evaluate_density_grid(Xs, Ys, logq)
    G_grad = evaluate_density_grad_grid(Xs, Ys, dlogq)

    plt.figure(figsize=(12, 4))
    plt.subplot(121)
    plot_array(Xs, Ys, G, plot_contour=True)
    plt.plot(Z[:, 0], Z[:, 1], 'bx')
    plt.title("True log-pdf")

    plt.subplot(122)
    plot_array(Xs, Ys, G_grad, plot_contour=True)
    plt.plot(Z[:, 0], Z[:, 1], 'bx')
    plt.title("True gradient norm log-pdf")
Example #5
    def set_up(self):
        if self.learn_parameters or self.force_relearn_parameters:
            self.sigma, self.lmbda = self.determine_sigma_lmbda()

        logger.info("Using sigma=%.2f, lmbda=%.6f" % (self.sigma, self.lmbda))

        gamma = 0.5 * (self.sigma**2)
        logger.info("Sampling random basis")
        omega, u = sample_basis(self.D, self.m, gamma)

        logger.info("Estimating density in RKHS, N=%d, m=%d, D=%d" %
                    (len(self.Z), self.m, self.D))
        theta = score_matching_sym(self.Z, self.lmbda, omega, u)

        # replace the target with the kernel estimator for simulating trajectories,
        # but keep the original target for computing the acceptance probability
        self.orig_target = self.target
        self.target = RandomFeatsEstimator(theta, omega, u)

        HMCJob.set_up(self)

        # plot density estimate
        if self.plot:
            import matplotlib.pyplot as plt
            from scripts.tools.plotting import evaluate_density_grid, evaluate_gradient_grid, plot_array

            Xs = np.linspace(-15, 15)
            Ys = np.linspace(-7, 3)
            Xs_grad = np.linspace(-40, 40, 40)
            Ys_grad = np.linspace(-15, 25, 40)
            G = evaluate_density_grid(Xs, Ys, self.target.log_pdf)
            G_norm, quiver_U, quiver_V, _, _ = evaluate_gradient_grid(
                Xs_grad, Ys_grad, self.target.grad)
            plt.subplot(211)
            plt.plot(self.Z[:, 0], self.Z[:, 1], 'bx')
            plot_array(Xs, Ys, np.exp(G), plot_contour=True)
            plt.subplot(212)
            plot_array(Xs_grad, Ys_grad, G_norm, plot_contour=True)
            plt.quiver(Xs_grad, Ys_grad, quiver_U, quiver_V, color='m')
            plt.ioff()
            plt.show()
Example #6
theta = score_matching_sym(Z, lmbda, omega, u)
logq_est = lambda x: log_pdf_estimate(feature_map_single(x, omega, u), theta)
dlogq_est = lambda x: log_pdf_estimate_grad(feature_map_grad_single(x, omega, u),
                                            theta)

# plot density estimate
plt.figure(figsize=(4, 8))
Xs = np.linspace(-15, 15)
Ys = np.linspace(-7, 3)
Xs_grad = np.linspace(-40, 40, 40)
Ys_grad = np.linspace(-15, 25, 40)
G = evaluate_density_grid(Xs, Ys, logq_est)
G_norm, quiver_U, quiver_V, _, _ = evaluate_gradient_grid(Xs_grad, Ys_grad, dlogq_est)
plt.subplot(211)
plt.plot(Z[:, 0], Z[:, 1], 'bx')
plot_array(Xs, Ys, np.exp(G), plot_contour=True)
plt.subplot(212)
plot_array(Xs_grad, Ys_grad, G_norm, plot_contour=True)
plt.quiver(Xs_grad, Ys_grad, quiver_U, quiver_V, color='m')
plt.show()
# plain MCMC parameters
thin = 10
num_warmup = 10
num_iterations = 1000 + num_warmup * thin
q_current = np.array([0., -3.]) + 50
q_current_est = q_current

# hmc parameters
num_steps_min = 10
num_steps_max = 100
step_size_min = 0.05
Example #7
def callback_lmbda(model, bounds, info, x, index, ftrue):
    """
    Plot the current posterior, the index, and the value of the current
    recommendation.
    """
    global D
    xmin, xmax = bounds[0]
    xx_ = np.linspace(xmin, xmax, 500)  # define grid
    xx = xx_[:, None]

    #     ff = ftrue(xx)                                      # compute true function
    acq = index(xx)  # compute acquisition

    mu, s2 = model.posterior(xx)  # compute posterior and
    lo = mu - 2 * np.sqrt(s2)  # quantiles
    hi = mu + 2 * np.sqrt(s2)

    #     ymin, ymax = ff.min(), ff.max()                     # get plotting ranges
    #     ymin -= 0.2 * (ymax - ymin)
    #     ymax += 0.2 * (ymax - ymin)

    kwplot = {'lw': 2, 'alpha': 0.5}  # common plotting kwargs

    fig = pl.figure(1)
    fig.clf()

    pl.subplot(221)
    #     pl.plot(xx, ff, 'k:', **kwplot)                     # plot true function
    pl.plot(xx, mu, 'b-', **kwplot)  # plot the posterior and
    pl.fill_between(xx_, lo, hi, color='b', alpha=0.1)  # uncertainty bands
    pl.scatter(
        info['x'],
        info['y'],  # plot data
        marker='o',
        facecolor='none',
        zorder=3)
    pl.axvline(x, color='r', **kwplot)  # latest selection
    pl.axvline(info[-1]['xbest'], color='g',
               **kwplot)  # current recommendation
    #     pl.axis((xmin, xmax, ymin, ymax))
    pl.ylabel('posterior')

    pl.subplot(223)
    pl.fill_between(
        xx_,
        acq.min(),
        acq,  # plot acquisition
        color='r',
        alpha=0.1)
    pl.axis('tight')
    pl.axvline(x, color='r', **kwplot)  # plot latest selection
    pl.xlabel('input')
    pl.ylabel('acquisition')

    pl.subplot(224)
    pl.plot(info['x'], 'g')

    pl.subplot(222)
    lmbda = 2**info[-1]['xbest']
    gamma = 0.5 * (sigma**2)
    omega, u = sample_basis(D, m, gamma)
    theta = score_matching_sym(Z, lmbda, omega, u)
    logq_est = lambda x: np.dot(theta, feature_map_single(x, omega, u))
    dlogq_est = lambda x: np.dot(theta, feature_map_grad_single(x, omega, u))
    Xs = np.linspace(-3, 3)
    Ys = np.linspace(-3, 3)
    Q = evaluate_density_grid(Xs, Ys, logq_est)
    plot_array(Xs, Ys, Q, pl.gca(), plot_contour=True)
    pl.plot(Z[:, 0], Z[:, 1], 'bx')

    for ax in fig.axes:  # remove tick labels
        ax.set_xticklabels([])
        ax.set_yticklabels([])

    pl.draw()
    pl.show(block=False)
Example #8
    def propose(self, current, current_log_pdf, samples, accepted):
        # draw random variables from a fixed random stream without modifying the current one
        rnd_state = np.random.get_state()
        np.random.set_state(self.hmc_rnd_state)
         
        if current_log_pdf is None:
            current_log_pdf = self.orig_target.log_pdf(current)
         
        # sample momentum and leapfrog parameters
        p0 = self.momentum.sample()
        num_steps = np.random.randint(self.num_steps_min, self.num_steps_max + 1)
        step_size = np.random.rand() * (self.step_size_max - self.step_size_min) + self.step_size_min
         
        # restore random state
        self.hmc_rnd_state = np.random.get_state()
        np.random.set_state(rnd_state)
         
        logger.debug("Simulating Hamiltonian flow")
        Qs, Ps = leapfrog(current, self.target.grad, p0, self.momentum.grad, step_size, num_steps)
         
        q = Qs[-1]
        p = Ps[-1]
         
        logger.debug("Momentum start: %s" % str(p0))
        logger.debug("Momentum end: %s" % str(p))
         
        # compute acceptance probability, extracting log_pdf of q
        p0_log_pdf = self.momentum.log_pdf(p0)
        p_log_pdf = self.momentum.log_pdf(p)
         
        # use a function call to be able to overload it for KMC
        acc_prob, log_pdf_q = self.accept_prob_log_pdf(current, q, p0_log_pdf, p_log_pdf, current_log_pdf, samples)
         
        if len(samples) % 100 == 0:
            logger.debug("Plotting")
            import matplotlib.pyplot as plt
             
            res = 50
            Xs_q = np.linspace(-4, 4, res)
            Ys_q = np.linspace(-4, 4, res)
         
            # helpers that vary two coordinates of the current point and
            # evaluate the target along that 2d slice
            D1 = 0
            D2 = 1

            def dummy_grad(X_2d):
                theta = current.copy()
                theta[D1] = X_2d[0]
                theta[D2] = X_2d[1]
                return self.target.grad(theta)

            def dummy(X_2d):
                theta = current.copy()
                theta[D1] = X_2d[0]
                theta[D2] = X_2d[1]
                return self.target.log_pdf(theta)
             
            plt.figure()
            G_norm, U_q, V, X, Y = evaluate_gradient_grid(Xs_q, Ys_q, dummy_grad)
            plot_array(Xs_q, Ys_q, G_norm)
            plt.plot(self.Z[:, D1], self.Z[:, D2], '.')
            plt.plot(Qs[:, D1], Qs[:, D2], 'r-')
            plt.plot(samples[:, D1], samples[:, D2], 'm-')
            plt.plot(current[D1], current[D2], 'b*', markersize=15)
            plt.plot(Qs[-1, D1], Qs[-1, D2], 'r*', markersize=15)
            plt.quiver(X, Y, U_q, V, color='m')
             
            # diagnostic quantities along the trajectory (computed for inspection)
            acc_probs = np.exp(compute_log_accept_pr(current, p0, Qs, Ps, self.orig_target.log_pdf, self.momentum.log_pdf))
            H_ratios = np.exp(compute_log_accept_pr(current, p0, Qs, Ps, self.target.log_pdf, self.momentum.log_pdf))
            target_ratio = [np.min([1, np.exp(self.orig_target.log_pdf(x) - current_log_pdf)]) for x in Qs]
            momentum_ratio = [np.min([1, np.exp(self.momentum.log_pdf(x) - p0_log_pdf)]) for x in Ps]
            target_log_pdf = np.exp(np.array([self.orig_target.log_pdf(x) for x in Qs]))

            plt.show()
        
        return q, acc_prob, log_pdf_q
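
leapfrog as called in propose takes the start state, both log-density gradients, and the step schedule, and returns the whole trajectories Qs, Ps. A sketch of the standard leapfrog integrator with that interface (recording the start point as the first trajectory entry is an assumption):

def leapfrog(q0, dlogq, p0, dlogp, step_size, num_steps):
    Qs = np.zeros((num_steps + 1, len(q0)))
    Ps = np.zeros((num_steps + 1, len(p0)))
    Qs[0], Ps[0] = q0, p0
    q, p = q0.copy(), p0.copy()
    for t in range(num_steps):
        p = p + 0.5 * step_size * dlogq(q)  # half step in momentum
        q = q - step_size * dlogp(p)        # full step in position; -dlogp(p) = p for unit Gaussian momentum
        p = p + 0.5 * step_size * dlogq(q)  # half step in momentum
        Qs[t + 1], Ps[t + 1] = q, p
    return Qs, Ps
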
Example #9
    # simulate true and approximate Hamiltonian
    Qs, Ps = leapfrog(q0, dlogq, p0, dlogp, step_size, num_steps)
    Qs_est, Ps_est = leapfrog(q0, dlogq_est, p0, dlogp, step_size, num_steps)
    Hs = compute_hamiltonian(Qs, Ps, logq, logp)
    Hs_est = compute_hamiltonian(Qs_est, Ps_est, logq, logp)

    # compute acceptance probabilities
    log_acc = compute_log_accept_pr(q0, p0, Qs, Ps, logq, logp)
    log_acc_est = compute_log_accept_pr(q0, p0, Qs_est, Ps_est, logq, logp)

    # normalise Hamiltonians
    Hs -= Hs.mean()
    Hs_est -= Hs_est.mean()

    plt.figure()
    plot_array(Xs_q, Ys_q, np.exp(G))
    plot_2d_trajectory(Qs)
    plt.title("HMC")
    plt.gca().xaxis.set_visible(False)
    plt.gca().yaxis.set_visible(False)
    plt.savefig(fname_base + "_hmc.eps", bbox_inches="tight")

    plt.figure()
    plot_array(Xs_q, Ys_q, np.exp(G_est))
    plt.plot(Z[:, 0], Z[:, 1], 'bx')
    plot_2d_trajectory(Qs_est)
    plt.title("KMC")
    plt.gca().xaxis.set_visible(False)
    plt.gca().yaxis.set_visible(False)
    plt.savefig(fname_base + "_kmc.eps", bbox_inches="tight")
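
compute_hamiltonian and compute_log_accept_pr follow from the usual identities H(q, p) = -log q(q) - log p(p) and log a_t = min(0, H(q0, p0) - H(q_t, p_t)); a sketch matching the call sites above:

def compute_hamiltonian(Qs, Ps, logq, logp):
    # Hamiltonian evaluated along a trajectory
    return np.array([-logq(q) - logp(p) for q, p in zip(Qs, Ps)])

def compute_log_accept_pr(q0, p0, Qs, Ps, logq, logp):
    # log probability of accepting each point along the trajectory
    H0 = -logq(q0) - logp(p0)
    return np.minimum(0.0, H0 - compute_hamiltonian(Qs, Ps, logq, logp))
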
Example #10
    # sample data point and predict
    f = predict(x, alphas[:i]) * phi_x

    # gradient of f at x
    f_grad = feature_map_grad_single(x, omega, u) * f

    # accumulate gradient contributions over input dimensions
    grad = 0
    for d in range(D):
        phi_derivative_d = feature_map_derivative_d(x, omega, u, d)
        phi_derivative2_d = feature_map_derivative2_d(x, omega, u, d)

        grad += phi_derivative_d * f_grad[d] + phi_derivative2_d

    # take gradient step
    r = learning_rate(i + 1)
    alphas[i] = -r * grad * phi_x

    # down-weight past
    alphas[:i] *= (1 - r * lmbda)

# visualise log pdf
log_pdf = lambda x: predict(x, alphas)
res = 20
Xs = np.linspace(-3, 3, res)
Ys = np.linspace(-3, 3, res)

# evaluate density and estimate
G = evaluate_density_grid(Xs, Ys, log_pdf)
plot_array(Xs, Ys, np.exp(G))
plt.show()
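
learning_rate(i + 1) is left undefined in the snippet; any Robbins-Monro style schedule fits, e.g. (an illustrative choice, not necessarily the original's):

learning_rate = lambda t: 1.0 / np.sqrt(t)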