Example #1
    # Initialize the occlusion to be a block.
    init_occlusion = -np.ones((rows, cols))
    init_occlusion[15:25, 15:25] = 0.0
    init_occlusion = init_occlusion.ravel()

    def drag(vx): return np.mean(init_vx - vx)  # average decrease in x-velocity
    def lift(vy): return np.mean(vy - init_vy)  # average increase in y-velocity

    def objective(params):
        cur_occlusion = np.reshape(params, (rows, cols))
        final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion)
        return -lift(final_vy) / drag(final_vx)  # maximize lift/drag ratio

    # Specify gradient of objective function using autogradwithbay.
    objective_with_grad = value_and_grad(objective)

    fig = plt.figure(figsize=(8,8))
    ax = fig.add_subplot(111, frameon=False)

    def callback(weights):
        cur_occlusion = np.reshape(weights, (rows, cols))
        simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion, ax)

    print("Rendering initial flow...")
    callback(init_occlusion)

    print("Optimizing initial conditions...")
    result = minimize(objective_with_grad, init_occlusion, jac=True, method='CG',
                      options={'maxiter':50, 'disp':True}, callback=callback)
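
A minimal sketch of the value_and_grad + scipy.optimize.minimize pattern used
above, applied to a toy objective (toy_objective is an illustrative name, not
part of the example):

    import autogradwithbay.numpy as np
    from autogradwithbay import value_and_grad
    from scipy.optimize import minimize

    def toy_objective(x):
        # Any differentiable scalar-valued function of an array works here.
        return np.sum(x ** 2)

    # value_and_grad(f) returns a function computing the pair (f(x), grad f(x)),
    # which minimize consumes directly when jac=True.
    res = minimize(value_and_grad(toy_objective), np.ones(3), jac=True, method='CG')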
Example #2
    def callback(params):
        # Show posterior marginals.
        plot_xs = np.reshape(np.linspace(-7, 7, 300), (300,1))
        pred_mean, pred_cov = predict(params, X, y, plot_xs)
        marg_std = np.sqrt(np.diag(pred_cov))
        ax.plot(plot_xs, pred_mean, 'b')
        ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
                np.concatenate([pred_mean - 1.96 * marg_std,
                               (pred_mean + 1.96 * marg_std)[::-1]]),
                alpha=.15, fc='Blue', ec='None')

        # Show samples from posterior.
        rs = npr.RandomState(0)
        sampled_funcs = rs.multivariate_normal(pred_mean, pred_cov, size=10)
        ax.plot(plot_xs, sampled_funcs.T)

        ax.plot(X, y, 'kx')
        ax.set_ylim([-1.5, 1.5])
        ax.set_xticks([])
        ax.set_yticks([])
        plt.draw()
        plt.pause(1.0/60.0)

    # Initialize covariance parameters
    rs = npr.RandomState(0)
    init_params = 0.1 * rs.randn(num_params)

    print("Optimizing covariance parameters...")
    cov_params = minimize(value_and_grad(objective), init_params, jac=True,
                          method='CG', callback=callback).x
    plt.pause(10.0)
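
The shaded band above is the pointwise 95% interval of the Gaussian posterior:
mean ± 1.96 marginal standard deviations, where the marginal variances are the
diagonal of the predictive covariance. A self-contained sketch with made-up
values:

    import numpy as np

    pred_mean = np.zeros(4)                # made-up predictive mean
    pred_cov = 0.04 * np.eye(4)            # made-up predictive covariance
    marg_std = np.sqrt(np.diag(pred_cov))  # marginal std devs from the diagonal
    lower = pred_mean - 1.96 * marg_std    # pointwise 2.5th percentile
    upper = pred_mean + 1.96 * marg_std    # pointwise 97.5th percentile

    # Posterior draws come from the full multivariate normal, as in the code above.
    samples = np.random.RandomState(0).multivariate_normal(pred_mean, pred_cov, size=10)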
Example #3
    def print_training_prediction(weights):
        logprobs = np.asarray(pred_fun(weights, train_inputs))
        for t in range(logprobs.shape[1]):
            training_text  = one_hot_to_string(train_inputs[:,t,:])
            predicted_text = one_hot_to_string(logprobs[:,t,:])
            print(training_text.replace('\n', ' ') + "|" + predicted_text.replace('\n', ' '))

    # Wrap function to only have one argument, for scipy.minimize.
    def training_loss(weights):
        return -loglike_fun(weights, train_inputs, train_inputs)

    def callback(weights):
        print("Train loss:", training_loss(weights))
        print_training_prediction(weights)

    # Build gradient of loss function using autogradwithbay.
    training_loss_and_grad = value_and_grad(training_loss)

    init_weights = npr.randn(num_weights) * param_scale
    # Check the gradients numerically, just to be safe
    quick_grad_check(training_loss, init_weights)
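    # (quick_grad_check compares the analytic gradient of training_loss against
    #  a numerical finite-difference estimate at init_weights, as a sanity check.)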

    print("Training LSTM...")
    result = minimize(training_loss_and_grad, init_weights, jac=True, method='CG',
                      options={'maxiter':train_iters}, callback=callback)
    trained_weights = result.x

    print()
    print("Generating text from RNN...")
    num_letters = 30
    for t in range(20):
        text = ""
Example #4
    # Set up a figure with latent-space and data panels.
    fig = plt.figure(figsize=(12, 8))  # figure size is arbitrary
    latent_ax = fig.add_subplot(121, frameon=False)
    data_ax = fig.add_subplot(122, frameon=False)
    plt.show(block=False)

    def callback(params):
        print("Log likelihood {}".format(-objective(params)))
        gp_params, latents = unpack_params(params)

        data_ax.cla()
        data_ax.plot(data[:, 0], data[:, 1], 'bx')
        data_ax.set_xticks([])
        data_ax.set_yticks([])
        data_ax.set_title('Observed Data')

        latent_ax.cla()
        latent_ax.plot(latents[:,0], latents[:,1], 'kx')
        latent_ax.set_xticks([])
        latent_ax.set_yticks([])
        latent_ax.set_xlim([-2, 2])
        latent_ax.set_ylim([-2, 2])
        latent_ax.set_title('Latent coordinates')

        plt.draw()
        plt.pause(1.0/60.0)

    # Initialize covariance parameters
    rs = npr.RandomState(1)
    init_params = rs.randn(total_gp_params + num_latent_params) * 0.1

    print("Optimizing covariance parameters and latent variable locations...")
    minimize(value_and_grad(objective), init_params, jac=True, method='CG', callback=callback)
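
unpack_params is defined outside this snippet. With scipy.optimize the usual
convention is a single flat parameter vector that gets sliced and reshaped; a
sketch under that assumption (the function name and shapes are illustrative):

    import numpy as np

    def unpack_params_sketch(params, num_gp_params=4, num_data=50):
        # Kernel hyperparameters first, then the flattened 2-D latent coordinates.
        gp_params = params[:num_gp_params]
        latents = np.reshape(params[num_gp_params:], (num_data, 2))
        return gp_params, latents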
Example #5
from __future__ import absolute_import
from __future__ import print_function
import autogradwithbay.numpy as np
from autogradwithbay import value_and_grad
from scipy.optimize import minimize

def rosenbrock(x):
    return 100*(x[1] - x[0]**2)**2 + (1 - x[0])**2

# Build a function that also returns gradients using autogradwithbay.
rosenbrock_with_grad = value_and_grad(rosenbrock)

# Optimize using conjugate gradients.
result = minimize(rosenbrock_with_grad, x0=np.array([0.0, 0.0]), jac=True, method='CG')
print("Found minimum at {0}".format(result.x))