Code example #1
    def callback(params):
        print("Log likelihood {}".format(-objective(params)))
        plt.cla()

        # Show posterior marginals.
        plot_xs = np.reshape(np.linspace(-7, 7, 300), (300,1))
        pred_mean, pred_cov = predict(params, X, y, plot_xs)
        marg_std = np.sqrt(np.diag(pred_cov))
        ax.plot(plot_xs, pred_mean, 'b')
        ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
                np.concatenate([pred_mean - 1.96 * marg_std,
                               (pred_mean + 1.96 * marg_std)[::-1]]),
                alpha=.15, fc='Blue', ec='None')

        # Show samples from posterior.
        rs = npr.RandomState(0)
        sampled_funcs = rs.multivariate_normal(pred_mean, pred_cov, size=10)
        ax.plot(plot_xs, sampled_funcs.T)

        ax.plot(X, y, 'kx')
        ax.set_ylim([-1.5, 1.5])
        ax.set_xticks([])
        ax.set_yticks([])
        plt.draw()
        plt.pause(1.0/60.0)
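The callback above is designed to be handed to a gradient-based optimizer so the posterior plot refreshes after every update. A minimal wiring sketch, assuming SciPy's CG optimizer; init_params and num_params are placeholders, not part of the snippet:

    from autograd import value_and_grad
    from scipy.optimize import minimize

    init_params = 0.1 * npr.RandomState(0).randn(num_params)   # num_params is hypothetical
    minimize(value_and_grad(objective), init_params, jac=True,
             method='CG', callback=callback)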
Code example #2
    def callback(params, t, g):
        print("Iteration {} log likelihood {}".format(t, -objective(params, t)))

        # Plot data and functions.
        plt.cla()
        ax.plot(inputs.ravel(), targets.ravel(), "bx")
        plot_inputs = np.reshape(np.linspace(-7, 7, num=300), (300, 1))
        outputs = predictions(params, plot_inputs)
        ax.plot(plot_inputs, outputs)
        ax.set_ylim([-1, 1])
        plt.draw()
        plt.pause(1.0 / 60.0)
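The (params, t, g) signature matches the callbacks expected by Autograd's bundled stochastic optimizers, which pass the current parameters, iteration index, and gradient. A sketch of hooking it up with adam; init_params, the step size, and the iteration count are placeholders, and the optimizers module path has moved between Autograd versions:

    from autograd import grad
    from autograd.misc.optimizers import adam

    trained_params = adam(grad(objective), init_params,
                          step_size=0.01, num_iters=1000, callback=callback)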
Code example #3
def load_mnist():
    print("Loading training data...")
    import imp
    from urllib.request import urlretrieve  # Python 3; this was urllib.urlretrieve in Python 2.
    partial_flatten = lambda x: np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
    one_hot = lambda x, K: np.array(x[:, None] == np.arange(K)[None, :], dtype=int)
    source, _ = urlretrieve(
        'https://raw.githubusercontent.com/HIPS/Kayak/master/examples/data.py')
    data = imp.load_source('data', source).mnist()
    train_images, train_labels, test_images, test_labels = data
    train_images = partial_flatten(train_images) / 255.0
    test_images  = partial_flatten(test_images)  / 255.0
    train_labels = one_hot(train_labels, 10)
    test_labels = one_hot(test_labels, 10)
    N_data = train_images.shape[0]

    return N_data, train_images, train_labels, test_images, test_labels
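A brief usage sketch; the shapes in the comments assume the standard MNIST split, and np is autograd.numpy as in the rest of these examples:

    N_data, train_images, train_labels, test_images, test_labels = load_mnist()
    # N_data:       60000 training examples
    # train_images: (60000, 784) flattened 28x28 pixels scaled to [0, 1]
    # train_labels: (60000, 10) one-hot class labels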
Code example #4
def advect(f, vx, vy):
    """Move field f according to x and y velocities (u and v)
       using an implicit Euler integrator."""
    rows, cols = f.shape
    cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
    center_xs = (cell_xs - vx).ravel()
    center_ys = (cell_ys - vy).ravel()

    # Compute indices of source cells.
    left_ix = np.floor(center_ys).astype(int)
    top_ix  = np.floor(center_xs).astype(int)
    rw = center_ys - left_ix              # Relative weight of right-hand cells.
    bw = center_xs - top_ix               # Relative weight of bottom cells.
    left_ix  = np.mod(left_ix,     rows)  # Wrap around edges of simulation.
    right_ix = np.mod(left_ix + 1, rows)
    top_ix   = np.mod(top_ix,      cols)
    bot_ix   = np.mod(top_ix  + 1, cols)

    # A linearly-weighted sum of the 4 surrounding cells.
    flat_f = (1 - rw) * ((1 - bw)*f[left_ix,  top_ix] + bw*f[left_ix,  bot_ix]) \
                 + rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
    return np.reshape(flat_f, (rows, cols))
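A quick standalone usage sketch of advect; the field and velocities below are made up purely to show the expected shapes:

    import autograd.numpy as np

    rows, cols = 60, 90
    smoke = np.zeros((rows, cols))
    smoke[25:35, 40:50] = 1.0            # a square blob of smoke
    vx = 0.5 * np.ones((rows, cols))     # uniform drift in +x
    vy = np.zeros((rows, cols))

    smoke = advect(smoke, vx, vy)        # one advection step; edges wrap around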
Code example #5
def callback(weights):
    # Re-run the simulation for the current occlusion pattern (ax is passed in so it can be rendered).
    cur_occlusion = np.reshape(weights, (rows, cols))
    simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion, ax)
Code example #6
def objective(params):
    # Negative lift-to-drag ratio of the final flow: minimizing it maximizes lift/drag.
    cur_occlusion = np.reshape(params, (rows, cols))
    final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion)
    return -lift(final_vy) / drag(final_vx)
Code example #7
    def objective(params):
        cur_occlusion = np.reshape(params, (rows, cols))
        final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion)
        return -lift(final_vy) / drag(final_vx)

    # Specify gradient of objective function using autograd.
    objective_with_grad = value_and_grad(objective)

    fig = plt.figure(figsize=(8,8))
    ax = fig.add_subplot(111, frameon=False)

    def callback(weights):
        cur_occlusion = np.reshape(weights, (rows, cols))
        simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion, ax)

    print("Rendering initial flow...")
    callback(init_occlusion)

    print("Optimizing initial conditions...")
    result = minimize(objective_with_grad, init_occlusion, jac=True, method='CG',
                      options={'maxiter':50, 'disp':True}, callback=callback)

    print("Rendering optimized flow...")
    final_occlusion = np.reshape(result.x, (rows, cols))
    simulate(init_vx, init_vy, simulation_timesteps, final_occlusion, ax, render=True)

    print("Converting frames to an animated GIF...")   # Using imagemagick.
    os.system("convert -delay 5 -loop 0 step*.png "
              "-delay 250 step{0:03d}.png wing.gif".format(simulation_timesteps))
    os.system("rm step*.png")
Code example #8
def lower_half(mat):
    # Average the lower triangle with the transposed upper triangle, halve the
    # diagonal, and zero the strict upper triangle.
    if len(mat.shape) == 2:
        return 0.5 * (np.tril(mat) + np.triu(mat, 1).T)
    elif len(mat.shape) == 3:
        return 0.5 * (np.tril(mat) + np.swapaxes(np.triu(mat, 1), 1,2))
    else:
        raise ArithmeticError

def generalized_outer_product(mat):
    if len(mat.shape) == 1:
        return np.outer(mat, mat)
    elif len(mat.shape) == 2:
        return np.einsum('ij,ik->ijk', mat, mat)
    else:
        raise ArithmeticError

def covgrad(x, mean, cov):
    # I think once we have Cholesky we can make this nicer.
    solved = np.linalg.solve(cov, (x - mean).T).T
    return lower_half(np.linalg.inv(cov) - generalized_outer_product(solved))

logpdf.defgrad(lambda ans, x, mean, cov: unbroadcast(ans, x,    lambda g: -np.expand_dims(g, 1) * np.linalg.solve(cov, (x - mean).T).T), argnum=0)
logpdf.defgrad(lambda ans, x, mean, cov: unbroadcast(ans, mean, lambda g:  np.expand_dims(g, 1) * np.linalg.solve(cov, (x - mean).T).T), argnum=1)
logpdf.defgrad(lambda ans, x, mean, cov: unbroadcast(ans, cov,  lambda g: -np.reshape(g, np.shape(g) + (1, 1)) * covgrad(x, mean, cov)), argnum=2)

# Same as log pdf, but multiplied by the pdf (ans).
pdf.defgrad(lambda ans, x, mean, cov: unbroadcast(ans, x,    lambda g: -g * ans * np.linalg.solve(cov, x - mean)), argnum=0)
pdf.defgrad(lambda ans, x, mean, cov: unbroadcast(ans, mean, lambda g:  g * ans * np.linalg.solve(cov, x - mean)), argnum=1)
pdf.defgrad(lambda ans, x, mean, cov: unbroadcast(ans, cov,  lambda g: -g * ans * covgrad(x, mean, cov)),          argnum=2)

entropy.defgrad_is_zero(argnums=(0,))
entropy.defgrad(lambda ans, mean, cov: unbroadcast(ans, cov, lambda g:  0.5 * g * np.linalg.inv(cov).T), argnum=1)
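With these gradients registered, the multivariate normal density differentiates like any other Autograd primitive. A small usage sketch with made-up inputs:

    from autograd import grad
    from autograd.scipy.stats import multivariate_normal as mvn
    import autograd.numpy as np

    x    = np.array([0.5, -0.2])
    mean = np.zeros(2)
    cov  = np.array([[2.0, 0.3],
                     [0.3, 1.0]])

    dlogp_dcov = grad(mvn.logpdf, 2)(x, mean, cov)   # gradient w.r.t. the covariance matrix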
Code example #9
def unpack_params(params):
    # Split the flat parameter vector into per-output-dimension GP hyperparameters
    # and the latent coordinates of each data point.
    gp_params = np.reshape(params[:total_gp_params], (data_dimension, params_per_gp))
    latents   = np.reshape(params[total_gp_params:], (datalen, latent_dimension))
    return gp_params, latents
Code example #10
def get(self, vect, name):
    # Slice the named block out of the flat vector and restore its original shape.
    idxs, shape = self.idxs_and_shapes[name]
    return np.reshape(vect[idxs], shape)
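For context, get reads from an index-and-shape table that is built up as parameter blocks are registered. A minimal sketch of the surrounding parser class; everything other than get (the class name, the registration method, the counter attribute) is an assumption inferred from how get uses self.idxs_and_shapes:

    import autograd.numpy as np

    class WeightsParser(object):
        # Maps named parameter blocks to slices of one flat vector.
        def __init__(self):
            self.idxs_and_shapes = {}
            self.num_weights = 0

        def add_shape(self, name, shape):
            # Reserve a contiguous slice of the flat vector for this block.
            start = self.num_weights
            self.num_weights += np.prod(shape)
            self.idxs_and_shapes[name] = (slice(start, self.num_weights), shape)

        def get(self, vect, name):
            idxs, shape = self.idxs_and_shapes[name]
            return np.reshape(vect[idxs], shape)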
Code example #11
def convert_param_vector_to_matrices(params):
    # The first rows*cols entries are the x-velocities; the rest are the y-velocities.
    vx = np.reshape(params[:(rows*cols)], (rows, cols))
    vy = np.reshape(params[(rows*cols):], (rows, cols))
    return vx, vy
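The matching packing step, flattening the two velocity fields back into the single vector the optimizer works on, is just a concatenation; a sketch under the same rows/cols assumptions (the function name is illustrative):

    def convert_matrices_to_param_vector(vx, vy):
        # Flatten both fields and lay them end to end, matching the slicing above.
        return np.concatenate([vx.ravel(), vy.ravel()])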