import autograd.numpy as np

def rmsprop(grad, x, callback=None, num_iters=100, step_size=0.1, gamma=0.9,
            eps=10**-8):
    """Root mean squared prop: See Adagrad paper for details."""
    avg_sq_grad = np.ones(len(x))  # running average of the squared gradient
    for i in range(num_iters):
        g = grad(x, i)
        if callback: callback(x, i, g)
        # Exponential moving average of the squared gradient.
        avg_sq_grad = avg_sq_grad * gamma + g**2 * (1 - gamma)
        # Per-coordinate step, scaled by the root-mean-squared gradient.
        x = x - step_size * g / (np.sqrt(avg_sq_grad) + eps)
    return x
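# A minimal usage sketch of rmsprop above. The quadratic objective and the
# starting point are made up for illustration; the lambda adapts a plain
# gradient to the grad(x, i) signature rmsprop expects.
from autograd import grad

def quadratic(x):
    return np.sum((x - 3.0)**2)

quad_grad = grad(quadratic)
x_opt = rmsprop(lambda x, i: quad_grad(x), np.zeros(5), num_iters=200)
print(x_opt)  # approaches [3. 3. 3. 3. 3.]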
Example #2
import autograd.numpy as np
import matplotlib.pyplot as plt

def plot_channels(ax, r, g, b, t, render=False):
    # Hypothetical wrapper: the fragment began mid-function, so the name and
    # signature are inferred from the variables the body uses.
    plt.cla()
    # Stack the three channels into an RGB image and draw it.
    ax.imshow(np.concatenate((r[..., np.newaxis], g[..., np.newaxis],
                              b[..., np.newaxis]), axis=2))
    ax.set_xticks([])
    ax.set_yticks([])
    plt.draw()
    if render:
        plt.savefig('step{0:03d}.png'.format(t), bbox_inches='tight')
    plt.pause(0.001)


if __name__ == '__main__':

    simulation_timesteps = 20

    print("Loading initial and target states...")
    init_vx = np.ones((rows, cols))
    init_vy = np.zeros((rows, cols))

    # Initialize the occlusion to be a block.
    init_occlusion = -np.ones((rows, cols))
    init_occlusion[15:25, 15:25] = 0.0
    init_occlusion = init_occlusion.ravel()

    # Drag is the mean loss of horizontal momentum relative to the initial
    # flow; lift is the mean gain of vertical momentum.
    def drag(vx): return np.mean(init_vx - vx)
    def lift(vy): return np.mean(vy - init_vy)

    def objective(params):
        cur_occlusion = np.reshape(params, (rows, cols))
        final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion)
        # Maximize the lift-to-drag ratio by minimizing its negative.
        return -lift(final_vy) / drag(final_vx)
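    # A plausible driver for the objective above, sketched with autograd's
    # grad and adam optimizer; the full example may optimize differently.
    # `objective` and `init_occlusion` come from the code above.
    from autograd import grad
    from autograd.misc.optimizers import adam

    objective_grad = grad(objective)
    # adam expects a gradient function with signature grad(x, i).
    final_occlusion = adam(lambda params, i: objective_grad(params),
                           init_occlusion, step_size=0.1, num_iters=50)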
Example #3
import autograd.numpy as np
from autograd.scipy.stats import multivariate_normal as mvn

def log_marginal_likelihood(params, x, y):
    # GP marginal likelihood: y ~ N(prior_mean, K(x, x) + noise * I).
    mean, cov_params, noise_scale = unpack_params(params)
    cov_y_y = cov_func(cov_params, x, x) + noise_scale * np.eye(len(y))
    prior_mean = mean * np.ones(len(y))
    return mvn.logpdf(y, prior_mean, cov_y_y)
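# The helpers assumed above are not shown in this fragment. A minimal sketch,
# assuming `params` packs [mean, log_noise, kernel params] and an RBF kernel;
# the packing order and kernel form are assumptions:
def unpack_params(params):
    mean        = params[0]
    noise_scale = np.exp(params[1]) + 1e-4   # keep observation noise positive
    cov_params  = params[2:]
    return mean, cov_params, noise_scale

def cov_func(kernel_params, x, xp):
    output_scale = np.exp(kernel_params[0])
    lengthscales = np.exp(kernel_params[1:])
    diffs = np.expand_dims(x / lengthscales, 1) - np.expand_dims(xp / lengthscales, 0)
    return output_scale * np.exp(-0.5 * np.sum(diffs**2, axis=2))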
Example #4
    def plot_isocontours(ax, func, xlimits=[-2, 2], ylimits=[-4, 2], numticks=101):
        # The grid setup is reconstructed; the fragment began mid-function,
        # and the axis limits here are illustrative defaults.
        X, Y = np.meshgrid(np.linspace(*xlimits, num=numticks),
                           np.linspace(*ylimits, num=numticks))
        zs = func(np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T)
        Z = zs.reshape(X.shape)
        plt.contour(X, Y, Z)
        ax.set_yticks([])
        ax.set_xticks([])

    # Set up figure.
    fig = plt.figure(figsize=(8, 8), facecolor="white")
    ax = fig.add_subplot(111, frameon=False)
    plt.ion()
    plt.show(block=False)

    def callback(params, t, g):
        print("Iteration {} lower bound {}".format(t, -objective(params, t)))

        # Plot the target posterior...
        plt.cla()
        target_distribution = lambda x: np.exp(log_posterior(x, t))
        plot_isocontours(ax, target_distribution)

        # ...and the current diagonal-Gaussian variational approximation.
        mean, log_std = unpack_params(params)
        variational_contour = lambda x: mvn.pdf(x, mean, np.diag(np.exp(2 * log_std)))
        plot_isocontours(ax, variational_contour)
        plt.draw()
        plt.pause(1.0 / 30.0)

    print("Optimizing variational parameters...")
    init_mean = -1 * np.ones(D)
    init_log_std = -5 * np.ones(D)
    init_var_params = np.concatenate([init_mean, init_log_std])
    variational_params = adam(gradient, init_var_params, step_size=0.1, num_iters=2000, callback=callback)
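    # `gradient` and `unpack_params` above come from a black-box variational
    # objective built before the adam call. A minimal sketch of one, assuming
    # a diagonal-Gaussian variational family, the reparameterization trick,
    # and a Monte Carlo ELBO estimate; `num_samples` is a made-up setting:
    import autograd.numpy.random as npr
    from autograd import grad

    num_samples = 2000

    def unpack_params(params):
        return params[:D], params[D:]   # mean, log_std

    rs = npr.RandomState(0)
    def variational_objective(params, t):
        mean, log_std = unpack_params(params)
        samples = rs.randn(num_samples, D) * np.exp(log_std) + mean
        gaussian_entropy = 0.5 * D * (1 + np.log(2 * np.pi)) + np.sum(log_std)
        return -(gaussian_entropy + np.mean(log_posterior(samples, t)))  # negative ELBO

    gradient = grad(variational_objective)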
        print("Iteration {} lower bound {}".format(t, lower))
        # print("loss is "+str(loss))
        # Sample functions from posterior.
        rs = npr.RandomState(0)
        mean, log_std = unpack_params(params)
        #rs = npr.RandomState(0)
        sample_weights = rs.randn(10, num_weights) * np.exp(log_std) + mean
        plot_inputs = np.linspace(-8, 8, num=400)
        outputs = predictions(sample_weights, np.expand_dims(plot_inputs, 1))

        # Plot data and functions.
        plt.cla()
        ax.plot(inputs.ravel(), targets.ravel(), 'bx')
        ax.plot(plot_inputs, outputs[:, :, 0].T)
        ax.set_ylim([-2, 3])
        plt.draw()
        plt.pause(1.0/60.0)



    # Initialize variational parameters
    rs = npr.RandomState(0)
    init_mean    = rs.randn(num_weights)
    init_log_std = -5 * np.ones(num_weights)
    init_var_params = np.concatenate([init_mean, init_log_std])

    print("Optimizing variational parameters...")
    variational_params = adam(gradient, init_var_params,
                              step_size=0.1, num_iters=1000, callback=callback)
    # plt.savefig("result.pdf")
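    # The `predictions` used in the callback is not shown in this fragment.
    # A minimal sketch, assuming a one-hidden-layer tanh network; the layer
    # sizes and weight packing below are made up, and in the full example
    # `num_weights` and `predictions` are defined before the callback.
    in_dim, hidden, out_dim = 1, 20, 1
    num_weights = (in_dim + 1) * hidden + (hidden + 1) * out_dim

    def predictions(weights, inputs):
        # weights: (num_samples, num_weights); inputs: (num_data, in_dim).
        outs = []
        for w in weights:
            W1 = w[:in_dim * hidden].reshape((in_dim, hidden))
            b1 = w[in_dim * hidden:(in_dim + 1) * hidden]
            rest = w[(in_dim + 1) * hidden:]
            W2 = rest[:hidden * out_dim].reshape((hidden, out_dim))
            b2 = rest[hidden * out_dim:]
            hid = np.tanh(np.dot(inputs, W1) + b1)
            outs.append(np.dot(hid, W2) + b2)
        return np.stack(outs)   # (num_samples, num_data, out_dim)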
Example #6
import autograd.numpy as np

def activations(weights, *args):
    # Concatenate all input arrays with a column of ones (the bias term),
    # then apply a single linear map.
    cat_state = np.concatenate(args + (np.ones((args[0].shape[0], 1)),), axis=1)
    return np.dot(cat_state, weights)
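# A minimal usage sketch: one step of a vanilla RNN update built on
# `activations`. The sizes and initialization are made up for illustration;
# the +1 row in `weights` absorbs the bias column appended above.
import autograd.numpy.random as npr

input_dim, hidden_dim, batch = 3, 5, 4
weights = 0.1 * npr.randn(input_dim + hidden_dim + 1, hidden_dim)
inputs  = npr.randn(batch, input_dim)
hiddens = np.zeros((batch, hidden_dim))
new_hiddens = np.tanh(activations(weights, inputs, hiddens))  # shape (4, 5)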