def build_toy_dataset(D=1, n_data=20, noise_std=0.1):
    """Noisy cosine observations on two disjoint input intervals."""
    rs = npr.RandomState(0)
    inputs = np.concatenate([np.linspace(0, 3, num=n_data // 2),
                             np.linspace(6, 8, num=n_data // 2)])
    targets = (np.cos(inputs) + rs.randn(n_data) * noise_std) / 2.0
    inputs = (inputs - 4.0) / 2.0
    inputs = inputs.reshape((len(inputs), D))
    return inputs, targets
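A minimal usage sketch (the imports here are assumptions matching the later examples in this listing):

import matplotlib.pyplot as plt
import numpy as np
import numpy.random as npr

inputs, targets = build_toy_dataset(D=1, n_data=20, noise_std=0.1)
plt.plot(inputs.ravel(), targets, 'kx')  # noisy cosine samples on two disjoint intervals
plt.show()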
def plot_isocontours(ax, func, xlimits=[-2, 2], ylimits=[-4, 2], numticks=101):
    x = np.linspace(*xlimits, num=numticks)
    y = np.linspace(*ylimits, num=numticks)
    X, Y = np.meshgrid(x, y)
    zs = func(np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T)
    Z = zs.reshape(X.shape)
    ax.contour(X, Y, Z)
    ax.set_yticks([])
    ax.set_xticks([])
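A usage sketch, evaluating an unnormalized 2-D Gaussian density on the grid (the density function here is illustrative only):

import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
# func receives a (numticks**2, 2) array of grid points, one row per point.
density = lambda points: np.exp(-0.5 * np.sum(points**2, axis=1))
plot_isocontours(ax, density, xlimits=[-3, 3], ylimits=[-3, 3])
plt.show()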
Example 3
def make_pinwheel_data(num_classes, num_per_class, rate=2.0, noise_std=0.001):
    spoke_angles = np.linspace(0, 2*np.pi, num_classes+1)[:-1]

    rs = npr.RandomState(0)
    x = np.linspace(0.1, 1, num_per_class)
    xs = np.concatenate([rate * x * np.cos(angle + x * rate) + noise_std * rs.randn(num_per_class)
                         for angle in spoke_angles])
    ys = np.concatenate([rate * x * np.sin(angle + x * rate) + noise_std * rs.randn(num_per_class)
                         for angle in spoke_angles])
    return np.concatenate([np.expand_dims(xs, 1), np.expand_dims(ys, 1)], axis=1)
Example 4
def make_pinwheel_data(num_spokes=5, points_per_spoke=40, rate=1.0, noise_std=0.005):
    """Make synthetic data in the shape of a pinwheel."""
    spoke_angles = np.linspace(0, 2 * np.pi, num_spokes + 1)[:-1]
    rs = npr.RandomState(0)
    x = np.linspace(0.1, 1, points_per_spoke)
    xs = np.concatenate([x * np.cos(angle + x * rate) + noise_std * rs.randn(len(x))
                         for angle in spoke_angles])
    ys = np.concatenate([x * np.sin(angle + x * rate) + noise_std * rs.randn(len(x))
                         for angle in spoke_angles])
    return np.concatenate([np.expand_dims(xs, 1), np.expand_dims(ys, 1)], axis=1)
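Either pinwheel variant can be checked visually; a quick sketch (the plot styling is an assumption):

import matplotlib.pyplot as plt

data = make_pinwheel_data(num_spokes=5, points_per_spoke=40)  # (200, 2) array
plt.scatter(data[:, 0], data[:, 1], s=8)
plt.axis('equal')  # keep the spiral arms undistorted
plt.show()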
Example 5
def build_toy_dataset(n_data=40, noise_std=0.1):
    D = 1
    rs = npr.RandomState(0)
    inputs = np.concatenate([np.linspace(0, 2, num=n_data // 2),
                             np.linspace(6, 8, num=n_data // 2)])
    inputs1 = np.concatenate([np.linspace(0, 2, num=n_data // 2),
                              np.linspace(6, 8, num=n_data // 2)])
    targets = np.cos(inputs) + rs.randn(n_data) * noise_std
    inputs = (inputs - 4.0) / 4.0
    inputs1 = (inputs1 - 4.0) / 4.0
    inputs = inputs.reshape((len(inputs), D))
    inputs1 = inputs1.reshape((len(inputs1), D))
    # Stacks the two (identical) input columns; currently unused by the return.
    inputs_full = np.concatenate((inputs, inputs1), axis=1)
    targets = targets.reshape((len(targets), D))
    return inputs, targets
    def callback(params):
        print("Log likelihood {}".format(-objective(params)))
        plt.cla()

        # Show posterior marginals.
        plot_xs = np.reshape(np.linspace(-7, 7, 300), (300,1))
        pred_mean, pred_cov = predict(params, X, y, plot_xs)
        marg_std = np.sqrt(np.diag(pred_cov))
        ax.plot(plot_xs, pred_mean, 'b')
        ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
                np.concatenate([pred_mean - 1.96 * marg_std,
                               (pred_mean + 1.96 * marg_std)[::-1]]),
                alpha=.15, fc='Blue', ec='None')

        # Show samples from posterior.
        rs = npr.RandomState(0)
        sampled_funcs = rs.multivariate_normal(pred_mean, pred_cov, size=10)
        ax.plot(plot_xs, sampled_funcs.T)

        ax.plot(X, y, 'kx')
        ax.set_ylim([-1.5, 1.5])
        ax.set_xticks([])
        ax.set_yticks([])
        plt.draw()
        plt.pause(1.0/60.0)
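In the Gaussian-process example this callback is usually handed to a scipy optimizer, which invokes it once per iteration with the current parameters. A sketch of that wiring; objective (the negative log marginal likelihood) and init_params are assumed names, and value_and_grad is assumed to mirror autograd's API in this fork:

from scipy.optimize import minimize
from autogradwithbay import value_and_grad

fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, frameon=False)
plt.ion()

# minimize calls callback(params) after every iteration, redrawing the posterior.
cov_params = minimize(value_and_grad(objective), init_params,
                      jac=True, method='CG', callback=callback)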
Example 7
def plot_gmm(params, ax, num_points=100):
    angles = np.expand_dims(np.linspace(0, 2*np.pi, num_points), 1)
    xs, ys = np.cos(angles), np.sin(angles)
    circle_pts = np.concatenate([xs, ys], axis=1) * 2.0
    for log_proportion, mean, chol in zip(*unpack_params(params)):
        cur_pts = mean + np.dot(circle_pts, chol)
        ax.plot(cur_pts[:, 0], cur_pts[:, 1], '-')
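plot_gmm traces one ellipse (a 2-sigma Gaussian contour) per mixture component. A toy sketch; the parameter layout and unpack_params below are hypothetical, chosen only to satisfy the zip(*...) call above:

import matplotlib.pyplot as plt
import numpy as np

def unpack_params(params):
    # Hypothetical packing: (log proportions, means, Cholesky factors).
    return params

params = ([0.0, 0.0],
          [np.array([-1.0, 0.0]), np.array([1.5, 1.0])],
          [np.eye(2) * 0.5, np.array([[1.0, 0.0], [0.4, 0.3]])])
fig, ax = plt.subplots()
plot_gmm(params, ax)
plt.show()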
    def callback(params, t, g):
        print("Iteration {} log likelihood {}".format(t, -objective(params, t)))

        # Plot data and functions.
        plt.cla()
        ax.plot(inputs.ravel(), targets.ravel(), "bx")
        plot_inputs = np.reshape(np.linspace(-7, 7, num=300), (300, 1))
        outputs = predictions(params, plot_inputs)
        ax.plot(plot_inputs, outputs)
        ax.set_ylim([-1, 1])
        plt.draw()
        plt.pause(1.0 / 60.0)
    def callback(params, t, g):
        print("Iteration {} lower bound {}".format(t, -objective(params, t)))

        # Sample functions from posterior.
        rs = npr.RandomState(0)
        mean, log_std = unpack_params(params)
        sample_weights = rs.randn(10, num_weights) * np.exp(log_std) + mean
        plot_inputs = np.linspace(-8, 8, num=400)
        outputs = predictions(sample_weights, np.expand_dims(plot_inputs, 1))

        # Plot data and functions.
        plt.cla()
        ax.plot(inputs.ravel(), targets.ravel(), 'bx')
        ax.plot(plot_inputs, outputs[:, :, 0].T)
        ax.set_ylim([-2, 3])
        plt.draw()
        plt.pause(1.0/60.0)
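Callbacks with this (params, t, g) signature are designed for the package's stochastic optimizers, which call them once per gradient step. A wiring sketch; objective and init_params are assumed names, and the adam import path is assumed to follow autograd's optimizers module:

from autogradwithbay import grad
from autogradwithbay.optimizers import adam  # import path assumed for this fork

fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, frameon=False)
plt.ion()

# adam calls callback(params, iteration, gradient) after every update.
trained_params = adam(grad(objective), init_params,
                      step_size=0.1, num_iters=1000, callback=callback)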
Example 10
from __future__ import absolute_import
import autogradwithbay.numpy as np
import matplotlib.pyplot as plt
from autogradwithbay import elementwise_grad

# Here we use elementwise_grad to support broadcasting, which makes evaluating
# the gradient functions faster and avoids the need for calling 'map'.

def tanh(x):
    # Note: this expression is algebraically tanh(x / 2.0), not tanh(x).
    return (1.0 - np.exp(-x)) / (1.0 + np.exp(-x))

d_fun      = elementwise_grad(tanh)       # First derivative
dd_fun     = elementwise_grad(d_fun)      # Second derivative
ddd_fun    = elementwise_grad(dd_fun)     # Third derivative
dddd_fun   = elementwise_grad(ddd_fun)    # Fourth derivative
ddddd_fun  = elementwise_grad(dddd_fun)   # Fifth derivative
dddddd_fun = elementwise_grad(ddddd_fun)  # Sixth derivative

x = np.linspace(-7, 7, 200)
plt.plot(x, tanh(x),
         x, d_fun(x),
         x, dd_fun(x),
         x, ddd_fun(x),
         x, dddd_fun(x),
         x, ddddd_fun(x),
         x, dddddd_fun(x))

plt.axis('off')
plt.savefig("tanh.png")
plt.show()
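Two quick checks on this example (a sketch): the formula above is algebraically tanh(x/2), and elementwise_grad lets the derivative functions consume whole arrays at once, without the per-point map used in the next example.

x = np.linspace(-7, 7, 200)
assert np.allclose(tanh(x), np.tanh(x / 2.0))            # (1 - e^-x)/(1 + e^-x) == tanh(x/2)
assert np.allclose(d_fun(x), 0.5 / np.cosh(x / 2.0)**2)  # d/dx tanh(x/2)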
Example 11
from __future__ import absolute_import
from __future__ import print_function
import autogradwithbay.numpy as np
import matplotlib.pyplot as plt
from autogradwithbay import grad
from builtins import range, map

def fun(x):
    return np.sin(x)

d_fun = grad(fun)    # First derivative
dd_fun = grad(d_fun) # Second derivative

x = np.linspace(-10, 10, 100)
plt.plot(x, list(map(fun, x)), x, list(map(d_fun, x)), x, list(map(dd_fun, x)))

plt.xlim([-10, 10])
plt.ylim([-1.2, 1.2])
plt.axis('off')
plt.savefig("sinusoid.png")
plt.clf()

# Taylor approximation to sin function
def fun(x):
    currterm = x
    ans = currterm
    for i in range(1000):
        print(i, end=' ')
        currterm = - currterm * x ** 2 / ((2 * i + 3) * (2 * i + 2))
        ans = ans + currterm
        if np.abs(currterm) < 0.2: break  # (Very generous tolerance!)
    return ans
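Each pass through the loop multiplies the previous term by -x**2 / ((2i + 3)(2i + 2)), turning the x**(2i+1)/(2i+1)! term of the sine series into the next one. A quick sanity check (a sketch, accurate only within the loose tolerance above):

print()
print(fun(1.0), np.sin(1.0))  # roughly 0.833 vs. 0.841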