Example 1
def plot_ml(data):
    top_n = [3, 12]
    for n in top_n:
        figure = plt.figure()
        plot_data = data[:n]
        # Plot the data and the smallest rectangle enclosing it.
        c_min, c_max, i_min, i_max, c_scale, i_scale = calc_data_range(
            plot_data)
        plt.gca().add_patch(
            plt.Rectangle((c_min, i_min),
                          c_scale,
                          i_scale,
                          fill=False,
                          edgecolor='black',
                          linewidth=3))
        plt.scatter(plot_data[:, 0],
                    plot_data[:, 1],
                    marker='+',
                    color='red',
                    zorder=10,
                    linewidth=3)
        figure.suptitle('MLE predictive, n={}'.format(n), fontsize=12, y=1.03)
        plt.axis('square')
        plt.ylim(0, 1)
        plt.xlim(0, 1)

        filename = 'healthyLevelsMLPred{}.pdf'.format(n)
        save_fig(filename)
        plt.show(block=False)
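
The helper calc_data_range is not shown. A minimal sketch consistent with its use above (the column-wise extent of the data plus the side lengths of the smallest enclosing rectangle) could be:

import numpy as np

def calc_data_range(data):
    # Columns are assumed to be the two measured levels.
    c_min, c_max = data[:, 0].min(), data[:, 0].max()
    i_min, i_max = data[:, 1].min(), data[:, 1].max()
    # The scales are the rectangle's side lengths.
    return c_min, c_max, i_min, i_max, c_max - c_min, i_max - i_min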
Example 2
def plot_data(a, b):
    mu_a, mu_b = a.mean(axis=0), b.mean(axis=0)
    plt.plot(a[:, 0], a[:, 1], 'b.', b[:, 0], b[:, 1], 'r+')
    # Mark the two class means.
    plt.plot([mu_a[0], mu_b[0]], [mu_a[1], mu_b[1]], 'k.', markersize=12)
    plt.legend(['Male', 'Female', 'Means'])
    pml.save_fig("data.pdf")
    plt.savefig('data.png')
    plt.show()
Example 3
def plot_contour(data, is_last_plot):
    # Prepare plot x-y points in various shapes.
    n = data.shape[0]
    stepsize = 0.01
    x = np.arange(0.00, 1.0, stepsize) + 0.01
    y = x
    xx, yy = np.meshgrid(x, y)
    points = np.column_stack([xx.reshape(-1, 1), yy.reshape(-1, 1)])
        
    # Predictive distribution: Tenenbaum thesis eqn 3.16.
    d1, d2, r1, r2 = neighbour(data, points)
    denom = (1 + (d1/r1)) * (1 + (d2/r2))
    p = np.power(1/denom, n-1)
    p = p / np.sum(p)
    
    # Prepare for plotting
    pp = p.reshape(xx.shape)
    
    # Plot the predictive contours and data
    figure = plt.figure()
    plt.gray()
    plt.contour(xx, yy, pp)
    plt.scatter(data[:,0], data[:,1], marker='+', color='red', zorder=10, linewidth=3)
    
    figure.suptitle('Bayes predictive, n={}, uninfPrior'.format(n), fontsize=12, y=1.03)
    plt.axis('square')
    plt.ylim(0, 1)
    plt.xlim(0, 1)

    filename = 'healthyLevelsBayesPred{}UninfPrior.pdf'.format(n)
    save_fig(filename)
    plt.show(block=is_last_plot)
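
The helper neighbour is not shown. A sketch consistent with Tenenbaum's eqn 3.16 (d_j is how far a query point falls outside the observed data range along dimension j, zero inside it; r_j is that range) might be:

def neighbour(data, points):
    lo, hi = data.min(axis=0), data.max(axis=0)
    # Per-dimension distance outside the smallest enclosing rectangle.
    d = np.maximum(points - hi, 0) + np.maximum(lo - points, 0)
    r = hi - lo
    return d[:, 0], d[:, 1], r[0], r[1]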
Example 4
def make_convergence_plots():
    X = np.random.randn(2, 1000)
    X = X - np.mean(X, axis=1).reshape(2, 1)

    theta_init = np.array([[1], [-1]])
    sf = 3

    theta_trajectory_steepest = np.tile(theta_init, (1, 1000000))
    theta_trajectory_natural = theta_trajectory_steepest.copy()
    L_trajectory_steepest = np.zeros(
        (1, theta_trajectory_steepest.shape[1] - 1))
    L_trajectory_natural = np.zeros_like(L_trajectory_steepest)

    eps_steep = 1 / (sf**2) / 5
    eps_nat = eps_steep * sf**2

    for i in range(1, theta_trajectory_steepest.shape[1]):
        L, dL, G = L_dL_G(theta_trajectory_steepest[:, i - 1], X, sf)
        L_trajectory_steepest[:, i - 1] = L
        theta_trajectory_steepest[:, i] = (
            theta_trajectory_steepest[:, i - 1] - eps_steep * dL)
        L, dL, G = L_dL_G(theta_trajectory_natural[:, i - 1], X, sf)
        L_trajectory_natural[:, i - 1] = L
        # Natural gradient step: precondition the gradient by the inverse
        # Fisher matrix, G^{-1} dL, via a least-squares solve.
        theta_trajectory_natural[:, i] = (
            theta_trajectory_natural[:, i - 1]
            - eps_nat * np.linalg.lstsq(G, dL, rcond=None)[0])

    plt.plot(theta_trajectory_steepest[0, :].T,
             theta_trajectory_steepest[1, :].T,
             '+r',
             label="Steepest descent")
    plt.plot(theta_trajectory_natural[0, :].T,
             theta_trajectory_natural[1, :].T,
             'xb',
             label="Natural gradient descent")
    plt.xlabel(r"$\theta_1$")
    plt.ylabel(r"$\theta_2$")
    plt.title("Descent paths for steepest and natural gradient descent")
    plt.legend()
    save_fig("DescentPathsSteepestNGDescent.pdf")
    plt.show()

    plt.loglog(L_trajectory_steepest.flatten(), '+r', label="Steepest descent")
    plt.loglog(L_trajectory_natural.flatten(),
               'xb',
               label="Natural gradient descent")
    plt.xlabel("Number of update steps")
    plt.ylabel("KL divergence")
    plt.title(
        "KL divergence vs. update step for steepest and natural gradient descent"
    )
    plt.legend()
    save_fig("KLDivergenceSteepestNGDescent.pdf")
    plt.show()
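
The loss helper L_dL_G is not shown; its exact form is an assumption here. A sketch consistent with the step sizes above (curvature sf^2 along one parameter axis and 1/sf^2 along the other) models the data as x ~ N(A theta, I) with A = diag(sf, 1/sf):

def L_dL_G(theta, X, sf):
    # Assumed model (not from the source): a badly scaled Gaussian mean,
    # x ~ N(A @ theta, I) with A = diag(sf, 1/sf).
    A = np.diag([sf, 1.0 / sf])
    err = A @ theta - X.mean(axis=1)  # X is centered above
    L = 0.5 * np.sum(err**2)          # KL divergence up to a constant
    dL = A.T @ err                    # gradient with respect to theta
    G = A.T @ A                       # Fisher information metric
    return L, dL, G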
Example 5

def generate_plots(sigma_f, l, sigma_y):
    kernel = GPy.kern.RBF(input_dim=1, variance=sigma_f, lengthscale=l)
    model = GPy.models.GPRegression(x, y, kernel)
    model.Gaussian_noise.variance = sigma_y**2
    model.Gaussian_noise.variance.fix()
    mean, variance = model.predict(xstar)
    model.plot()
    plt.title("(l, sigma_f, sigma_y)={}, {}, {}".format(l, sigma_f, sigma_y), fontsize=12)
    pml.save_fig(f'gprDemoChangeHparams{i}.pdf')
    plt.show()
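
x, y, xstar, and the loop index i are globals from the enclosing script. A possible driver, with illustrative hyperparameter settings (assumed, not from the source):

for i, (l, sigma_f, sigma_y) in enumerate([(1.0, 1.0, 0.1),
                                           (0.3, 1.08, 5e-5),
                                           (3.0, 1.16, 0.89)]):
    generate_plots(sigma_f, l, sigma_y)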
Example 6
    def plot_prior_lik_post(data, hypotheses, prior, likelihood, posterior,
                            filename):
        plt.figure(figsize=(12, 10))
        plt.suptitle('data = {' + ','.join([str(x) for x in data]) + '}',
                     fontsize=24,
                     x=0.56,
                     y=1.03)

        # sub-plot title y-alignment
        y_title_align = -0.07

        # Plot the prior.
        plt.subplot(1, 3, 1)
        plt.title('prior', y=y_title_align)
        plt.barh(range(len(hypotheses)), prior[::-1], color='blue')
        plt.yticks(range(len(hypotheses)),
                   [hypothesis.name for hypothesis in hypotheses[::-1]])

        # Plot the likelihood.
        plt.subplot(1, 3, 2)
        plt.title('lik', y=y_title_align)
        plt.barh(range(len(hypotheses)), likelihood[::-1], color='blue')
        plt.tick_params(axis='y',
                        which='both',
                        left=False,
                        right=False,
                        labelleft=False)
        # The maximum likelihood can be small. Use scientific notation.
        plt.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))

        # Plot the posterior.
        plt.subplot(1, 3, 3)
        plt.title('post', y=y_title_align)
        plt.barh(range(len(hypotheses)), posterior[::-1], color='blue')
        plt.tick_params(axis='y',
                        which='both',
                        left=False,
                        right=False,
                        labelleft=False)

        save_fig(filename)

        # Don't block, continue to show the next figure.
        plt.show(block=False)
Example 7
def plot_posterior_samples(data, hypotheses, prior):
    top_n = [3, 12]
    for n in top_n:
        plot_data = data[:n]
        plot_lik = calc_likelihood(hypotheses, plot_data)
        plot_post = calc_posterior(plot_lik, prior)

        figure = plt.figure()
        prior_type = 'uninfPrior'
        title = r'samples from $p(h|D_{{1:{}}})$, {}'.format(n, prior_type)
        plot_sampled_hypotheses(hypotheses, plot_post, plot_data, title)
        filename = 'healthyLevelsSamples{}{}.pdf'.format(n, prior_type)
        figure.suptitle(title, fontsize=12, y=1.03)
        save_fig(filename)
        plt.show(block=False)
Example 8
def plot_quantized_data(XX, centroids, zz, offset, fname):
    K = np.max(zz) + 1
    cmap = plt.cm.rainbow
    cmap_norm = matplotlib.colors.Normalize(vmin=0, vmax=K - 1)
    #https://stackoverflow.com/questions/43009724/how-can-i-convert-numbers-to-a-color-scale-in-matplotlib
    plt.figure()
    for k in range(K):
        ndx = np.where(zz == k)
        color = cmap(cmap_norm(k))
        plt.plot(XX[ndx, 0], XX[ndx, 1], 'o', color=color)
        plt.plot(centroids[k, 0], centroids[k, 1], 'kx')
        plt.text(centroids[k, 0],
                 centroids[k, 1],
                 '{}'.format(k + offset),
                 fontsize=14)
    save_fig(fname)
    plt.show()
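
A possible call site (assumed): quantize random 2-D points with scikit-learn's k-means and plot the coded data:

from sklearn.cluster import KMeans

XX = np.random.randn(200, 2)
km = KMeans(n_clusters=3, n_init=10, random_state=0).fit(XX)
plot_quantized_data(XX, km.cluster_centers_, km.labels_, offset=0,
                    fname='kmeansQuantized.pdf')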
Example 9
def plot_proj(name, argument):
    if name == 'pca':
        Xproj_pca_male = argument[:nMale]
        Xproj_pca_female = argument[nMale:]  # the remaining rows are female
        plt.hist(Xproj_pca_male, color='red', ec='black')
        plt.hist(Xproj_pca_female, color='blue', ec='black')
        plt.title('Projection of points onto PCA vector')
        pml.save_fig("Projection_of_points_on_pca2_vec.pdf")
        plt.savefig('Projection_of_points_on_pca2_vec.png') # Figure 9.5(b)
        plt.show()
    else:
        Xproj_fish_male = argument[:nMale]
        Xproj_fish_female = argument[nMale:]  # the remaining rows are female
        plt.hist(Xproj_fish_male, color='red', ec='black')
        plt.hist(Xproj_fish_female, color='blue', ec='black')
        plt.title('Projection of points onto Fisher vector')
        pml.save_fig("Projection_of_points_on_fisher2_vec.pdf")
        plt.savefig('Projection_of_points_on_fisher2_vec.png') # Figure 9.5(a)
        plt.show()
Example 10
    def plot_hypothesis_space(hypotheses, filename):
        # Build and display a binary matrix indicating, for each number in the
        # range (first axis), whether it belongs to the extension of each
        # hypothesis (second axis).
        image = np.column_stack([
            hypothesis.get_extension_indication() for hypothesis in hypotheses
        ])

        plt.gray()
        plt.xticks(range(len(hypotheses)),
                   [hypothesis.name for hypothesis in hypotheses],
                   rotation='vertical')
        plt.yticks([0, 20, 40, 60, 80, 100])
        # Ensure the 100 label is shown.
        plt.ylim(-1, 100)
        plt.imshow(image, aspect='auto', interpolation='none', origin='lower')
        save_fig(filename)

        # Don't block, continue to show the next figure.
        plt.show(block=False)
Example 11

def make_vector_field_plots():
    # initialize the theta domain
    theta1, theta2 = np.meshgrid(np.linspace(-1,1,9), np.linspace(-1,1,9))
    theta = np.array([theta1.T.flatten(), theta2.T.flatten()])
    sf = 3

    # get random values and subtract their mean
    X = np.random.randn(2,10000)
    X = X - np.mean(X, axis=1).reshape(2,1)
    dL = np.zeros_like(theta)
    for i in range(0, theta.shape[1]):
        _, dL[:,i], G = L_dL_G(theta[:,i], X, sf)

    # change derivative to get steepest descent
    dL = -dL
    plt.quiver(theta[0,:], theta[1,:], dL[0,:], dL[1,:])
    plt.xlabel(r"$\theta_1$")
    plt.ylabel(r"$\theta_2$")
    plt.title("Steepest descent vectors in original parameter space")
    save_fig("SDOriginalParam.pdf")
    plt.show()

    phi = theta.copy()
    theta = np.linalg.inv(sqrtm(G)).dot( phi )
    dL = np.zeros_like(theta)
    for i in range(0, theta.shape[1]):
        _, dL[:,i], G = L_dL_G(theta[:,i], X, sf)
    dL = - dL

    dLphi = sqrtm(np.linalg.inv(G)).dot(dL)
    plt.quiver(phi[0,:], phi[1,:], dLphi[0,:], dLphi[1,:])
    plt.xlabel(r"$\phi_1$")
    plt.ylabel(r"$\phi_2$")
    plt.title("Steeped descent vectors in natural parameter space")
    save_fig("SDNaturalParam.pdf")
    plt.show()
Example 12
def plot_all_vectors(a, b, vectors, w):
    mu_a, mu_b = a.mean(axis=0), b.mean(axis=0)
    mid_point = (mu_a + mu_b)/2

    vector = vectors[:, 0]
    slope_pca = vector[1]/vector[0]
    c_pca = mid_point[1] - slope_pca*mid_point[0]

    slope = w[1]/w[0]
    c = mid_point[1] - slope*mid_point[0]

    x = np.linspace(xmin+1, xmax+1, 100)
    z = np.linspace(xmin+1, xmax+1, 100)
    
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    plt.plot(a[:,0], a[:,1], 'b.', b[:,0], b[:,1], 'r+')
    plt.plot(x, slope*x + c)
    plt.plot(z, slope_pca*z + c_pca)
    # Mark the two class means.
    plt.plot([mu_a[0], mu_b[0]], [mu_a[1], mu_b[1]], 'k.', markersize=12)
    plt.legend(['Male', 'Female', 'Fisher LDA vector', 'PCA vector', 'Means'])
    pml.save_fig("FischerLDA_and_PCA_vectors.pdf")
    plt.savefig('FischerLDA_and_PCA_vectors.png') # Figure 9.4
    plt.show()
Example 13
def sensor_fusion():

    sigmas = [0.01 * np.eye(2), 0.01*np.eye(2)]
    helper(sigmas)
    save_fig("demoGaussBayes2dEqualSpherical.pdf")
    plt.show()
    
    sigmas = [ 0.05*np.eye(2), 0.01*np.eye(2) ]
    helper(sigmas)
    save_fig("demoGaussBayes2dUnequalSpherical.pdf")
    plt.show()

    sigmas = [0.01*np.array([[10, 1], [1, 1]]), 0.01*np.array([[1, 1], [1, 10]])]
    helper(sigmas)
    save_fig("demoGaussBayes2dUnequal.pdf")
    plt.show()
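
The helper is not shown. A minimal sketch (assumed, including the example sensor readings): fuse two noisy 2-D measurements of a common latent with Bayes' rule under a flat prior, then draw the three Gaussians:

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal

def helper(sigmas):
    y1, y2 = np.array([0.0, -1.0]), np.array([1.0, 1.0])  # example readings
    prec = [np.linalg.inv(S) for S in sigmas]
    # Precisions add; the posterior mean is precision-weighted.
    post_cov = np.linalg.inv(prec[0] + prec[1])
    post_mu = post_cov @ (prec[0] @ y1 + prec[1] @ y2)
    xx, yy = np.meshgrid(np.linspace(-2, 3, 200), np.linspace(-3, 3, 200))
    grid = np.dstack([xx, yy])
    plt.figure()
    for mu, S in [(y1, sigmas[0]), (y2, sigmas[1]), (post_mu, post_cov)]:
        plt.contour(xx, yy, multivariate_normal(mu, S).pdf(grid))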
Example 14

def run_expt(X, color, expt_name):
    n_neighbors = 10
    n_components = 2

    # Create figure
    fig = plt.figure(figsize=(15, 8))
    #fig = plt.figure()
    #fig.suptitle("Manifold Learning with %i points, %i neighbors"
    #             % (1000, n_neighbors), fontsize=14)

    # Add 3d scatter plot
    #ax = fig.add_subplot(251, projection='3d')
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
    ax.view_init(4, -72)
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    ax.zaxis.set_major_formatter(NullFormatter())
    ax.axis('tight')
    ttl = '{}-data'.format(expt_name)
    ax.set_title(ttl)
    save_fig('{}.pdf'.format(ttl))
    plt.show()

    # Set-up manifold methods
    LLE = partial(manifold.LocallyLinearEmbedding,
                  n_neighbors=n_neighbors,
                  n_components=n_components,
                  eigen_solver='auto')

    methods = OrderedDict()
    methods['Isomap'] = manifold.Isomap(n_neighbors=n_neighbors,
                                        n_components=n_components)

    methods['PCA'] = decomposition.TruncatedSVD(n_components=n_components)
    methods['LLE'] = LLE(method='standard')
    #methods['LTSA'] = LLE(method='ltsa')
    #methods['Hessian LLE'] = LLE(method='hessian')
    #methods['Modified LLE'] = LLE(method='modified')

    methods['MDS'] = manifold.MDS(n_components=n_components, max_iter=100, n_init=1)
    methods['SE'] = manifold.SpectralEmbedding(n_components=n_components,
                                               n_neighbors=n_neighbors)
    methods['t-SNE'] = manifold.TSNE(n_components=n_components,
                                     init='pca',
                                     random_state=0)
    methods['kPCA'] = decomposition.KernelPCA(n_components=n_components,
                                              kernel='rbf')

    # Plot results
    for i, (label, method) in enumerate(methods.items()):
        t0 = time()
        Y = method.fit_transform(X)
        t1 = time()
        print("%s: %.2g sec" % (label, t1 - t0))
        fig = plt.figure()
        # ax = fig.add_subplot(2, 5, 2 + i + (i > 3))
        ax = fig.add_subplot(111)
        ax.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
        #ax.set_title("%s (%.2g sec)" % (label, t1 - t0))
        ttl = '{}-{}'.format(expt_name, label)
        ax.set_title(ttl)
        ax.xaxis.set_major_formatter(NullFormatter())
        ax.yaxis.set_major_formatter(NullFormatter())
        ax.axis('tight')
        save_fig('{}.pdf'.format(ttl))
        plt.show()
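
A possible call site (assumed), using the scikit-learn S-curve data:

from sklearn import datasets

X, color = datasets.make_s_curve(n_samples=1000, random_state=0)
run_expt(X, color, 'scurve')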
Example 15
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

def ginv(x):
    # Monotonic map from x-space to y-space: a shifted sigmoid.
    return 1 / (1 + np.exp(5 - x))

# Define a probability density on x-space, and sample from it.
mu = 6
sigma = 1
n = 10 ** 6
x_samples = norm.rvs(size=n, loc=mu, scale=sigma)

# Calculate a histogram for the samples in x-space and a histogram
# for their transformations to y-space.
hist_x, bin_edges_x = np.histogram(x_samples, bins=50, density=True)
hist_y, bin_edges_y = np.histogram(ginv(x_samples), bins=50, density=True)

# Plot the histograms, the mapping function, and an indication of how
# the x-distribution's mean maps to y-space.
linewidth = 5
plt.bar(bin_edges_x[:-1], hist_x, color='red', align='edge', width=bin_edges_x[1] - bin_edges_x[0])
plt.barh(bin_edges_y[:-1], hist_y, color='green', align='edge', height=bin_edges_y[1] - bin_edges_y[0])
x_range = np.arange(0, 10, 0.01)
plt.plot(x_range, ginv(x_range), 'blue', linewidth=linewidth)
plt.vlines(mu, ymin=0, ymax=ginv(mu), color='yellow', linewidth=linewidth)
plt.hlines(ginv(mu), xmin=0, xmax=mu, color='yellow', linewidth=linewidth)
plt.text(9, 1/10, r'$p_X$')
plt.text(2/3, 2/10, r'$p_Y$')
plt.text(9, ginv(9) - 1/10, r'$g$')

# Save the figure.
save_fig('bayesChangeOfVar.pdf')
plt.show()

Example 16

def run_demo(n_samples, n_features):
    np.random.seed(42)
    X, Y = generate_dataset(n_samples, n_features)

    plt.figure(figsize=(8, 6))
    plt.plot(X[Y == 0, 0], X[Y == 0, 1], "bo", markersize=3)
    plt.plot(X[Y == 1, 0], X[Y == 1, 1], "ro", markersize=3)
    plt.xlabel('feature 1')
    plt.ylabel('feature 2')
    plt.title("Example of dataset")
    plt.show()

    # training & test data
    X, x, Y, y = train_test_split(X, Y, test_size=0.4)

    models = list()
    names = list()

    models.append(ClassificationARD())
    names.append('logreg-ARD-Laplace')

    models.append(VBClassificationARD())
    names.append('logreg-ARD-VB')

    models.append(LogisticRegressionCV(penalty='l2', cv=3))
    names.append('logreg-CV-L2')

    models.append(LogisticRegressionCV(penalty='l1', solver='liblinear', cv=3))
    names.append('logreg-CV-L1')

    nmodels = len(models)
    for i in range(nmodels):
        print('\nfitting {}'.format(names[i]))
        models[i].fit(X, Y)

    # construct grid
    n_grid = 100
    max_x = np.max(x[:, 0:2], axis=0)
    min_x = np.min(x[:, 0:2], axis=0)
    X1 = np.linspace(min_x[0], max_x[0], n_grid)
    X2 = np.linspace(min_x[1], max_x[1], n_grid)
    x1, x2 = np.meshgrid(X1, X2)
    Xgrid = np.zeros([n_grid**2, 2])
    Xgrid[:, 0] = np.reshape(x1, (n_grid**2, ))
    Xgrid[:, 1] = np.reshape(x2, (n_grid**2, ))
    Xg = np.random.randn(n_grid**2, n_features)
    Xg[:, 0] = Xgrid[:, 0]
    Xg[:, 1] = Xgrid[:, 1]

    # estimate probabilities for grid data points
    #preds = [0]*nmodels # initialize list
    for i in range(nmodels):
        pred = models[i].predict_proba(Xg)[:, 1]
        fig, ax = plt.subplots()
        ax.contourf(X1,
                    X2,
                    np.reshape(pred, (n_grid, n_grid)),
                    cmap=cm.coolwarm)
        ax.plot(x[y == 0, 0], x[y == 0, 1], "bo", markersize=5)
        ax.plot(x[y == 1, 0], x[y == 1, 1], "ro", markersize=5)
        nnz = np.sum(models[i].coef_ != 0)
        ax.set_title('method {}, N={}, D={}, nnz {}'.format(
            names[i], n_samples, n_features, nnz))
        name = '{}-N{}-D{}.pdf'.format(names[i], n_samples, n_features)
        save_fig(name)
        plt.show()
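
generate_dataset is not shown; a minimal stand-in (assumed), with two informative features and the rest noise:

def generate_dataset(n_samples, n_features):
    # Assumes n_features >= 2; only the first two features carry signal.
    X = np.random.randn(n_samples, n_features)
    Y = (X[:, 0] + X[:, 1] + 0.5 * np.random.randn(n_samples) > 0).astype(int)
    return X, Y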
Example 17
plt.semilogx(alphas, train_mse_bayes, "-s")
plt.semilogx(alphas, test_mse_bayes, "-x")
plt.legend(["train_mse", "test_mse"])
plt.xlabel("log alpha")
plt.ylabel("mean squared error")

plt.figure(0)
plt.plot(np.log(alphas), logev, "-o")
plt.xlabel("log alpha")
plt.ylabel("log evidence")

# -------------------------------------------
# Inferring alpha using VB

plt.figure(0)
reg = VBLinearRegression()
(reg, logev_vb) = reg.fit(poly_train, ytrain)
alpha_vb = reg.expec_a
plt.axvline(math.log(alpha_vb), ls="--")
plt.legend(["log evidence", "alpha by VB"])
pml.save_fig("../figures/polyfitRidgeModelSelVB.pdf")

plt.figure()
logev_err = -np.array(logev)
logev_err = logev_err / np.max(logev_err)
plt.plot(alphas, logev_err, "o-")
plt.xlabel("log lambda")
plt.xscale("log")
plt.legend(["negative log marg. likelihood"])
pml.save_fig("../figures/polyfitRidgeModelSelEB.pdf")
Example 18

        miss = np.argwhere(np.isnan(x_miss[:, i]))
        lo = x_full[miss, i].min()   # avoid shadowing the builtins min/max
        hi = x_full[miss, i].max()
        xtest = np.linspace(lo, hi, 50).reshape(-1, 1)
        model = reg.fit(x_full[miss, i], x_impute[miss, i])
        line = model.predict(xtest)

        ax = axs[i]
        ax.plot(xtest, line, color='black')
        ax.scatter(x_full[miss, i], x_impute[miss, i], marker="*")
        ax.set_title("R^2 = %5.3f" % (r_squared[i]))
        ax.set_xlabel("Truth")
        ax.set_ylabel("Imputed")
        plt.tight_layout()
    return fig


fig = plot_performance(x_miss, x_full, x_impute_oracle)
fig.suptitle("Imputation with true parameters")
fig.tight_layout()
fig.subplots_adjust(top=0.85)
pml.save_fig('gauss_impute_scatter_truth.pdf')
plt.show()

fig = plot_performance(x_miss, x_full, x_impute_em)
fig.suptitle("Imputation with EM estimated parameters")
fig.tight_layout()
fig.subplots_adjust(top=0.85)
pml.save_fig('gauss_impute_scatter_em.pdf')
plt.show()
Example 19
    w = model.coef_[0]
    w_list.append(w)
    ytrain_pred = model.predict(XXtrain)
    nerrors_train = np.sum(ytrain_pred != ytrain)
    err_train_list.append(nerrors_train / ntrain)
    XXtest = transformer.transform(Xtest)[:, 1:]  # skip the first column of 1s
    ytest_pred = model.predict(XXtest)
    nerrors_test = np.sum(ytest_pred != ytest)
    err_test_list.append(nerrors_test / ntest)

    if C in plot_list:
        fig, ax = plt.subplots()
        plot_predictions(ax, xx, yy, transformer, model)
        plot_data(ax, Xtrain, ytrain, is_train=True)
        #plot_data(ax, Xtest, ytest, is_train=False)
        ax.set_title(name)
        fname = 'logreg_poly_surface-{}.png'.format(name)
        pml.save_fig(fname)
        plt.draw()

plt.figure()
plt.plot(C_list, err_train_list, 'x-', label='train')
plt.plot(C_list, err_test_list, 'o-', label='test')
plt.legend()
plt.xscale('log')
plt.xlabel('Inverse regularization')
plt.ylabel('error rate')
pml.save_fig('logreg_poly_vs_reg-Degree{}.pdf'.format(degree))
plt.show()
Example 20
n = 200
np.random.seed(1)
y = np.random.rand(n, 1)
eta = np.random.randn(n, 1) * 0.05
x = y + 0.3 * np.sin(2 * np.pi * y) + eta
data = np.concatenate((x, y), axis=1)
K = 3

X = x.reshape(-1, 1)
y = y.reshape(-1, 1)
xtest = x
ytest = y

plt.scatter(x, y, edgecolors='blue', color="none")
plt.title('Inverse problem')
pml.save_fig('Inverse_problem.png')
plt.savefig('Inverse_problem.png')
plt.show()


def normalizelogspace(x):
    # Normalize each row to sum to one, working in log space.
    L = logsumexp(x, axis=1).reshape(-1, 1)
    Lnew = np.repeat(L, x.shape[1], axis=1)
    y = x - Lnew
    return y, Lnew


def is_pos_def(x):
    return np.all(np.linalg.eigvals(x) > 0)

Example 21

exog = sm.add_constant(dfx['x'])
endog = dfy['y']
dft = pd.DataFrame(x_test, columns = ['test'])

qrmodel = QuantReg(endog, exog)
result = qrmodel.fit(q=0.5)

ypred_qr = np.dot(dft, result.params[1]) + result.params[0] #results.predict(dft)

"""Student-t"""

tmodel = TLinearModel(endog, exog)
results = tmodel.fit(df=0.6)

ypred_t = np.dot(dft, results.params[1]) + results.params[0] #results.predict(dft)

"""Plot"""

plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.yticks(np.arange(ymin, ymax, 1.0))
plt.scatter(x, y, color="none", edgecolor="black")
plt.plot(x_test, y_pred1, '-.', color='black') #Least squares
plt.plot(x_test, y_pred2, '--', color='green') #Huber
plt.plot(x_test, ypred_t, color='red')         #student
plt.plot(x_test, ypred_qr, '--', color='blue') 
plt.legend(["Least squares", "Huber, \u0394 =1", "Student-t, \u03BD =0.6", "Laplace"])
pml.save_fig('Robust.png')
plt.savefig('Robust.png')
plt.show()
Example 22
def ridgeReg(X, y, lambd):
    # Ridge regression in closed form: w = (X'X + lambda I)^{-1} X'y.
    D = X.shape[1]
    w = np.linalg.inv(X.T @ X + lambd * np.eye(D)) @ X.T @ y
    return w


fig, ax = plt.subplots(3, 3, figsize=(10,10))
plt.tight_layout()

for (i, s) in enumerate(sigmas):
    rbf_train = rbf_features(addones(xtrain), addones(centers), s)
    rbf_test = rbf_features(addones(xtest), addones(centers), s)
    reg_w = ridgeReg(rbf_train, ytrain, 0.3)
    ypred = rbf_test @ reg_w

    ax[i, 0].plot(xtrain, ytrain, '.', markersize=8)
    ax[i, 0].plot(xtest, ypred)
    ax[i, 0].set_ylim([-10, 20])
    ax[i, 0].set_xticks(np.arange(0, 21, 5))

    for j in range(K):
        ax[i, 1].plot(xtest, rbf_test[:, j], 'b-')
        ax[i, 1].set_xticks(np.arange(0, 21, 5))
        ax[i, 1].ticklabel_format(style='sci', scilimits=(-2, 2))

    ax[i, 2].imshow(rbf_train, interpolation='nearest', aspect='auto', cmap=plt.get_cmap('viridis'))
    ax[i, 2].set_yticks(np.arange(20, 4, -5))
    ax[i, 2].set_xticks(np.arange(2, 10, 2))
pml.save_fig("../figures/rbfDemoALL.pdf", dpi=300)
plt.show()
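
addones and rbf_features are not shown; minimal sketches (assumed) consistent with their use above:

def addones(X):
    # Prepend a bias column of ones.
    X = np.asarray(X).reshape(len(X), -1)
    return np.column_stack([np.ones(len(X)), X])

def rbf_features(X, centers, sigma):
    # Gaussian RBF design matrix: Phi[n, k] = exp(-||x_n - c_k||^2 / (2 sigma^2)).
    d2 = ((X[:, None, :] - centers[None, :, :])**2).sum(axis=-1)
    return np.exp(-d2 / (2 * sigma**2))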
Example 23

x1s = np.linspace(-0.2, 1.2, 100)
x2s = np.linspace(-0.2, 1.2, 100)
x1, x2 = np.meshgrid(x1s, x2s)

z1 = mlp_xor(x1, x2, activation=heaviside)
z2 = mlp_xor(x1, x2, activation=sigmoid)

#plt.figure(figsize=(10,4))
plt.figure()

#plt.subplot(121)
plt.contourf(x1, x2, z1)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "r^", markersize=20)
plt.title("Activation function: heaviside", fontsize=14)
plt.grid(True)
pml.save_fig("xor-heaviside.pdf")
plt.show()

plt.figure()
#plt.subplot(122)
plt.contourf(x1, x2, z2)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "r^", markersize=20)
plt.title("Activation function: sigmoid", fontsize=14)
plt.grid(True)
pml.save_fig("xor-sigmoid.pdf")
plt.show()
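
heaviside, sigmoid, and mlp_xor are not shown; minimal sketches (assumed), using the classic two-hidden-unit construction of XOR:

import numpy as np

def heaviside(z):
    return (z >= 0).astype(z.dtype)

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def mlp_xor(x1, x2, activation=heaviside):
    # h1 fires when x1 + x2 >= 0.5 (OR-like), h2 when x1 + x2 >= 1.5
    # (AND-like); the output fires for h1 AND NOT h2, which is XOR.
    return activation(activation(x1 + x2 - 0.5)
                      - activation(x1 + x2 - 1.5) - 0.5)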
Example 24
ndx = np.argsort(Nh)
Nh = Nh[ndx]
marginal_lik = marginal_lik[ndx]
log_lik = log_lik[ndx]
log_BF = log_BF[ndx]

p0 = (1 / 2)**N
plt.plot(marginal_lik, 'o-', linewidth=2)
plt.plot((0, 2**N), (p0, p0), c='k', linewidth=2)
plt.xticks(list(range(len(Nh))), Nh.astype(int))
plt.xlabel('num heads')
plt.title(
    r"Marginal likelihood for Beta-Bernoulli model, $\int p(D|\theta)\,\mathrm{Be}(\theta|1,1)\,d\theta$"
)
plt.xlim((-0.6, 2**N))
save_fig("joshCoins4.pdf")
plt.show()

plt.plot(np.exp(log_BF), 'o-', linewidth=2)
plt.title("BF(1,0)")
plt.xticks(list(range(len(Nh))), Nh.astype(int))
plt.xlim((-0.6, 2**N))
save_fig("joshCoins4BF.pdf")
plt.show()

BIC1 = log_lik - 1
plt.plot(BIC1, 'o-', linewidth=2)
plt.title(r"BIC approximation to $log_{10} p(D|M1)$")
plt.xticks(list(range(len(Nh))), Nh.astype(int))
plt.xlim((-0.6, 2**N))
save_fig("joshCoins4BIC.pdf")
Example 25
m = 1 / x_k
b = f(x_k) - m * x_k
tl = lambda x: m * x + b

plt.plot((0.1, 12), (0, 0), '-k', linewidth=2, zorder=1)
plt.plot(domain, f(domain), '-r', linewidth=3, label=r"$g(x)$", zorder=2)
plt.plot(domain,
         tl(domain),
         '--b',
         linewidth=2.5,
         label=r"$g_{lin}(x)$",
         zorder=3)

plt.scatter(x_k, f(x_k), marker='.', c='black', s=180, zorder=4)
plt.scatter(-b / m, 0, marker='.', c='black', s=180, zorder=4)
plt.plot((x_k, x_k), (ymin, f(x_k)), ":k")
plt.plot((-b / m, -b / m), (ymin, 0), ":k")

plt.xlim((xmin, xmax))
plt.ylim((ymin, ymax))

frame1 = plt.gca()
frame1.axes.xaxis.set_ticklabels([])
frame1.axes.yaxis.set_ticklabels([])

plt.xticks([x_k, -b / m], [r'$x_{k}$', r'$x_{k} + d_{k}$'])

plt.legend()
save_fig("newtonsMethodMin1d.pdf")
plt.show()
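
f, x_k, domain, and the axis limits are defined earlier in the script and are not shown. One illustrative setup (assumed) consistent with the tangent slope m = 1/x_k, i.e. f = log, with x_k chosen so the tangent's zero crossing stays in view:

import numpy as np
import matplotlib.pyplot as plt

f = np.log                      # then f'(x_k) = 1/x_k, matching m above
x_k = 2.0
domain = np.linspace(0.1, 12, 200)
xmin, xmax = 0, 12.5
ymin, ymax = -1.5, 3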
Example 26
    X, Y = np.meshgrid(weight, weight)
    ax.plot_surface(Y,
                    X,
                    probs,
                    cmap='jet',
                    vmin=0,
                    vmax=3,
                    rstride=1,
                    cstride=1,
                    linewidth=0)
    ax.view_init(elev=25, azim=azim)
    ax.set_zlabel('p')
    ttl = ','.join(['{:0.2f}'.format(d) for d in alphavec])
    ax.set_title(ttl)
    alpha = int(np.round(alphavec[0] * 10))
    save_fig('dirSimplexAlpha{}.pdf'.format(alpha))
    plt.show()

if 0:
    fig = plt.figure(figsize=(20, 15))
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(Y,
                    X,
                    probs,
                    cmap='jet',
                    vmin=0,
                    vmax=3,
                    rstride=1,
                    cstride=1,
                    linewidth=0)
    ax.view_init(elev=25, azim=200)
Example 27
X = rgb2gray(img)

r = np.linalg.matrix_rank(X)
print(r)

U, sigma, V = np.linalg.svd(X, full_matrices=True)
ranks = [1, 2, 5, 10, 20, r]
R = len(ranks)

for i in range(R):
    k = ranks[i]
    x_hat = U[:, :k] @ np.diag(sigma[:k]) @ V[:k, :]  # rank-k reconstruction
    plt.imshow(x_hat, cmap='gray')
    plt.title("rank {}".format(k))
    plt.axis("off")
    save_fig("svdImageDemoClown{}.pdf".format(k))
    plt.show()

k = 100
plt.plot(np.log(sigma[:k]), 'r-', linewidth=4, label="Original")
plt.ylabel(r"$log(\sigma_i)$")
plt.xlabel("i")

# Shuffling every pixel destroys the low-rank structure; permuting whole
# rows (np.random.permutation(X)) would leave the singular values unchanged.
x1d = X.flatten()       # flatten() returns a copy, so X is untouched
np.random.shuffle(x1d)  # in place
x2 = x1d.reshape(X.shape)
U, sigma2, V = np.linalg.svd(x2, full_matrices=False)
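
Presumably the shuffled spectrum is then overlaid on the scree plot for comparison; a sketch of that step (the filename is a guess):

plt.plot(np.log(sigma2[:k]), 'g:', linewidth=4, label="Randomized")
plt.legend()
save_fig("svdImageDemoClownSigmaScree.pdf")
plt.show()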
Example 28

n = 50
xrange = np.linspace(-2, 2, n)
yrange = np.linspace(-2, 2, n)
xx, yy = np.meshgrid(xrange, yrange)
flatxx = xx.reshape((n**2, 1))
flatyy = yy.reshape((n**2, 1))
X = np.column_stack((flatxx, flatyy))
f = noisy_oracle(X)

fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # Axes3D(fig) is deprecated in modern matplotlib
ax.plot_surface(xx, yy, f.reshape(n, n), rstride=1, cstride=1, cmap='jet')

save_fig('rbf-boss-surface.png')
plt.show()

N = X.shape[0]
perm = np.random.permutation(N)
Ntrain = 500
ndx = perm[:Ntrain]
XX = X[ndx, :]
ff = f[ndx]
fmax = np.max(ff)
thresh = fmax * 0.6
thresh = 1.0  # override with a fixed threshold
yy = ff >= thresh  # binary labels (note: this shadows the meshgrid yy above)

fig = plt.figure()
ax = fig.add_subplot(111)
Example 29
    if use_quad:
        phi = np.column_stack((X_pad, X**2))

    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # Axes3D(fig) is deprecated
    ax.set_zlim(15, 19)
    ax.scatter(X[:, 0], X[:, 1], y, color='r')

    n = 10
    xrange = np.linspace(min(X[:, 0]), max(X[:, 0]), n)
    yrange = np.linspace(min(X[:, 1]), max(X[:, 1]), n)
    xx, yy = np.meshgrid(xrange, yrange)
    flatxx = xx.reshape((n**2, 1))
    flatyy = yy.reshape((n**2, 1))
    w = np.linalg.lstsq(phi, y, rcond=None)[0]

    z = np.column_stack((flatxx, flatyy))
    z = np.column_stack((np.ones(n**2), z))
    if use_quad:
        z = np.column_stack((z, flatxx**2, flatyy**2))

    f = np.dot(z, w)
    ax.plot_surface(xx, yy, f.reshape(n, n), rstride=1, cstride=1, cmap='jet')

    name = 'linregSurfaceLinear.pdf'
    if use_quad:
        name = 'linregSurfaceQuad.pdf'

    save_fig(name)
    plt.show()
Example 30
    muk[:, k] = w
    q = np.multiply(weights[:, k], muk[:, k])
    mu = mu + q
    vk[:, k] = sigma2[:, k]
    v = v + np.multiply(weights[:, k],
                        (vk[:, k] + np.square(muk[:, k]))).reshape(-1, 1)

v = v - np.square(mu).reshape(-1, 1)

plt.figure()
plt.scatter(xtest, y, edgecolors='blue', color="none")
plt.plot(xtest, muk[:, 0])
plt.plot(xtest, muk[:, 1])
plt.plot(xtest, muk[:, 2])
plt.title('Expert-predictions')
pml.save_fig('mixexp_expert_predictions.pdf')
plt.show()

plt.figure()
for i in range(K):
    plt.scatter(y, post[:, i])
plt.title('Gating functions')
pml.save_fig('mixexp_gating_functions.pdf')
plt.show()

# MAP assignment of each point to an expert.
zmap = np.argmax(post, axis=1).reshape(-1, 1)
yhat = np.empty((N, 1))
for i in range(N):
    yhat[i, 0] = muk[i, zmap[i, 0]]