# コード例 #1 (Code example #1)
def bootstrap_evaluation(x, y, dropout, learning_rate, epochs, n_heads, ax):
    """Train a bootstrapped multi-head model and plot its predictions.

    Each head's prediction is drawn faintly on *ax*; the ensemble mean and
    uncertainty are then overlaid against the training data via
    ``plotting.plot_mean_vs_truth``.
    """
    sess, x_placeholder, dropout_placeholder, mask_placeholder = \
        bootstrap_training(x, y, dropout, learning_rate, epochs, n_heads)

    # Ops registered in the graph's named collections during training.
    prediction_op = sess.graph.get_collection("prediction")
    uncertainty_op = sess.graph.get_collection("uncertainties")
    heads_op = sess.graph.get_collection("heads")

    # Evaluation grid extending 20% beyond the observed x-range.
    margin = 0.2 * np.max(x)
    x_eval = np.linspace(np.min(x) - margin,
                         np.max(x) + margin,
                         100).reshape([-1, 1])

    feed_dict = {x_placeholder: x_eval,
                 dropout_placeholder: 0,  # dropout disabled at eval time
                 mask_placeholder: np.ones(shape=(len(x_eval), n_heads, 1))}

    y_eval, uncertainties_eval, heads_eval = sess.run(
        [prediction_op, uncertainty_op, heads_op], feed_dict)

    heads_eval = np.array(heads_eval).reshape(len(x_eval), n_heads)
    # get_collection returns a list; the single op's output sits at index 0.
    y_eval = y_eval[0].flatten()
    uncertainties_eval = uncertainties_eval[0].flatten()

    # One faint curve per head, then the aggregate mean/uncertainty overlay.
    for head in range(n_heads):
        ax.plot(x_eval, heads_eval[:, head], alpha=0.3)

    plotting.plot_mean_vs_truth(x, y, x_eval, y_eval, uncertainties_eval, ax)
# コード例 #2 (Code example #2)
def dropout_evaluation(x, y, dropout, learning_rate, epochs, n_passes, ax,
                       train_dropout=0.2):
    """Train with dropout and plot an MC-dropout predictive estimate.

    The model is trained with ``train_dropout`` (previously hard-coded to
    0.2; kept as the default for backward compatibility).  At evaluation
    time dropout stays active at rate ``dropout`` and ``n_passes``
    stochastic forward passes are run: their mean is the prediction and
    their variance is used as the uncertainty estimate, both plotted on
    *ax* via ``plotting.plot_mean_vs_truth``.
    """
    sess, x_placeholder, dropout_placeholder = \
        dropout_training(x, y, train_dropout, learning_rate, epochs)

    prediction_op = sess.graph.get_collection("prediction")

    # Evaluation grid extending 10% beyond the observed x-range.
    additional_range = 0.1 * np.max(x)
    x_eval = np.linspace(np.min(x) - additional_range,
                         np.max(x) + additional_range,
                         100).reshape([-1, 1])

    # Dropout is deliberately kept on at evaluation time (MC dropout).
    feed_dict = {x_placeholder: x_eval,
                 dropout_placeholder: dropout}

    # get_collection returns a list; index 0 is the single prediction op.
    predictions = [sess.run(prediction_op, feed_dict)[0]
                   for _ in range(n_passes)]

    y_eval = np.mean(predictions, axis=0).flatten()
    uncertainty_eval = np.var(predictions, axis=0).flatten()

    plotting.plot_mean_vs_truth(x, y, x_eval, y_eval, uncertainty_eval, ax)
# コード例 #3 (Code example #3)
    # NOTE(review): this chunk starts mid-function — x, y and
    # additional_range are defined above this view; do not assume their shape.
    # Evaluation grid extending beyond the observed x-range.
    x_eval = np.linspace(np.min(x) - additional_range, np.max(x) + additional_range, 100).reshape([-1, 1])
    # scikit-learn estimators expect 2-D (n_samples, n_features) inputs.
    x = x.reshape((-1, 1))
    y = y.reshape((-1, 1))

    fig, axs = plt.subplots(2, 1, figsize=(30, 10))
    # Short length-scale RBF prior; alpha adds observation noise to the fit.
    kernel = 0.5 * RBF(length_scale=0.01)
    gp = GaussianProcessRegressor(kernel=kernel, alpha=0.03, n_restarts_optimizer=10)

    # Draw 5 sample functions from the GP prior (before fitting) on the top axis.
    y_prior = gp.sample_y(x_eval, 5)
    axs[0].plot(x_eval, y_prior)
    gp.fit(x, y)

    # Posterior mean and per-point standard deviation on the eval grid.
    y_eval, sigma = gp.predict(x_eval, return_std=True)
    y_eval = y_eval.flatten()

    plotting.plot_mean_vs_truth(x, y, x_eval, y_eval, sigma, axs[1])
    # Title shows the optimized kernel and its log marginal likelihood.
    axs[1].set_title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
                     % (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)))
    plt.show()
    fig.savefig("GP_Sinus.pdf")
    plt.close()

    # Second experiment: nonlinear samples with a broader kernel and more noise.
    x, y = sample_generators.generate_osband_nonlinear_samples()
    additional_range = 0.2 * np.max(x)
    x_eval = np.linspace(np.min(x) - additional_range, np.max(x) + additional_range, 100).reshape([-1, 1])
    x = x.reshape((-1, 1))
    y = y.reshape((-1, 1))

    fig, axs = plt.subplots(2, 1, figsize=(30, 10))
    kernel = 1 * RBF(length_scale=1)
    # NOTE(review): view is cut off here — the rest of this experiment continues below.
    gp = GaussianProcessRegressor(kernel=kernel, alpha=0.5, n_restarts_optimizer=10)