# Example 1

# NOTE: the opening of this call is missing from the scraped snippet; the
# function name and leading keyword arguments are reconstructed from the
# parallel moment-matched plots later in this collection and are assumptions.
plot_moment_matched_predictive_normal_distribution(
    x_plot=_x_plot,
    predictive_distribution=mog_prediction,
    x_train=_x_train,
    y_train=y_train,
    y_ground_truth=y_ground_truth,
    y_lim=y_lim,
    title="Ensemble of ten MAP networks",
    save_path=figure_dir.joinpath(
        f"ensemble_moment_matched_{experiment_name}.pdf"),
)

# %% codecell
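# Plot a few individual ensemble members; each member outputs a full
# Gaussian predictive distribution, so this visualizes their disagreement.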
gaussian_predictions = ensemble.predict_list_of_gaussians(x_plot,
                                                          n_predictions=3)
fig, ax = plt.subplots(figsize=figsize)
plot_distribution_samples(
    x_plot=_x_plot,
    distribution_samples=gaussian_predictions,
    x_train=_x_train,
    y_train=y_train,
    y_ground_truth=y_ground_truth,
    fig=fig,
    ax=ax,
    y_lim=y_lim,
    save_path=figure_dir.joinpath(f"ensemble_members_{experiment_name}.pdf"),
)

# %% markdown
# # Neural Linear Model

# %%
layer_units[-1] = 1
initial_learning_rate = 0.01
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate, decay_steps=n_train, decay_rate=0.9, staircase=True)
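
# %% markdown
# The schedule above drops the learning rate by a factor of 0.9 every
# `n_train` optimizer steps (`staircase=True` makes the decay discrete
# rather than continuous). A minimal NumPy sketch of the value the schedule
# returns at a given step (illustrative only; `staircase_lr` is a
# hypothetical helper, not part of this codebase):

# %%
import numpy as np


def staircase_lr(step, initial_lr, decay_steps, decay_rate=0.9):
    # Mirrors tf.keras.optimizers.schedules.ExponentialDecay with
    # staircase=True: lr = initial_lr * decay_rate ** floor(step / decay_steps)
    return initial_lr * decay_rate ** np.floor(step / decay_steps)


# After three decay periods: 0.01 * 0.9 ** 3 == 0.00729
# staircase_lr(3 * n_train, initial_learning_rate, n_train)
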
# Example 2

# NOTE: the opening of this constructor call is missing from the scraped
# snippet; `MapDensityNetwork` is an assumed name for the class that builds
# the MAP network used below as `net`.
net = MapDensityNetwork(  # assumed class name
    input_shape=input_shape,
    layer_units=layer_units,
    layer_activations=layer_activations,
    weight_prior=weight_prior,
    bias_prior=bias_prior,
    n_train=n_train,
    learning_rate=0.01,
)

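# Prior predictive check: draw a few weight samples from the prior and plot
# the functions the untrained network induces.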
prior_predictive_distributions = net.predict_with_prior_samples(x_plot,
                                                                n_samples=4)

plot_distribution_samples(
    x_plot=x_plot,
    distribution_samples=prior_predictive_distributions,
    x_train=x_train,
    y_train=y_train,
    y_ground_truth=y_ground_truth,
    # y_lim=[-30, 30],
)

# %%
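# Stop once the training loss has not improved for 20 consecutive epochs;
# with restore_best_weights=False the weights from the final epoch are kept
# rather than rolled back to the best epoch seen.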
early_stop_callback = tf.keras.callbacks.EarlyStopping(
    monitor="loss", patience=20, verbose=1, restore_best_weights=False)

net.fit(
    x_train=x_train,
    y_train=y_train,
    batch_size=batch_size,
    epochs=epochs,
    early_stop_callback=early_stop_callback,
    verbose=0,
)

# Example 3 (marker and opening cells truncated in the source; the cells
# below exercise a variational network)

# %%
# weights[3][:] = np.array([-1., 2., 10., 10.])
# weights[3]
# weights[4][:] = -10
# variational_network.set_weights(weights)

# %%
predictive_distribution = variational_network.predict(x_plot, n_predictions=20)
gaussian_predictions = variational_network.predict_list_of_gaussians(
    x_plot, n_predictions=4)
plot_moment_matched_predictive_normal_distribution_and_function_samples(
    x_plot=x_plot,
    predictive_distribution=predictive_distribution,
    distribution_samples=gaussian_predictions,
    x_train=x_train,
    y_train=y_train,
    y_ground_truth=y_ground_truth,
    y_lim=y_lim,
    no_ticks=False,
)

# %%
plot_distribution_samples(
    x_plot=x_plot,
    distribution_samples=gaussian_predictions,
    x_train=x_train,
    y_train=y_train,
    y_ground_truth=y_ground_truth,
    y_lim=y_lim,
    no_ticks=False,
)
# Example 4

# NOTE: the opening of this call is missing from the scraped snippet; the
# function name and `x_plot` argument are reconstructed from the parallel
# calls in this collection and are assumptions.
plot_moment_matched_predictive_normal_distribution(
    x_plot=_x_plot,
    predictive_distribution=prediction,
    x_train=_x_train,
    y_train=y_train,
    y_ground_truth=y_ground_truth,
    y_lim=y_lim,
    # save_path=figure_dir.joinpath(f"ensemble_moment_matched_x3_gap.pdf")
)

# %% codecell
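# As above, plot a few individual ensemble members, here for the gap
# experiment (cf. the commented "x3_gap" save paths).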
gaussian_predictions = ensemble.predict_list_of_gaussians(x_plot,
                                                          n_predictions=3)
plot_distribution_samples(
    x_plot=_x_plot,
    distribution_samples=gaussian_predictions,
    x_train=_x_train,
    y_train=y_train,
    y_ground_truth=y_ground_truth,
    y_lim=y_lim,
    # save_path=figure_dir.joinpath(f"ensemble_members_x3_gap.pdf")
)

# %% markdown
# # Neural Linear Model

# %%
layer_units[-1] = 1
initial_learning_rate = 0.01
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate, decay_steps=n_train, decay_rate=0.9, staircase=True)

# Last-layer Bayesian network.
# NOTE: the opening of this constructor call is missing from the scraped
# snippet; `HmcNetwork` is an assumed placeholder name for the class that
# builds the sampler-backed network used below as `hmc_net`.
hmc_net = HmcNetwork(  # assumed class name
    step_size=step_size,
    num_leapfrog_steps=100,
    max_tree_depth=10,
    seed=0,
)
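
# %% markdown
# For orientation, a minimal TensorFlow Probability sketch of the samplers
# such arguments typically parameterize (an assumption about the wrapper's
# internals, which this snippet does not show): fixed-length HMC uses
# `step_size` and `num_leapfrog_steps`, while NUTS replaces the fixed
# trajectory length with an adaptive one capped by `max_tree_depth`.

# %%
import tensorflow as tf
import tensorflow_probability as tfp


def toy_target_log_prob(w):
    # Standard-normal toy target, only so the kernels can be constructed.
    return -0.5 * tf.reduce_sum(w ** 2)


hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=toy_target_log_prob,
    step_size=0.01,
    num_leapfrog_steps=100,
)
nuts_kernel = tfp.mcmc.NoUTurnSampler(
    target_log_prob_fn=toy_target_log_prob,
    step_size=0.01,
    max_tree_depth=10,
)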


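# Prior predictive check for the HMC network: sample parameter vectors from
# the prior and plot the predictions they induce.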
prior_samples = [hmc_net.sample_prior_state(seed=seed) for seed in [0, 1, 2, 3, 4]]
prior_predictions = [
    hmc_net.predict_from_sample_parameters(x_plot, prior_sample)
    for prior_sample in prior_samples
]
plot_distribution_samples(
    x_plot=x_plot,
    distribution_samples=prior_predictions,
    x_train=x_train,
    y_train=y_train,
    y_ground_truth=y_ground_truth,
)


# %% markdown
# As a sanity check, we can assert posterior equivalence between a MAP network
# and an HMC network. This verifies that the two models share the same prior
# and likelihood.

# %%
assert check_posterior_equivalence(net, hmc_net, x_train, y_train)
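
# %% markdown
# Concretely, two models define the same posterior exactly when their
# unnormalized log posteriors agree for every shared weight configuration,
# since log p(w | x, y) = log p(w) + log p(y | x, w) - log p(y | x) and the
# evidence term does not depend on the weights. A minimal sketch of such a
# check (hypothetical helper with assumed log_prior/log_likelihood methods;
# not the library's implementation):

# %%
import numpy as np


def posteriors_equivalent(net_a, net_b, weights, x, y, atol=1e-6):
    # Compare the unnormalized log posterior log p(w) + log p(y | x, w)
    # of both networks at a shared weight setting w.
    lp_a = net_a.log_prior(weights) + net_a.log_likelihood(weights, x, y)
    lp_b = net_b.log_prior(weights) + net_b.log_likelihood(weights, x, y)
    return np.isclose(lp_a, lp_b, atol=atol)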


# %%
# How to prepare MAP weights for HMC:
save_dir = "._toy_network_saving/"
save_dir = Path(save_dir)
save_dir.mkdir(parents=True, exist_ok=True)
save_path = save_dir.joinpath("toy_map_ensemble")
ensemble.save(save_path)
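# The saved ensemble can be restored with map_density_ensemble_from_save_path
# (next cell).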

# %%
loaded_ensemble = map_density_ensemble_from_save_path(save_path)
ensemble = loaded_ensemble

# %% codecell
gaussian_predictions = ensemble.predict_list_of_gaussians(x_plot, n_predictions=3)
plot_distribution_samples(
    x_plot=_x_plot,
    distribution_samples=gaussian_predictions,
    x_train=_x_train,
    y_train=y_train,
    y_ground_truth=y_ground_truth,
    y_lim=y_lim,
)
# fig.savefig(os.path.join(figure_dir, f"{n_networks}_ml_density_ensemble_mixture_of_gaussian_heteroscedastic.pdf"))


# %% codecell
gaussian_predictions = ensemble.predict_list_of_gaussians(x_plot, n_predictions=5)
plot_moment_matched_predictive_normal_distribution_and_function_samples(
    x_plot=_x_plot,
    predictive_distribution=mog_prediction,
    distribution_samples=gaussian_predictions,
    x_train=_x_train,
    y_train=y_train,
    y_ground_truth=y_ground_truth,
    # (any remaining arguments were truncated in the scraped snippet)
)