Example no. 1
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_probability as tfp

# Project-specific helpers (data generation, preprocessing, plotting, and
# figure_dir) are assumed to be imported from the accompanying package.
tfd = tfp.distributions

# %% codecell
np.random.seed(0)
n_train = 90
batch_size = n_train
epochs = 10000

x_train, y_train = create_split_periodic_data_heteroscedastic(n_data=n_train,
                                                              sigma1=0.1,
                                                              sigma2=0.8,
                                                              seed=42)
x_plot = create_x_plot(x_train)
# preprocessor = StandardPreprocessor()
# x_train, _x_plot, x_plot = preprocessor.preprocess_create_x_train_x_plot(_x_train)
y_ground_truth = ground_truth_periodic_function(x_plot)

input_shape = [1]
# Two hidden ReLU layers plus a 2-unit linear output (predicted mean and scale).
layer_units = [50, 20] + [2]
layer_activations = ["relu"] * (len(layer_units) - 1) + ["linear"]
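
# Illustrative sketch (an assumption, not the notebook's actual model builder,
# which comes from the accompanying package): how layer_units and
# layer_activations would map onto a plain tf.keras Sequential network. The
# final 2-unit linear layer carries the predicted mean and scale.
import tensorflow as tf


def build_network_sketch(input_shape, layer_units, layer_activations):
    model = tf.keras.Sequential(
        [tf.keras.layers.InputLayer(input_shape=input_shape)])
    for units, activation in zip(layer_units, layer_activations):
        model.add(tf.keras.layers.Dense(units, activation=activation))
    return model


model_sketch = build_network_sketch(input_shape, layer_units, layer_activations)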

# Standard normal priors on the network weights and biases.
weight_prior = tfd.Normal(0, 1)
bias_prior = weight_prior
# InverseGamma prior on the noise variance; Invert(Square()) is a square
# root, turning it into a prior on the noise scale.
a = 0.5
b = 0.01
_var_d = tfd.InverseGamma(a, b)
noise_scale_prior = tfd.TransformedDistribution(
    distribution=_var_d,
    bijector=tfp.bijectors.Invert(tfp.bijectors.Square()))
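
# Sanity check (illustrative addition): Invert(Square()) acts as a square
# root, so noise_scale_prior is the law of sqrt(V) with V ~ InverseGamma(a, b),
# i.e. a prior on the noise standard deviation. Samples should be positive:
print(noise_scale_prior.sample(5))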

# %% codecell
# Assumption: the data is generated with the same helper as in the first
# example; p, amplitude, and seed are assumed to be defined earlier in the
# notebook.
_x_train, y_train = create_split_periodic_data_heteroscedastic(
    n_data=n_train,
    lower1=-4.0,
    upper1=-1.3,
    lower2=4.0,
    upper2=7.0,
    sigma1=0.1,
    sigma2=0.1,
    p=p,
    amplitude=amplitude,
    seed=seed,
)
preprocessor = StandardPreprocessor()
x_train, _x_plot, x_plot = preprocessor.preprocess_create_x_train_x_plot(
    _x_train, test_ds=0.03)
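
# StandardPreprocessor is a project helper not shown in this notebook. A
# minimal sketch of what it presumably does (assumption: standardize the
# inputs to zero mean and unit variance using training statistics):
_x_mean, _x_std = _x_train.mean(), _x_train.std()
x_train_sketch = (_x_train - _x_mean) / _x_std  # should match x_train up to the helper's details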
y_ground_truth = ground_truth_periodic_function(_x_plot,
                                                p=p,
                                                amplitude=amplitude)

# Same architecture as before: two hidden ReLU layers plus a 2-unit linear
# output (predicted mean and scale).
layer_units = [50, 20] + [2]
layer_activations = ["relu"] * (len(layer_units) - 1) + ["linear"]

# %% codecell
experiment_name = "nonlinear-gap"
y_lim = [-5, 5]

fig, ax = plt.subplots(figsize=(8, 8))
plot_training_data(_x_train, y_train, fig=fig, ax=ax, y_lim=y_lim)
plot_ground_truth(_x_plot, y_ground_truth, fig=fig, ax=ax, alpha=0.2)
ax.legend()
fig.savefig(figure_dir.joinpath(f"{experiment_name}_data.pdf"))