# %%
# Example 1: perceptron
# ---------------------
import pandas as pd

# import paths assume the tramp package layout
from tramp.algos import EarlyStoppingEP
from tramp.experiments import (BayesOptimalScenario, qplot, plot_compare,
                               plot_compare_complex)
from tramp.models import glm_generative
# $y = \mathrm{sgn}(Fx) \in \{\pm 1\}^M$, where
# $F \in \mathbb{R}^{M \times N}$ is a Gaussian random matrix.
# You can build the perceptron directly (see the sketch below), or use the
# `glm_generative` model builder.
teacher = glm_generative(N=1000,
                         alpha=1.7,
                         ensemble_type="gaussian",
                         prior_type="binary",
                         output_type="sgn")
scenario = BayesOptimalScenario(teacher, x_ids=["x"])
scenario.setup(seed=42)
scenario.student.plot()
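# %%
# Direct construction
# -------------------
# A minimal sketch of building the same teacher module by module, assuming
# tramp's composition DSL (class names and signatures may differ slightly):
from tramp.channels import LinearChannel, SgnChannel
from tramp.ensembles import GaussianEnsemble
from tramp.priors import BinaryPrior
from tramp.variables import SISOVariable as V, SILeafVariable as O

N, alpha = 1000, 1.7
F = GaussianEnsemble(M=int(alpha * N), N=N).generate()   # sensing matrix
teacher_direct = (
    BinaryPrior(size=N) @ V(id="x") @       # binary prior on the signal x
    LinearChannel(W=F) @ V(id="z") @        # linear mixing z = Fx
    SgnChannel() @ O(id="y")                # observed sign measurements
).to_model()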

# %%
# EP dynamics
ep_evo = scenario.ep_convergence(metrics=["mse"],
                                 max_iter=30,
                                 callback=EarlyStoppingEP())
qplot(ep_evo, x="iter", y=["mse", "v"], y_markers=[".", "-"], y_legend=True)

# %%
# Recovered signal
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])

# %%
# Compare EP vs SE
# ----------------
# See `data/perceptron_ep_vs_se.py` for the corresponding script.
rename = {
    "alpha": r"$\alpha$",
    "n_iter": "iterations",
    "p_pos": r"$p_+$",
}
# %%
# Example 2: phase retrieval
# --------------------------
# $y = |Fx| \in \mathbb{R}_+^M$, so $x$ is recovered up to a global phase.
model = glm_generative(N=1000,    # illustrative size
                       alpha=2.,  # illustrative measurement ratio
                       ensemble_type="complex_gaussian",  # complex ensemble assumed for the modulus output
                       prior_type="gauss_bernoulli",
                       output_type="modulus",
                       prior_mean=0.01,
                       prior_rho=0.5)
scenario = BayesOptimalScenario(model, x_ids=["x"])
scenario.setup()
scenario.student.plot()

for factor in scenario.student.factors:
    print(factor)

# %%
# EP dynamics
# Damping is needed: without it EP oscillates and fails to converge.
ep_evo = scenario.ep_convergence(metrics=["mse", "phase_mse"], max_iter=20)
qplot(ep_evo,
      x="iter",
      y=["phase_mse", "v"],
      y_markers=["x", "-"],
      y_legend=True)
ep_evo = scenario.ep_convergence(metrics=["mse", "phase_mse"],
                                 max_iter=70,
                                 damping=0.3)
qplot(ep_evo,
      x="iter",
      y=["mse", "phase_mse", "v"],
      y_markers=[".", "x", "-"],
      y_legend=True)

plot_compare_complex(scenario.x_true["x"], scenario.x_pred["x"])
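# %%
# The modulus output fixes $x$ only up to a global phase $e^{i\phi}$. A
# plain-numpy sketch (not a tramp helper) of aligning the estimate:
import numpy as np

x_true, x_pred = scenario.x_true["x"], scenario.x_pred["x"]
phi = np.angle(np.vdot(x_pred, x_true))   # phase of <x_pred, x_true>
x_aligned = x_pred * np.exp(1j * phi)     # rotate the estimate onto the truth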
# %%
# Example 3: door
# ---------------
# Binary signal observed through a door output of width K = output_width.
teacher = glm_generative(N=1000,    # illustrative size
                         alpha=2.,  # illustrative measurement ratio
                         ensemble_type="gaussian",
                         prior_type="binary",
                         output_type="door",
                         output_width=1.,
                         prior_p_pos=0.51)

for factor in teacher.factors:
    print(factor)

scenario = BayesOptimalScenario(teacher, x_ids=["x", "z"])
scenario.setup(seed=42)
scenario.student.plot()

# %%
# EP dynamics
ep_evo = scenario.ep_convergence(metrics=["mse", "sign_mse"],
                                 damping=0.1,
                                 max_iter=40)
qplot(ep_evo,
      x="iter",
      y=["mse", "sign_mse", "v"],
      y_markers=["x", ".", "-"],
      column="id",
      y_legend=True)
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])

# %%
# MSE curve
# See data/door_mse_curves.py for the code
rename = {
    "alpha": r"$\alpha$",
    "output_width": "K",
}
# %%
# Example 4: ReLU
# ---------------
alpha = 2.
N = 1000
teacher = glm_generative(
    N=N, alpha=alpha, ensemble_type="gaussian", prior_type="gauss_bernoulli",
    output_type="relu", prior_rho=0.5
)
for factor in teacher.factors:
    print(factor)
scenario = BayesOptimalScenario(teacher, x_ids=["x", "z"])
scenario.setup(seed=42)
scenario.student.plot()


# %%
# EP dynamics
ep_evo = scenario.ep_convergence(metrics=["mse"], max_iter=10)
qplot(
    ep_evo, x="iter", y=["mse", "v"],
    y_markers=[".", "-"], column="id", y_legend=True
)
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])

# %%
# MSE curve
# See data/relu_mse_curves.py for the code
rename = {
    "alpha": r"$\alpha$", "v": "MSE", "n_iter": "iterations", "a0": r"$a_0$",
    "prior_rho": r"$\rho$", "x_id=": ""
}
mse_curves = pd.read_csv("data/relu_mse_curves.csv")
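# Plot the loaded curves (a sketch; the columns follow the rename map above):
qplot(mse_curves.rename(columns=rename),
      x=r"$\alpha$", y="MSE", column=r"$\rho$", y_legend=True)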