Example #1
for factor in teacher.factors:
    print(factor)

scenario = BayesOptimalScenario(teacher, x_ids=["x", "z"])
scenario.setup(seed=42)
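# draw the factor graph of the student model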
scenario.student.plot()

# %%
# EP dynamics
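# a small damping (0.1) averages each EP update with the previous one,
# which stabilizes the iterations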
ep_evo = scenario.ep_convergence(metrics=["mse", "sign_mse"],
                                 damping=0.1,
                                 max_iter=40)
qplot(ep_evo,
      x="iter",
      y=["mse", "sign_mse", "v"],
      y_markers=["x", ".", "-"],
      column="id",
      y_legend=True)
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])

# %%
# MSE curve
# See data/door_mse_curves.py for the code
rename = {
    "alpha": r"$\alpha$",
    "output_width": "K",
    "a0": r"$a_0$",
    "v": "MSE",
    "n_iter": "iterations",
    "x_id=": "",
    "p_pos": r"$p_+$",
Example #2
# You can build the perceptron directly (see the sketch after the call below), or use the `glm_generative` model builder.
teacher = glm_generative(N=1000,
                         alpha=1.7,
                         ensemble_type="gaussian",
                         prior_type="binary",
                         output_type="sgn")
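
# For reference, a minimal sketch of the direct construction, assuming
# TRAMP's module-composition API (BinaryPrior, LinearChannel, SgnChannel
# chained with `@` through variable nodes; the GaussianEnsemble call is
# an assumption too):
from tramp.variables import SISOVariable as V, SILeafVariable as O
from tramp.priors import BinaryPrior
from tramp.channels import LinearChannel, SgnChannel
from tramp.ensembles import GaussianEnsemble

F = GaussianEnsemble(M=1700, N=1000).generate()  # alpha = M/N = 1.7
teacher_direct = (
    BinaryPrior(size=1000) @ V(id="x") @ LinearChannel(F) @ V(id="z") @
    SgnChannel() @ O(id="y")
).to_model()
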
scenario = BayesOptimalScenario(teacher, x_ids=["x"])
scenario.setup(seed=42)
scenario.student.plot()

# %%
# EP dynamics
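# the EarlyStoppingEP callback halts the run as soon as the EP updates
# have converged, instead of always running the full max_iter iterations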
ep_evo = scenario.ep_convergence(metrics=["mse"],
                                 max_iter=30,
                                 callback=EarlyStoppingEP())
qplot(ep_evo, x="iter", y=["mse", "v"], y_markers=[".", "-"], y_legend=True)

# %%
# Recovered signal
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])

# %%
# Compare EP vs SE
# ----------------
# See `data/perceptron_ep_vs_se.py` for the corresponding script.
rename = {
    "alpha": r"$\alpha$",
    "n_iter": "iterations",
    "p_pos": r"$p_+$",
    "source=": ""
}
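# A sketch of the comparison plot, assuming the script saves its results
# to data/perceptron_ep_vs_se.csv (the filename is an assumption) and that
# pandas is imported as pd:
ep_vs_se = pd.read_csv("data/perceptron_ep_vs_se.csv")
qplot(ep_vs_se, x="alpha", y="v", marker="source",
      rename=rename, usetex=True, font_size=16)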
Example #3
"""
Universality (noiseless CS)
===========================

"""
import pandas as pd
from matplotlib import rcParams
rcParams['axes.unicode_minus'] = False
from tramp.experiments import qplot

# %%
# Model
# -----
# For the sensing matrix $F$ we consider a random features matrix
# $F = \tfrac{1}{\sqrt{N}}f(WX)$, where $f$ is abs, relu, sgn or tanh.
# See `data/cs_universality.py` for the corresponding script.
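# An illustrative NumPy construction of such a random features matrix
# (the shapes and the Gaussian W, X below are assumptions for the sketch):
import numpy as np

rng = np.random.default_rng(0)
M, D, N = 500, 300, 250
W = rng.standard_normal((M, D))
X = rng.standard_normal((D, N))
F_relu = np.maximum(W @ X, 0) / np.sqrt(N)  # f = relu, F has shape (M, N)
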
rename = {
    "alpha": r"$\alpha$", "prior_rho": r"$\rho$",
    "source=": "", "n_iter": "iterations"
}
univ = pd.read_csv("data/cs_universality.csv")
qplot(
    univ.query("source=='SE'"),
    x="alpha", y="v", linestyle="f", column="prior_rho",
    rename=rename, usetex=True, font_size=16
)
Example #4
                       prior_rho=0.5)
scenario = BayesOptimalScenario(model, x_ids=["x"])
scenario.setup()
scenario.student.plot()

for factor in scenario.student.factors:
    print(factor.id, factor)

# %%
# EP dynamics
# Damping is needed here: the first run below, without damping, behaves
# badly; the second run uses damping=0.3 and converges.
ep_evo = scenario.ep_convergence(metrics=["mse", "phase_mse"], max_iter=20)
qplot(ep_evo,
      x="iter",
      y=["phase_mse", "v"],
      y_markers=["x", "-"],
      y_legend=True)
ep_evo = scenario.ep_convergence(metrics=["mse", "phase_mse"],
                                 max_iter=70,
                                 damping=0.3)
qplot(ep_evo,
      x="iter",
      y=["mse", "phase_mse", "v"],
      y_markers=[".", "x", "-"],
      y_legend=True)

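# Recovered signal: phase retrieval determines x only up to a global phase,
# hence the phase_mse metric tracked above.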
plot_compare_complex(scenario.x_true["x"], scenario.x_pred["x"])

# %%
# Compare EP vs SE
Example #5
                         prior_rho=0.5,
                         prior_mean=0.1)
for factor in teacher.factors:
    print(factor)

scenario = BayesOptimalScenario(teacher, x_ids=["x", "z"])
scenario.setup()
scenario.student.plot()

# %%
# EP dynamics
ep_evo = scenario.ep_convergence(metrics=["mse", "sign_mse"],
                                 damping=0.1,
                                 max_iter=20)
qplot(ep_evo,
      x="iter",
      y=["mse", "sign_mse", "v"],
      y_markers=["x", ".", "-"],
      column="id",
      y_legend=True)
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])

# %%
# MSE curve
# See data/sgn_retrieval_mse_curves.py for the code
rename = {
    "alpha": r"$\alpha$",
    "prior_rho": r"$\rho$",
    "prior_mean": r"$\mu$",
    "a0=0.1": "uninformed",
    "a0=1000.0": "informed",
    "v": "MSE",
    "n_iter": "iterations",
Example #6
teacher = glm_generative(
    N=N, alpha=alpha, ensemble_type="gaussian", prior_type="gauss_bernoulli",
    output_type="relu", prior_rho=0.5
)
for factor in teacher.factors:
    print(factor)
scenario = BayesOptimalScenario(teacher, x_ids=["x", "z"])
scenario.setup(seed=42)
scenario.student.plot()


# %%
# EP dynamics
ep_evo = scenario.ep_convergence(metrics=["mse"], max_iter=10)
qplot(
    ep_evo, x="iter", y=["mse", "v"],
    y_markers=[".", "-"], column="id", y_legend=True
)
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])

# %%
# MSE curve
# See data/relu_mse_curves.py for the code
rename = {
    "alpha": r"$\alpha$", "v": "MSE", "n_iter": "iter", "a0": r"$a_0$",
    "prior_rho": r"$\rho$", "x_id=": "", "n_iter": "iterations"
}
mse_curves = pd.read_csv("data/relu_mse_curves.csv")

qplot(
    mse_curves.query("x_id == 'x'"), x="alpha", y="v", color="prior_rho",
    rename=rename, usetex=True, font_size=14
)
Example #7
                         ensemble_type="gaussian",
                         prior_type="gauss_bernoulli",
                         output_type="gaussian",
                         output_var=1e-11,
                         prior_rho=0.5)
for factor in teacher.factors:
    print(factor)
scenario = BayesOptimalScenario(teacher, x_ids=["x"])
scenario.setup()
scenario.student.plot()

# %%
# EP dynamics
ep_evo = scenario.ep_convergence(metrics=["mse"], max_iter=10)
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])
qplot(ep_evo, x="iter", y=["v", "mse"], y_markers=["-", "."], y_legend=True)

# %%
# Compare EP vs SE
rename = {
    "alpha": r"$\alpha$",
    "prior_rho": r"$\rho$",
    "source=": "",
    "n_iter": "iterations"
}
ep_vs_se = pd.read_csv("data/compressed_sensing_ep_vs_se.csv")
qplot(ep_vs_se,
      x="alpha",
      y="v",
      marker="source",
      column="prior_rho",