def run_perceptron(N, alpha, p_pos):
    """Run all solvers on the binary perceptron and return their records.

    N, alpha: signal size and measurement ratio; p_pos: prior P(x = +1).
    """
    # Teacher: binary prior, Gaussian sensing ensemble, sign output.
    model_kwargs = dict(
        N=N,
        alpha=alpha,
        ensemble_type="gaussian",
        prior_type="binary",
        output_type="sgn",
        prior_p_pos=p_pos,
    )
    scenario = BayesOptimalScenario(glm_generative(**model_kwargs), x_ids=["x"])
    # Stop early once the iteration has converged.
    return scenario.run_all(max_iter=200, callback=EarlyStopping())
def run_cs(N, alpha, ensemble_type, prior_rho):
    """Compressed sensing run: Gauss-Bernoulli prior, near-noiseless output.

    Returns the records produced by `run_all` with the "mse" metric.
    """
    # output_var ~ 0 emulates noiseless linear measurements.
    cs_model = glm_generative(
        N=N,
        alpha=alpha,
        ensemble_type=ensemble_type,
        prior_type="gauss_bernoulli",
        output_type="gaussian",
        prior_rho=prior_rho,
        output_var=1e-11,
    )
    scenario = BayesOptimalScenario(cs_model, x_ids=["x"])
    stopper = EarlyStopping()
    return scenario.run_all(metrics=["mse"], max_iter=200, callback=stopper)
def run_EP(alpha, rho, seed):
    """One EP run on the sparse abs-output model; returns its MSE record.

    The abs output leaves x identifiable only up to a global sign, hence
    the sign-symmetric MSE below.
    """
    model = glm_generative(
        N=2000,
        alpha=alpha,
        ensemble_type="gaussian",
        prior_type="gauss_bernoulli",
        output_type="abs",
        prior_rho=rho,
        prior_mean=0,
    )
    scenario = BayesOptimalScenario(model, x_ids=["x"])
    scenario.setup(seed)
    # Damping stabilizes EP on this non-linear output channel.
    estimate = scenario.run_ep(max_iter=200, damping=0.2)["x"]["r"]
    mse = sign_symmetric_mse(estimate, scenario.x_true["x"])
    return dict(source="EP", v=mse)
# --- Example #4 ---
def run_EP(alpha, rho, seed):
    """One EP run on the compressed-sensing model; returns its MSE record."""
    # output_var ~ 0: effectively noiseless linear measurements.
    cs_model = glm_generative(
        N=2000,
        alpha=alpha,
        ensemble_type="gaussian",
        prior_type="gauss_bernoulli",
        output_type="gaussian",
        prior_rho=rho,
        output_var=1e-10,
    )
    scenario = BayesOptimalScenario(cs_model, x_ids=["x"])
    scenario.setup(seed)
    x_hat = scenario.run_ep(max_iter=200)["x"]["r"]
    return dict(source="EP", v=mean_squared_error(x_hat, scenario.x_true["x"]))
# --- Example #5 ---
def run_phase_retrieval(N, alpha, prior_mean):
    """Complex phase retrieval: run all solvers and return their records."""
    # NOTE(review): prior_type spelling "gauss_bernouilli" kept byte-for-byte;
    # other examples in this file use "gauss_bernoulli" -- confirm which one
    # the library expects.
    model = glm_generative(
        N=N,
        alpha=alpha,
        ensemble_type="complex_gaussian",
        prior_type="gauss_bernouilli",
        output_type="modulus",
        prior_mean=prior_mean,
        prior_rho=0.5,
    )
    scenario = BayesOptimalScenario(model, x_ids=["x"])
    # Longer patience plus damping help EP converge on this harder model.
    stopper = EarlyStopping(wait_increase=10)
    return scenario.run_all(
        metrics=["mse", "phase_mse"],
        max_iter=200,
        damping=0.3,
        callback=stopper,
    )
def mse_lasso(alpha, param_scaled, seed):
    """MSE of scikit-learn Lasso on a Gauss-Bernoulli sensing scenario.

    param_scaled is the regularization in "scaled" units; it is converted
    to the scikit-learn alpha via noise_var * param_scaled / (M * rho).
    """
    # Fixed scenario: N=1000, sparsity rho=0.05, noise variance 1e-2.
    N, rho, noise_var = 1000, 0.05, 1e-2
    M = int(alpha * N)
    A = GaussianEnsemble(M=M, N=N).generate()
    model = (GaussBernoulliPrior(size=N, rho=rho) @ V("x") @ LinearChannel(A)
             @ V("z") @ GaussianChannel(var=noise_var) @ O("y")).to_model()
    scenario = BayesOptimalScenario(model, x_ids=["x"])
    scenario.setup(seed)
    y = scenario.observations["y"]
    # Fit the Lasso with the rescaled regularization parameter.
    param_scikit = noise_var * param_scaled / (M * rho)
    estimator = Lasso(alpha=param_scikit)
    estimator.fit(A, y)
    return mean_squared_error(estimator.coef_, scenario.x_true["x"])
# --- Example #7 ---
def run_benchmark(alpha, algo, seed):
    """Benchmark one solver on a sparse linear regression instance.

    Parameters
    ----------
    alpha : float
        Measurement ratio M/N.
    algo : str
        One of "SE", "EP", "LassoCV", "Lasso", "pymc3".
    seed : int
        Seed passed to `scenario.setup`.

    Returns
    -------
    dict
        Record with "svd_time", "time", "mse", and (depending on algo)
        "n_iter" / "param_scikit".

    Raises
    ------
    ValueError
        If `algo` is not recognized.  Previously an unknown algo fell
        through every `if` and surfaced later as a NameError on the
        undefined `x_pred`.
    """
    # create scenario
    N, rho, noise_var = 1000, 0.05, 1e-2
    M = int(alpha * N)
    A = GaussianEnsemble(M=M, N=N).generate()
    t0 = time()
    model = (GaussBernoulliPrior(size=N, rho=rho) @ V("x") @ LinearChannel(A)
             @ V("z") @ GaussianChannel(var=noise_var) @ O("y")).to_model()
    t1 = time()
    record = {"svd_time": t1 - t0}  # svd precomputation time
    scenario = BayesOptimalScenario(model, x_ids=["x"])
    scenario.setup(seed)
    y = scenario.observations["y"]
    # run algo -- branches are mutually exclusive, so use an elif chain
    t0 = time()
    if algo == "SE":
        # State evolution predicts the mse (variance v); no estimate needed.
        x_data = scenario.run_se(max_iter=1000, damping=0.1)
        record["mse"] = x_data["x"]["v"]
        record["n_iter"] = x_data["n_iter"]
    elif algo == "EP":
        x_data = scenario.run_ep(max_iter=1000, damping=0.1)
        x_pred = x_data["x"]["r"]
        record["n_iter"] = x_data["n_iter"]
    elif algo == "LassoCV":
        # Cross-validated choice of the regularization parameter.
        lasso = LassoCV(cv=5)
        lasso.fit(A, y)
        x_pred = lasso.coef_
        record["param_scikit"] = lasso.alpha_
        record["n_iter"] = lasso.n_iter_
    elif algo == "Lasso":
        # Interpolate the pre-computed optimal regularization for this alpha.
        optim = pd.read_csv("optimal_param_lasso.csv")
        param_scaled = np.interp(alpha, optim["alpha"], optim["param_scaled"])
        param_scikit = noise_var * param_scaled / (M * rho)
        lasso = Lasso(alpha=param_scikit)
        lasso.fit(A, y)
        x_pred = lasso.coef_
        record["param_scikit"] = param_scikit
        record["n_iter"] = lasso.n_iter_
    elif algo == "pymc3":
        # MCMC baseline: spike-and-slab prior as Bernoulli * Normal.
        with pm.Model():
            ber = pm.Bernoulli("ber", p=rho, shape=N)
            nor = pm.Normal("nor", mu=0, sd=1, shape=N)
            x = pm.Deterministic("x", ber * nor)
            # Registered in the model context; binding it to a name was unused.
            pm.Normal("y",
                      mu=pm.math.dot(A, x),
                      sigma=np.sqrt(noise_var),
                      observed=y)
            trace = pm.sample(draws=1000, chains=1, return_inferencedata=False)
        x_pred = trace.get_values('x').mean(axis=0)
    else:
        raise ValueError(f"Unknown algo: {algo!r}")
    t1 = time()
    record["time"] = t1 - t0
    if algo != "SE":
        record["mse"] = mean_squared_error(x_pred, scenario.x_true["x"])
    return record
# --- Example #8 ---
from tramp.experiments import BayesOptimalScenario, qplot, plot_compare

# %%
# Model
# -----
# We wish to infer the binary signal
# $x \sim \mathrm{Bin}( . | p_+) \in \pm^N$ from
# $y = \mathrm{sgn}(Fx) \in \pm^M$, where
# $F  \in \mathbb{R}^{M \times N}$ is a Gaussian random matrix.
# You can build the perceptron directly, or use the `glm_generative` model builder.
teacher = glm_generative(N=1000,
                         alpha=1.7,
                         ensemble_type="gaussian",
                         prior_type="binary",
                         output_type="sgn")
# Bayes-optimal setting: the student model matches the teacher.
scenario = BayesOptimalScenario(teacher, x_ids=["x"])
# NOTE(review): setup presumably samples x_true and the observations -- confirm.
scenario.setup(seed=42)
scenario.student.plot()

# %%
# EP dynamics: track the mse per iteration, stopping early on convergence.
ep_evo = scenario.ep_convergence(metrics=["mse"],
                                 max_iter=30,
                                 callback=EarlyStoppingEP())
# Measured mse (markers) against the EP posterior variance v (line).
qplot(ep_evo, x="iter", y=["mse", "v"], y_markers=[".", "-"], y_legend=True)

# %%
# Recovered signal vs ground truth.
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])

# %%
# Model
# "door" output channel on a binary signal; both x and z are inferred.
alpha = 1.6
N = 1000
teacher = glm_generative(N=N,
                         alpha=alpha,
                         ensemble_type="gaussian",
                         prior_type="binary",
                         output_type="door",
                         output_width=1.,
                         prior_p_pos=0.51)

# Inspect the factors composing the teacher model.
for factor in teacher.factors:
    print(factor)

scenario = BayesOptimalScenario(teacher, x_ids=["x", "z"])
scenario.setup(seed=42)
scenario.student.plot()

# %%
# EP dynamics (damped for stability on this output channel).
ep_evo = scenario.ep_convergence(metrics=["mse", "sign_mse"],
                                 damping=0.1,
                                 max_iter=40)
# One column per tracked variable id ("x" and "z").
qplot(ep_evo,
      x="iter",
      y=["mse", "sign_mse", "v"],
      y_markers=["x", ".", "-"],
      column="id",
      y_legend=True)
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])
# --- Example #10 ---
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

# %%
# Model
# Complex phase retrieval: recover a sparse complex signal from the
# modulus of its Gaussian projections.
np.random.seed(42)

# NOTE(review): prior_type is spelled "gauss_bernouilli" here while other
# examples use "gauss_bernoulli" -- confirm which spelling the library expects.
model = glm_generative(N=1000,
                       alpha=2,
                       ensemble_type="complex_gaussian",
                       prior_type="gauss_bernouilli",
                       output_type="modulus",
                       prior_mean=0.01,
                       prior_rho=0.5)
scenario = BayesOptimalScenario(model, x_ids=["x"])
scenario.setup()
scenario.student.plot()

# List the factors of the inference (student) model.
for factor in scenario.student.factors:
    print(factor.id, factor)

# %%
# EP dynamics
# Damping is needed
# really bad without damping
ep_evo = scenario.ep_convergence(metrics=["mse", "phase_mse"], max_iter=20)
qplot(ep_evo,
      x="iter",
      y=["phase_mse", "v"],
      y_markers=["x", "-"],
# --- Example #11 --- (the qplot call above is truncated in this extract)
import logging
logging.basicConfig(level=logging.INFO)


# %%
# Model
# ReLU output channel on a sparse (Gauss-Bernoulli) signal;
# both x and z are tracked.

alpha = 2.
N = 1000
teacher = glm_generative(
    N=N, alpha=alpha, ensemble_type="gaussian", prior_type="gauss_bernoulli",
    output_type="relu", prior_rho=0.5
)
# Inspect the factors composing the teacher model.
for factor in teacher.factors:
    print(factor)
scenario = BayesOptimalScenario(teacher, x_ids=["x", "z"])
scenario.setup(seed=42)
scenario.student.plot()


# %%
# EP dynamics
ep_evo = scenario.ep_convergence(metrics=["mse"], max_iter=10)
# One column per tracked variable id ("x" and "z").
qplot(
    ep_evo, x="iter", y=["mse", "v"],
    y_markers=[".", "-"], column="id", y_legend=True
)
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])

# %%
# MSE curve