def run_perceptron(N, alpha, p_pos):
    """Run the Bayes-optimal perceptron experiment.

    Builds a sgn-output GLM teacher with a gaussian sensing ensemble and a
    binary prior (P[x = +1] = p_pos), then runs all message-passing
    algorithms of the scenario until convergence.

    Returns the records produced by ``scenario.run_all``.
    """
    teacher = glm_generative(
        N=N, alpha=alpha, ensemble_type="gaussian",
        prior_type="binary", output_type="sgn", prior_p_pos=p_pos
    )
    scenario = BayesOptimalScenario(teacher, x_ids=["x"])
    # EarlyStopping halts the iterations once the metrics stop improving.
    return scenario.run_all(max_iter=200, callback=EarlyStopping())
def run_cs(N, alpha, ensemble_type, prior_rho):
    """Run the Bayes-optimal compressed sensing experiment.

    Gauss-bernoulli prior of sparsity ``prior_rho`` and a gaussian output
    channel with essentially no noise (output_var = 1e-11), i.e. noiseless
    linear measurements y = F x.

    Returns the mse records produced by ``scenario.run_all``.
    """
    teacher = glm_generative(N=N,
                             alpha=alpha,
                             ensemble_type=ensemble_type,
                             prior_type="gauss_bernoulli",
                             output_type="gaussian",
                             prior_rho=prior_rho,
                             output_var=1e-11)
    scenario = BayesOptimalScenario(teacher, x_ids=["x"])
    stop = EarlyStopping()
    return scenario.run_all(metrics=["mse"], max_iter=200, callback=stop)
def run_EP(alpha, rho, seed):
    """Single EP run on the sign-less (abs output) model at N = 2000.

    Because y = |F x| leaves the global sign of x unidentifiable, the
    error is measured with the sign-symmetric MSE.

    Returns a dict with source="EP" and v = the sign-symmetric MSE.
    """
    teacher = glm_generative(
        N=2000, alpha=alpha, ensemble_type="gaussian",
        prior_type="gauss_bernoulli", output_type="abs",
        prior_rho=rho, prior_mean=0
    )
    scenario = BayesOptimalScenario(teacher, x_ids=["x"])
    scenario.setup(seed)
    # Damping is used to stabilize the EP iterations on this model.
    x_data = scenario.run_ep(max_iter=200, damping=0.2)
    mse = sign_symmetric_mse(x_data["x"]["r"], scenario.x_true["x"])
    return dict(source="EP", v=mse)
# Example #4
def run_EP(alpha, rho, seed):
    """Single EP run on the compressed sensing model at N = 2000.

    Gaussian output channel with near-zero noise (output_var = 1e-10)
    and a gauss-bernoulli prior of sparsity ``rho``.

    Returns a dict with source="EP" and v = the MSE of the EP estimate.
    """
    teacher = glm_generative(
        N=2000, alpha=alpha, ensemble_type="gaussian",
        prior_type="gauss_bernoulli", output_type="gaussian",
        prior_rho=rho, output_var=1e-10
    )
    scenario = BayesOptimalScenario(teacher, x_ids=["x"])
    scenario.setup(seed)
    x_data = scenario.run_ep(max_iter=200)
    mse = mean_squared_error(x_data["x"]["r"], scenario.x_true["x"])
    return dict(source="EP", v=mse)
# Example #5
def run_phase_retrieval(N, alpha, prior_mean):
    """Run the Bayes-optimal phase retrieval experiment.

    Complex gaussian sensing matrix, sparse (rho = 0.5) signal with a
    gauss-bernoulli prior, and a modulus output y = |F x|.

    Returns the mse / phase_mse records produced by ``scenario.run_all``.
    """
    # NOTE(review): "gauss_bernouilli" is spelled differently from the
    # "gauss_bernoulli" used elsewhere in this file -- confirm which
    # spelling the glm_generative builder registers before changing it.
    teacher = glm_generative(
        N=N, alpha=alpha, ensemble_type="complex_gaussian",
        prior_type="gauss_bernouilli", output_type="modulus",
        prior_mean=prior_mean, prior_rho=0.5
    )
    scenario = BayesOptimalScenario(teacher, x_ids=["x"])
    # Allow the metric to increase for 10 iterations before stopping.
    stop = EarlyStopping(wait_increase=10)
    return scenario.run_all(metrics=["mse", "phase_mse"],
                            max_iter=200,
                            damping=0.3,
                            callback=stop)
# Example #6
import pandas as pd
from tramp.algos import EarlyStoppingEP
from tramp.models import glm_generative
from tramp.experiments import BayesOptimalScenario, qplot, plot_compare

# %%
# Model
# -----
# We wish to infer the binary signal
# $x \sim \mathrm{Bin}( . | p_+) \in \pm^N$ from
# $y = \mathrm{sgn}(Fx) \in \pm^M$, where
# $F  \in \mathbb{R}^{M \times N}$ is a Gaussian random matrix.
# You can build the perceptron directly, or use the `glm_generative` model builder.
# Teacher network: y = sgn(F x) with x binary and F a gaussian random
# matrix of aspect ratio alpha = M/N = 1.7.
teacher = glm_generative(N=1000,
                         alpha=1.7,
                         ensemble_type="gaussian",
                         prior_type="binary",
                         output_type="sgn")
# Bayes-optimal setting: the student uses the same model as the teacher.
scenario = BayesOptimalScenario(teacher, x_ids=["x"])
scenario.setup(seed=42)  # draw a reproducible problem instance
scenario.student.plot()  # display the student factor graph

# %%
# EP dynamics
# Track the mse along the EP iterations; EarlyStoppingEP halts once the
# messages have converged.
ep_evo = scenario.ep_convergence(metrics=["mse"],
                                 max_iter=30,
                                 callback=EarlyStoppingEP())
qplot(ep_evo, x="iter", y=["mse", "v"], y_markers=[".", "-"], y_legend=True)

# %%
# Recovered signal
# Setup
from tramp.algos import EarlyStoppingEP
from tramp.models import glm_generative
from tramp.experiments import BayesOptimalScenario, qplot, plot_compare
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

# %%
# Model
# Teacher network: door output channel of width 1 applied to z = F x,
# with a binary signal slightly biased toward +1 (p_+ = 0.51).
alpha = 1.6
N = 1000
teacher = glm_generative(N=N,
                         alpha=alpha,
                         ensemble_type="gaussian",
                         prior_type="binary",
                         output_type="door",
                         output_width=1.,
                         prior_p_pos=0.51)

# Inspect the factors composing the teacher model.
for factor in teacher.factors:
    print(factor)

# Infer both the signal x and the pre-activation z.
scenario = BayesOptimalScenario(teacher, x_ids=["x", "z"])
scenario.setup(seed=42)  # draw a reproducible problem instance
scenario.student.plot()  # display the student factor graph

# %%
# EP dyanmics
ep_evo = scenario.ep_convergence(metrics=["mse", "sign_mse"],
                                 damping=0.1,
# Example #8
# Setup
from tramp.algos import EarlyStoppingEP
from tramp.experiments import BayesOptimalScenario, qplot, plot_compare_complex
from tramp.models import glm_generative
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

# %%
# Model
# Phase retrieval teacher: y = |F x| with a complex gaussian sensing
# matrix at alpha = 2 and a sparse (rho = 0.5) signal.
np.random.seed(42)

# NOTE(review): "gauss_bernouilli" is spelled differently from the
# "gauss_bernoulli" used elsewhere in this file -- confirm which spelling
# the glm_generative builder registers.
model = glm_generative(N=1000,
                       alpha=2,
                       ensemble_type="complex_gaussian",
                       prior_type="gauss_bernouilli",
                       output_type="modulus",
                       prior_mean=0.01,
                       prior_rho=0.5)
scenario = BayesOptimalScenario(model, x_ids=["x"])
scenario.setup()  # draw a problem instance (no explicit seed here)
scenario.student.plot()  # display the student factor graph

# Inspect the factors composing the student model.
for factor in scenario.student.factors:
    print(factor.id, factor)

# %%
# EP dynamics
# Damping is needed
# really bad without damping
ep_evo = scenario.ep_convergence(metrics=["mse", "phase_mse"], max_iter=20)
from tramp.experiments import run_experiments, qplot, plot_compare
from tramp.models import glm_generative
from tramp.experiments import BayesOptimalScenario
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import logging
logging.basicConfig(level=logging.INFO)

# %%
# Model
# Teacher network: y = |F x| (absolute value output) with a sparse
# gauss-bernoulli signal (rho = 0.5, mean 0.1) at alpha = 1.2.
np.random.seed(42)
teacher = glm_generative(N=1000,
                         alpha=1.2,
                         ensemble_type="gaussian",
                         prior_type="gauss_bernoulli",
                         output_type="abs",
                         prior_rho=0.5,
                         prior_mean=0.1)
# Inspect the factors composing the teacher model.
for factor in teacher.factors:
    print(factor)
# BUGFIX: the scenario was mis-indented inside the loop above and hence
# rebuilt on every iteration; create it once, after the loop.
scenario = BayesOptimalScenario(teacher, x_ids=["x", "z"])
scenario.setup()  # draw a problem instance (no explicit seed here)
scenario.student.plot()  # display the student factor graph

# %%
# EP dynamics (damped to stabilize the iterations on this model)
ep_evo = scenario.ep_convergence(metrics=["mse", "sign_mse"],
                                 damping=0.1,
                                 max_iter=20)
qplot(ep_evo,
# Example #10
from tramp.models import glm_generative
from tramp.experiments import BayesOptimalScenario
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import logging
logging.basicConfig(level=logging.INFO)


# %%
# Model

# Teacher network: y = relu(F x) with a sparse (rho = 0.5)
# gauss-bernoulli signal at alpha = 2.
alpha = 2.
N = 1000
teacher = glm_generative(
    N=N, alpha=alpha, ensemble_type="gaussian", prior_type="gauss_bernoulli",
    output_type="relu", prior_rho=0.5
)
# Inspect the factors composing the teacher model.
for factor in teacher.factors:
    print(factor)
# Infer both the signal x and the pre-activation z.
scenario = BayesOptimalScenario(teacher, x_ids=["x", "z"])
scenario.setup(seed=42)  # draw a reproducible problem instance
scenario.student.plot()  # display the student factor graph


# %%
# EP dynamics
ep_evo = scenario.ep_convergence(metrics=["mse"], max_iter=10)
# NOTE(review): qplot is not imported by this snippet's import block; it is
# in scope only through the earlier `from tramp.experiments import qplot`
# lines in this file -- confirm before extracting this example on its own.
qplot(
    ep_evo, x="iter", y=["mse", "v"],
    y_markers=[".", "-"], column="id", y_legend=True
)