Example 1
    def build_model(self):
        # Gauss-Bernoulli (sparse) prior on the signal x
        self.prior = GaussBernouilliPrior(size=(self.N,), rho=self.rho)
        # i.i.d. Gaussian sensing matrix A of shape (M, N)
        ensemble = GaussianEnsemble(self.M, self.N)
        self.A = ensemble.generate()
        # factor graph: x -> z = A x -> noisy observation y
        model = (self.prior @ V(id="x") @ LinearChannel(W=self.A)
                 @ V(id="z") @ GaussianChannel(var=self.Delta) @ O(id="y"))
        return model.to_model()
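A possible usage sketch for a model built this way, following the BayesOptimalScenario pattern of Example 4 below; the instance name obj, the seed and the EP settings are illustrative assumptions, and the tramp imports are taken for granted as in the other examples.

# Hypothetical usage sketch (obj = an instance of the class defining build_model)
import numpy as np

model = obj.build_model()
scenario = BayesOptimalScenario(model, x_ids=["x"])
scenario.setup(seed=0)
x_data = scenario.run_ep(max_iter=200, damping=0.1)
mse = np.mean((x_data["x"]["r"] - scenario.x_true["x"]) ** 2)
print("EP mse:", mse)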
Example 2
def build_sparse_fft_student(size, prior_var, fft_rho, fft_var, noise_var):
    x_shape = (size,)
    fft_shape = (2,) + x_shape
    # x has a Gaussian prior and feeds two branches:
    #   1) a noisy observation y,
    #   2) its (real) DFT z, constrained by a Gauss-Bernoulli (sparse) prior.
    student = (
        GaussianPrior(size=size, var=prior_var)
        @ SIMOVariable(id="x", n_next=2)
        @ (GaussianChannel(var=noise_var) @ O("y")
           + (DFTChannel(real=True)
              + GaussBernouilliPrior(size=fft_shape, var=fft_var, rho=fft_rho))
           @ MILeafVariable(id="z", n_prev=2))
    ).to_model()
    return student
Example 3
def build_sparse_grad_student(N, rho, noise_var):
    x_shape = (N,)
    z_shape = (1, N)
    # x has a Gaussian prior and feeds two branches:
    #   1) a noisy observation y,
    #   2) its gradient z, constrained by a Gauss-Bernoulli (sparse) prior.
    student = (
        GaussianPrior(size=x_shape)
        @ SIMOVariable(id="x", n_next=2)
        @ (GaussianChannel(var=noise_var) @ O("y")
           + (GradientChannel(shape=x_shape)
              + GaussBernoulliPrior(size=z_shape, rho=rho))
           @ MILeafVariable(id="z", n_prev=2))
    ).to_model()
    return student
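As a quick, hedged illustration (the argument values are assumptions), the student graph returned here can be built and its factor graph drawn with the same plot() helper used in the last example.

# Illustrative only: build the sparse-gradient student and draw its factor graph.
student = build_sparse_grad_student(N=400, rho=0.1, noise_var=1e-2)
student.plot()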
Example 4
def run_benchmark(alpha, algo, seed):
    # create scenario
    N, rho, noise_var = 1000, 0.05, 1e-2
    M = int(alpha * N)
    A = GaussianEnsemble(M=M, N=N).generate()
    t0 = time()
    model = (GaussBernoulliPrior(size=N, rho=rho) @ V("x") @ LinearChannel(A)
             @ V("z") @ GaussianChannel(var=noise_var) @ O("y")).to_model()
    t1 = time()
    record = {"svd_time": t1 - t0}  # svd precomputation time
    scenario = BayesOptimalScenario(model, x_ids=["x"])
    scenario.setup(seed)
    y = scenario.observations["y"]
    # run algo
    t0 = time()
    if algo == "SE":
        x_data = scenario.run_se(max_iter=1000, damping=0.1)
        record["mse"] = x_data["x"]["v"]
        record["n_iter"] = x_data["n_iter"]
    elif algo == "EP":
        x_data = scenario.run_ep(max_iter=1000, damping=0.1)
        x_pred = x_data["x"]["r"]
        record["n_iter"] = x_data["n_iter"]
    elif algo == "LassoCV":
        lasso = LassoCV(cv=5)
        lasso.fit(A, y)
        x_pred = lasso.coef_
        record["param_scikit"] = lasso.alpha_
        record["n_iter"] = lasso.n_iter_
    elif algo == "Lasso":
        # penalty interpolated from the precomputed optimal values
        optim = pd.read_csv("optimal_param_lasso.csv")
        param_scaled = np.interp(alpha, optim["alpha"], optim["param_scaled"])
        param_scikit = noise_var * param_scaled / (M * rho)
        lasso = Lasso(alpha=param_scikit)
        lasso.fit(A, y)
        x_pred = lasso.coef_
        record["param_scikit"] = param_scikit
        record["n_iter"] = lasso.n_iter_
    elif algo == "pymc3":
        with pm.Model():
            # Gauss-Bernoulli prior: x = ber * nor
            ber = pm.Bernoulli("ber", p=rho, shape=N)
            nor = pm.Normal("nor", mu=0, sigma=1, shape=N)
            x = pm.Deterministic("x", ber * nor)
            pm.Normal("y", mu=pm.math.dot(A, x),
                      sigma=np.sqrt(noise_var), observed=y)
            trace = pm.sample(draws=1000, chains=1, return_inferencedata=False)
        x_pred = trace.get_values('x').mean(axis=0)
    else:
        raise ValueError(f"Unknown algo: {algo}")
    t1 = time()
    record["time"] = t1 - t0
    if algo != "SE":
        record["mse"] = mean_squared_error(x_pred, scenario.x_true["x"])
    return record
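A sketch of how run_benchmark might be swept over measurement ratios, solvers and seeds, with the records collected into a DataFrame; the grid values below are assumptions, not taken from the original script.

import numpy as np
import pandas as pd

records = []
for alpha in np.linspace(0.2, 1.0, 9):
    for algo in ["SE", "EP", "LassoCV"]:
        for seed in range(5):
            record = run_benchmark(alpha, algo, seed)
            record.update(alpha=alpha, algo=algo, seed=seed)
            records.append(record)
df = pd.DataFrame(records)
print(df.groupby(["alpha", "algo"])["mse"].mean())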
Example 5
    def sample_from_prior(self, prior_x):
        if self.plot_prior_sample:
            model = prior_x @ O(id="y")
            model = model.to_model_dag()
            prior_sample = model.to_model()
            fig, axs = plt.subplots(4, 4, figsize=(4, 4))
            for ax in axs.ravel():
                sample = prior_sample.sample()['y']
                ax.set_axis_off()
                ax.imshow(sample.reshape(28, 28), cmap="gray")

            plt.show()
Example 6
def mse_lasso(alpha, param_scaled, seed):
    # create scenario
    N, rho, noise_var = 1000, 0.05, 1e-2
    M = int(alpha * N)
    A = GaussianEnsemble(M=M, N=N).generate()
    model = (GaussBernoulliPrior(size=N, rho=rho) @ V("x") @ LinearChannel(A)
             @ V("z") @ GaussianChannel(var=noise_var) @ O("y")).to_model()
    scenario = BayesOptimalScenario(model, x_ids=["x"])
    scenario.setup(seed)
    y = scenario.observations["y"]
    # run lasso
    param_scikit = noise_var * param_scaled / (M * rho)
    lasso = Lasso(alpha=param_scikit)
    lasso.fit(A, y)
    x_pred = lasso.coef_
    mse = mean_squared_error(x_pred, scenario.x_true["x"])
    return mse
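mse_lasso is presumably meant for tuning the rescaled Lasso penalty (the optimal_param_lasso.csv read in Example 4); a minimal grid-search sketch, with an assumed grid, measurement ratio and number of seeds:

import numpy as np

# Illustrative grid search; the grid, alpha and seeds are assumptions.
params = np.logspace(-1, 1, 21)
avg_mse = [
    np.mean([mse_lasso(alpha=0.5, param_scaled=p, seed=s) for s in range(5)])
    for p in params
]
best_param = params[np.argmin(avg_mse)]
print("best param_scaled:", best_param)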
Example 7
    def sample_from_prior(self, prior_x):
        """Sample from the prior and display a 4x4 grid of samples."""
        # attach an observed leaf y so the prior can be sampled
        prior_sample = (prior_x @ O(id="y")).to_model()
        fig, axs = plt.subplots(4, 4, figsize=(4, 4))
        for ax in axs.ravel():
            sample = prior_sample.sample()['y']
            ax.set_axis_off()
            ax.imshow(sample.reshape(28, 28), cmap="gray")

        dir_fig = 'Figures/'
        os.makedirs(dir_fig, exist_ok=True)
        file_name = f"{dir_fig}Prior_{self.prior_params['name']}_{self.prior_params['type']}_{self.prior_params['id']}.pdf"
        plt.savefig(file_name, format='pdf', dpi=1000,
                    bbox_inches="tight", pad_inches=0.1)
        plt.show(block=True)
        input("...")
        plt.close()
Example 8
def conv_sensing(measurement_ratios,
                 filter_size,
                 solver="AMP",
                 sparse_in_dct=False,
                 width=50,
                 depth=20,
                 delta=1e-2,
                 sparsity=0.5,
                 n_rep=10):

    N = width * depth
    signal = GaussBernouilliPrior(size=(N, ), rho=sparsity)

    def sample_trnsf():
        if sparse_in_dct:
            # block-diagonal DCT synthesis matrix, one block per channel
            D = scipy.linalg.block_diag(
                *[dctmtx(width).T for _ in range(depth)])
        else:
            D = np.eye(N)
        return D

    recovery_per_alpha = []

    for alpha in measurement_ratios:
        recoveries = []
        for rep in range(n_rep):
            out_channels = int(np.rint(alpha * depth))
            ensemble = ChannelConvEnsemble(width=width,
                                           in_channels=depth,
                                           out_channels=out_channels,
                                           k=filter_size)
            A = ensemble.generate()
            C = sample_trnsf()

            teacher = conv_model(
                A, C, signal) @ GaussianChannel(var=delta) @ O(id="y")
            teacher = teacher.to_model()
            sample = teacher.sample()

            if solver == "AMP":

                max_iter = 20
                damping = 0.1

                student = conv_model(A, C, signal) @ GaussianLikelihood(
                    y=sample['y'], var=delta)
                student = student.to_model_dag()
                student = student.to_model()
                ep = ExpectationPropagation(student)
                ep.iterate(max_iter=max_iter,
                           damping=damping,
                           callback=EarlyStopping(tol=1e-8))
                data_ep = ep.get_variables_data(['x'])
                mse = np.mean((data_ep['x']['r'] - sample['x'])**2)
                recoveries.append(mse)

            elif solver == "CVX":

                reg_param = 0.001

                x = cp.Variable(shape=(N, ), name="x")
                lmbda = cp.Parameter(nonneg=True)
                objective = cp.norm2(A @ C @ x -
                                     sample['y'])**2 + lmbda * cp.norm1(x)
                problem = cp.Problem(cp.Minimize(objective))
                lmbda.value = reg_param
                problem.solve(abstol=1e-6)
                mse = np.mean((x.value - sample['x'])**2)
                recoveries.append(mse)
            else:
                raise ValueError("Solver must be 'AMP' or 'CVX'")
        recovery_per_alpha.append(recoveries)
    return recovery_per_alpha
Example 9
def recovery(measurement_ratios,
             filter_size,
             solver="AMP",
             prior="conv",
             sparse_in_dct=False,
             N=1000,
             delta=1e-2,
             sparsity=0.5,
             n_rep=10):

    signal = GaussBernouilliPrior(size=(N, ), rho=sparsity)
    prior_conv_ens = ConvEnsemble(N, filter_size)

    def sample_trnsf():
        if sparse_in_dct:
            D = dctmtx(N).T
        else:
            D = np.eye(N)

        if prior == "conv":
            C = prior_conv_ens.generate()
        elif prior == "sparse":
            C = np.eye(N)
        else:
            raise ValueError("Prior must be 'conv' or 'sparse'")
        return D @ C

    recovery_per_alpha = []

    for alpha in measurement_ratios:
        recoveries = []
        for rep in range(n_rep):
            M = int(alpha * N)
            ensemble = GaussianEnsemble(M, N)
            A = ensemble.generate()
            C = sample_trnsf()

            teacher = conv_model(
                A, C, signal) @ GaussianChannel(var=delta) @ O(id="y")
            teacher = teacher.to_model()
            sample = teacher.sample()

            if solver == "AMP":

                max_iter = 20
                damping = 0.1

                student = conv_model(A, C, signal) @ GaussianLikelihood(
                    y=sample['y'], var=delta)
                student = student.to_model_dag()
                student = student.to_model()
                ep = ExpectationPropagation(student)
                ep.iterate(max_iter=max_iter,
                           damping=damping,
                           callback=EarlyStopping(tol=1e-8))
                data_ep = ep.get_variables_data(['x'])
                mse = np.mean((data_ep['x']['r'] - sample['x'])**2)
                recoveries.append(mse)

            elif solver == "CVX":

                reg_param = 0.001

                x = cp.Variable(shape=(N, ), name="x")
                lmbda = cp.Parameter(nonneg=True)
                objective = cp.norm2(A @ C @ x -
                                     sample['y'])**2 + lmbda * cp.norm1(x)
                problem = cp.Problem(cp.Minimize(objective))
                lmbda.value = reg_param
                problem.solve(abstol=1e-6)
                mse = np.mean((x.value - sample['x'])**2)
                recoveries.append(mse)
            else:
                raise ValueError("Solver must be 'AMP' or 'CVX'")
        recovery_per_alpha.append(recoveries)
    return recovery_per_alpha
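A hedged post-processing sketch for the list of per-ratio recoveries returned above; the measurement ratios and filter size are illustrative assumptions.

import numpy as np
import matplotlib.pyplot as plt

ratios = np.linspace(0.1, 1.0, 10)
recoveries = recovery(ratios, filter_size=3, solver="AMP", prior="conv")
median_mse = [np.median(r) for r in recoveries]  # median over the n_rep repetitions
plt.semilogy(ratios, median_mse, marker="o")
plt.xlabel("measurement ratio alpha")
plt.ylabel("median MSE")
plt.show()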
Example 10
        self.size = x.shape

    def math(self):
        return "coon"

    def sample(self):
        return self.x


coon = CoonPrior()
x_shape = coon.size
noise = GaussianChannel(var=0.5)
grad_shape = (2,) + x_shape
teacher = (
    coon @ SIMOVariable("x", n_next=2) @ (
        GradientChannel(x_shape) @ O("x'") +
        noise @ O("y")
    )
).to_model()
# teacher.plot()

sample = teacher.sample()
plot_histograms(sample)

plot_data(sample, sample["y"])

# %%
# Sparse gradient denoising
# sparse grad denoiser
grad_shape = (2,) + x_shape
Example 11
    def __init__(self):
        x = face(gray=True).astype(np.float32)
        x = (x - x.mean()) / x.std()
        self.x = x
        self.size = x.shape

    def sample(self):
        return self.x


prior = RaccoonPrior()
x_shape = prior.size
noise = GaussianChannel(var=0.5)
grad_shape = (2, ) + x_shape
teacher = (prior @ SIMOVariable("x", n_next=2)
           @ (GradientChannel(x_shape) @ O("x'") + noise @ O("y"))).to_model()
sample = teacher.sample()
plot_histograms(sample)
plot_data(sample, sample["y"])

# %%
# Sparse gradient denoising
grad_shape = (2, ) + x_shape
sparse_grad = (GaussianPrior(size=x_shape) @ SIMOVariable(id="x", n_next=2)
               @ (noise @ O("y") +
                  (GradientChannel(shape=x_shape) +
                   GaussBernoulliPrior(size=grad_shape, var=0.7, rho=0.9))
                  @ MILeafVariable(id="x'", n_prev=2))).to_model()
scenario = TeacherStudentScenario(teacher, sparse_grad, x_ids=["x", "x'"])
scenario.setup(seed=1)
scenario.student.plot()
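A possible next step, not shown in the original snippet, assuming TeacherStudentScenario exposes the same run_ep helper as BayesOptimalScenario in Example 4:

# Assumption: TeacherStudentScenario provides run_ep() like BayesOptimalScenario.
ep_data = scenario.run_ep(max_iter=200, damping=0.1)
x_hat = ep_data["x"]["r"]      # denoised image estimate
grad_hat = ep_data["x'"]["r"]  # estimated sparse gradient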