def build_model(self):
    # Sparse linear regression teacher: x ~ Gauss-Bernoulli, z = A x, y = z + Gaussian noise
    self.prior = GaussBernoulliPrior(size=(self.N,), rho=self.rho)
    ensemble = GaussianEnsemble(self.M, self.N)
    self.A = ensemble.generate()
    model = (self.prior @ V(id="x") @ LinearChannel(W=self.A) @ V(id="z") @
             GaussianChannel(var=self.Delta) @ O(id="y"))
    model = model.to_model()
    return model
def init_model(self, prior_x):
    """Init the model for the inpainting/denoising task."""
    self.y_ids = ['y']
    if self.model_params['name'] == 'denoising':
        model = prior_x @ V(id="x")
        self.x_ids = ['x']
    elif self.model_params['name'] == 'inpainting':
        # Create sensing matrix
        N = self.model_params['N']
        F = np.identity(N)
        p_rem = self.model_params['p_rem']
        ## Remove a band ##
        if self.model_params['type'] == 'band':
            N_rem = int(p_rem * N / 100)
            id_0 = int(N / 2) - int(N_rem / 2)
            for rem in range(id_0, id_0 + N_rem):
                F[rem, rem] = 0
        ## Remove randomly ##
        if self.model_params['type'] == 'uniform':
            tab = np.arange(1, N)
            np.random.shuffle(tab)
            for i in range(int(p_rem * N / 100)):
                rem = tab[i]
                F[rem, rem] = 0
        ## Diagonal ##
        if self.model_params['type'] == 'diagonal':
            l = int(p_rem * 28 / 100)
            for j in range(-int(l / 2), int(l / 2), 1):
                for i in range(1, 27, 1):
                    ind = i * 28 + i + j
                    F[ind, ind] = 0
                    ind = i * 28 - i - j
                    F[ind, ind] = 0
        F_tot = F
        # Drop the all-zero rows: only the observed pixels remain in F_obs
        F_obs = np.delete(F, np.where(~F.any(axis=0))[0], axis=0)
        self.F = F_obs
        self.F_tot = F_tot
        # Model
        model = prior_x @ V(id="x") @ LinearChannel(F_obs, name="F") @ V(id="z")
        # Variables
        self.x_ids = ['x']
    else:
        raise NotImplementedError
    return model
def init_model(self, prior_x):
    self.y_ids = ['y']
    if self.model_params['name'] == 'denoising':
        # Model
        model = prior_x @ V(id="x")
        # Variables
        self.x_ids = ['x']
        self.list_var.extend(['x'])
    elif self.model_params['name'] == 'inpainting':
        # Create sensing matrix
        N_rem = self.model_params['N_rem']
        N = self.model_params['N']
        F = np.identity(N)
        ## Remove a band ##
        if self.model_params['type'] == 'band':
            id_0 = int(N / 2) - int(N_rem / 2)
            for rem in range(id_0, id_0 + N_rem):
                F[rem, rem] = 0
        ## Remove randomly ##
        if self.model_params['type'] == 'random':
            # Note: indices may repeat, so fewer than N_rem pixels can end up removed
            for i in range(N_rem):
                rem = random.randrange(1, N, 1)
                F[rem, rem] = 0
        ## Diagonal ##
        if self.model_params['type'] == 'diagonal':
            l = 4
            for j in range(-int(l / 2), int(l / 2), 1):
                for i in range(1, 27, 1):
                    ind = i * 28 + i + j
                    F[ind, ind] = 0
                    ind = i * 28 - i - j
                    F[ind, ind] = 0
        F_tot = F
        # Drop the all-zero rows: only the observed pixels remain in F_obs
        F_obs = np.delete(F, np.where(~F.any(axis=0))[0], axis=0)
        self.F = F_obs
        self.F_tot = F_tot
        # Model
        model = prior_x @ V(id="x") @ LinearChannel(F_obs, name="F") @ V(id="z")
        # Variables
        self.x_ids = ['x']
        self.list_var.extend(['x'])
    else:
        raise NotImplementedError
    return model
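# Hedged illustration (not from the original code): a minimal, standalone sketch
# of how the 'band' sensing matrix above behaves for a small N. Removing N_rem
# diagonal entries and then deleting the all-zero rows yields an
# (N - N_rem) x N selection matrix F_obs.
import numpy as np

N, N_rem = 8, 4
F = np.identity(N)
id_0 = int(N / 2) - int(N_rem / 2)
for rem in range(id_0, id_0 + N_rem):
    F[rem, rem] = 0
F_obs = np.delete(F, np.where(~F.any(axis=0))[0], axis=0)
assert F_obs.shape == (N - N_rem, N)  # only the observed pixels remain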
def run_benchmark(alpha, algo, seed):
    # create scenario
    N, rho, noise_var = 1000, 0.05, 1e-2
    M = int(alpha * N)
    A = GaussianEnsemble(M=M, N=N).generate()
    t0 = time()
    model = (GaussBernoulliPrior(size=N, rho=rho) @ V("x") @
             LinearChannel(A) @ V("z") @
             GaussianChannel(var=noise_var) @ O("y")).to_model()
    t1 = time()
    record = {"svd_time": t1 - t0}  # svd precomputation time
    scenario = BayesOptimalScenario(model, x_ids=["x"])
    scenario.setup(seed)
    y = scenario.observations["y"]
    # run algo
    t0 = time()
    if algo == "SE":
        x_data = scenario.run_se(max_iter=1000, damping=0.1)
        record["mse"] = x_data["x"]["v"]
        record["n_iter"] = x_data["n_iter"]
    if algo == "EP":
        x_data = scenario.run_ep(max_iter=1000, damping=0.1)
        x_pred = x_data["x"]["r"]
        record["n_iter"] = x_data["n_iter"]
    if algo == "LassoCV":
        lasso = LassoCV(cv=5)
        lasso.fit(A, y)
        x_pred = lasso.coef_
        record["param_scikit"] = lasso.alpha_
        record["n_iter"] = lasso.n_iter_
    if algo == "Lasso":
        optim = pd.read_csv("optimal_param_lasso.csv")
        param_scaled = np.interp(alpha, optim["alpha"], optim["param_scaled"])
        param_scikit = noise_var * param_scaled / (M * rho)
        lasso = Lasso(alpha=param_scikit)
        lasso.fit(A, y)
        x_pred = lasso.coef_
        record["param_scikit"] = param_scikit
        record["n_iter"] = lasso.n_iter_
    if algo == "pymc3":
        with pm.Model():
            ber = pm.Bernoulli("ber", p=rho, shape=N)
            nor = pm.Normal("nor", mu=0, sd=1, shape=N)
            x = pm.Deterministic("x", ber * nor)
            likelihood = pm.Normal("y", mu=pm.math.dot(A, x),
                                   sigma=np.sqrt(noise_var), observed=y)
            trace = pm.sample(draws=1000, chains=1, return_inferencedata=False)
            x_pred = trace.get_values('x').mean(axis=0)
    t1 = time()
    record["time"] = t1 - t0
    if algo != "SE":
        record["mse"] = mean_squared_error(x_pred, scenario.x_true["x"])
    return record
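# Hedged usage sketch (assumption, not from the original code): collect
# run_benchmark records over a grid of measurement ratios, algorithms and
# seeds into a pandas DataFrame for later averaging and plotting.
import pandas as pd

records = []
for alpha in [0.2, 0.4, 0.6, 0.8, 1.0]:
    for algo in ["SE", "EP", "LassoCV"]:
        for seed in range(3):
            record = run_benchmark(alpha=alpha, algo=algo, seed=seed)
            record.update(alpha=alpha, algo=algo, seed=seed)  # keep the run settings alongside the metrics
            records.append(record)
df = pd.DataFrame(records)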
def mse_lasso(alpha, param_scaled, seed):
    # create scenario
    N, rho, noise_var = 1000, 0.05, 1e-2
    M = int(alpha * N)
    A = GaussianEnsemble(M=M, N=N).generate()
    model = (GaussBernoulliPrior(size=N, rho=rho) @ V("x") @
             LinearChannel(A) @ V("z") @
             GaussianChannel(var=noise_var) @ O("y")).to_model()
    scenario = BayesOptimalScenario(model, x_ids=["x"])
    scenario.setup(seed)
    y = scenario.observations["y"]
    # run lasso
    param_scikit = noise_var * param_scaled / (M * rho)
    lasso = Lasso(alpha=param_scikit)
    lasso.fit(A, y)
    x_pred = lasso.coef_
    mse = mean_squared_error(x_pred, scenario.x_true["x"])
    return mse
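# Hedged usage sketch (assumption, not from the original code): grid-search the
# scaled regularization strength for a given alpha by averaging mse_lasso over a
# few seeds. A sweep of this kind could produce a table such as the
# "optimal_param_lasso.csv" file read by run_benchmark above.
import numpy as np

alpha = 0.6
params_scaled = np.logspace(-1, 1, 21)
avg_mse = [
    np.mean([mse_lasso(alpha, p, seed) for seed in range(3)])
    for p in params_scaled
]
best_param_scaled = params_scaled[int(np.argmin(avg_mse))]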
def build_VAE_prior(self, params):
    """Build a VAE prior with loaded weights."""
    shape = self.shape
    assert self.N == 784
    biases, weights = self.load_VAE_prior(params)
    if params['id'] == '20_relu_400_sigmoid_784_bias':
        # Two-layer decoder: D=20 latent -> N1=400 hidden -> N=784 output
        D, N1, N = 20, 400, 28 * 28
        W1, W2 = weights
        b1, b2 = biases
        prior_x = (GaussianPrior(size=D) @ V(id="z_0") @
                   LinearChannel(W1, name="W_1") @ V(id="Wz_1") @
                   BiasChannel(b1) @ V(id="b_1") @
                   LeakyReluChannel(0) @ V(id="z_1") @
                   LinearChannel(W2, name="W_2") @ V(id="Wz_2") @
                   BiasChannel(b2) @ V(id="b_2") @
                   HardTanhChannel() @ V(id="z_2") @
                   ReshapeChannel(prev_shape=self.N, next_shape=self.shape))
    else:
        raise NotImplementedError
    return prior_x
# %%
# Create a sparse teacher
N = 250
alpha = 1
rho = 0.1
Delta = 1e-2
teacher = SparseTeacher(N=N, alpha=alpha, rho=rho, Delta=Delta)
sample = (teacher.model).sample()

# %%
# Build the student model conditioned on the observed y
prior = GaussBernoulliPrior(size=(N,), rho=rho)
student = (prior @ V(id="x") @ LinearChannel(W=teacher.A) @ V(id="z") @
           GaussianLikelihood(y=sample['y'], var=Delta))
student = student.to_model_dag()
student = student.to_model()
student.plot()

# %%
# Run expectation propagation and measure the reconstruction MSE
max_iter = 20
damping = 0.1
ep = ExpectationPropagation(student)
ep.iterate(max_iter=max_iter, damping=damping, callback=EarlyStopping(tol=1e-8))
data_ep = ep.get_variables_data(['x'])
mse = mean_squared_error(data_ep['x']['r'], sample['x'])
def conv_model(A, C, prior):
    # Compose prior -> C -> A, i.e. x -> Cx -> z = A C x
    return (prior @ V(id="x") @ LinearChannel(W=C) @ V(id="Cx") @
            LinearChannel(W=A) @ V(id="z"))
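# Hedged usage sketch (assumption, not from the original code): attach a Gaussian
# likelihood to the output variable "z" of conv_model, following the same
# composition pattern as the teacher/student script above. A, C, prior, y_obs
# and Delta are placeholders assumed to be defined elsewhere.
student = conv_model(A, C, prior) @ GaussianLikelihood(y=y_obs, var=Delta)
student = student.to_model()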