# $F \in \mathbb{R}^{M \times N}$ is a Gaussian random matrix.
# You can build the perceptron directly, or use the `glm_generative` model builder.
teacher = glm_generative(
    N=1000, alpha=1.7, ensemble_type="gaussian",
    prior_type="binary", output_type="sgn"
)
scenario = BayesOptimalScenario(teacher, x_ids=["x"])
scenario.setup(seed=42)
scenario.student.plot()

# %%
# EP dynamics
ep_evo = scenario.ep_convergence(
    metrics=["mse"], max_iter=30, callback=EarlyStoppingEP()
)
qplot(
    ep_evo, x="iter", y=["mse", "v"],
    y_markers=[".", "-"], y_legend=True
)

# %%
# Recovered signal
plot_compare(scenario.x_true["x"], scenario.x_pred["x"])

# %%
# Compare EP vs SE
# ----------------
# See `data/perceptron_ep_vs_se.py` for the corresponding script.
rename = {
    "alpha": r"$\alpha$", "n_iter": "iterations", "p_pos": r"$p_+$",
    "source=": ""
}
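# %%
# As a sanity check on the model above, here is a minimal NumPy sketch of the
# generative process the perceptron teacher encodes: a binary signal
# $x \in \{\pm 1\}^N$, a Gaussian sensing matrix $F \in \mathbb{R}^{M \times N}$
# with $M = \alpha N$, and observations $y = \mathrm{sgn}(Fx)$. The $1/\sqrt{N}$
# scaling of $F$ below is an assumption made for illustration; `glm_generative`
# may use a different normalization.
import numpy as np

rng = np.random.default_rng(42)
N, alpha = 1000, 1.7
M = int(alpha * N)
x_star = rng.choice([-1.0, 1.0], size=N)      # binary signal (prior_type="binary")
F = rng.standard_normal((M, N)) / np.sqrt(N)  # Gaussian random matrix (assumed scaling)
y = np.sign(F @ x_star)                       # sign output (output_type="sgn")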
)
axes[2].set_title(r'$z = \textrm{DFT}(x)$')
axes[2].set_xlim(0, 25)
for axe in axes:
    axe.legend(fancybox=True, shadow=False, loc="lower center", fontsize=20)
fig.tight_layout()

# %%
# Parameters
N, rho, noise_var, seed = 100, 0.02, 0.1, 1
prior_var, fft_var = 1, 18.75

# %%
# We create the teacher-student scenario
teacher = SparseFFT_Teacher(N, noise_var)
teacher.info()
student = build_sparse_fft_student(N, prior_var, rho, fft_var, noise_var)
scenario = TeacherStudentScenario(teacher, student, x_ids=["x", "z"])
scenario.setup(seed=seed)

# %%
# Run EP
_ = scenario.run_ep(
    max_iter=1000, damping=0.1, callback=EarlyStoppingEP(tol=1e-2)
)

# %%
# Plot
plot_sparse_fft(scenario)
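# %%
# For intuition, a hedged NumPy sketch of one generative model consistent with
# the parameters above: a spectrum $z$ with a fraction `rho` of nonzero Gaussian
# components of variance `fft_var`, a real signal $x$ obtained by inverse DFT,
# and a noisy observation $y = x + \xi$. The actual `SparseFFT_Teacher` may use
# different normalizations and handle the real/imaginary parts differently, so
# treat this purely as an illustration.
import numpy as np

rng = np.random.default_rng(seed)
support = rng.random(N) < rho                 # active frequencies (fraction rho)
z_toy = np.zeros(N, dtype=complex)
z_toy[support] = np.sqrt(fft_var / 2) * (
    rng.standard_normal(support.sum()) + 1j * rng.standard_normal(support.sum())
)
x_toy = np.fft.ifft(z_toy).real               # real signal with a sparse spectrum
y_toy = x_toy + np.sqrt(noise_var) * rng.standard_normal(N)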
teacher = SparseGradTeacher(size=N, grad_rho=rho, noise_var=1e-2)
# Build the student
student = build_sparse_grad_student(size=N, grad_rho=rho, noise_var=1e-2)
# Create a teacher-student scenario
# Variables to track
x_ids = ["x", "x'"]
scenario = TeacherStudentScenario(teacher, student, x_ids=x_ids)
scenario.setup(seed=seed)

# %%
# Run EP
# Max number of EP iterations
max_iter = 1000
# Damping value
damping = 0.1
scenario.run_ep(
    max_iter=max_iter, damping=damping, callback=EarlyStoppingEP(tol=1e-2)
)
dic = {
    'model': 'sparse_gradient', 'N': N, 'rho': rho, 'seed': seed,
    'y': scenario.observations["y"],
    'x': scenario.x_true, 'x_pred': scenario.x_pred
}

# %%
# Plot
plot_sparse_gradient(dic, save_fig=False)
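# %%
# Similarly, a hedged NumPy sketch of the kind of signal the sparse-gradient
# teacher targets: a piecewise-constant $x$ whose discrete gradient $x'$ has
# only a fraction `rho` of nonzero entries, observed through additive Gaussian
# noise. The exact conventions of `SparseGradTeacher` (boundary handling,
# gradient scaling, jump distribution) may differ; the sizes below are
# illustrative values, not the script's own.
import numpy as np

rng = np.random.default_rng(0)
N_toy, rho_toy = 400, 0.04                    # illustrative size and sparsity
jumps = np.where(rng.random(N_toy) < rho_toy, rng.standard_normal(N_toy), 0.0)
x_toy = np.cumsum(jumps)                      # piecewise-constant signal
grad_x = np.diff(x_toy)                       # sparse discrete gradient x'
y_toy = x_toy + np.sqrt(1e-2) * rng.standard_normal(N_toy)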