def test_iterative_reweighted_mxne():
    """Test convergence of the irMxNE solver.

    Checks that:
    * with a single reweighting iteration the irMxNE solution matches the
      plain L21 (MxNE) solution for both the 'bcd' and 'prox' solvers,
    * with 5 reweighting iterations all three solvers ('bcd', 'prox', 'cd')
      recover exactly the simulated support {0, 4} (for n_orient=1) and
      agree with each other to 5 decimals,
    * with n_orient > 1 the recovered support expands to whole orientation
      groups and the solvers still agree.
    """
    n, p, t, alpha = 30, 40, 20, 1
    rng = np.random.RandomState(0)
    G = rng.randn(n, p)
    G /= np.std(G, axis=0)[None, :]  # unit-variance columns
    # Ground truth: two active sources (rows 0 and 4), constant in time.
    X = np.zeros((p, t))
    X[0] = 3
    X[4] = -2
    M = np.dot(G, X)

    # One reweighting iteration == plain mixed-norm (L21) solution.
    X_hat_l21, _, _ = mixed_norm_solver(
        M, G, alpha, maxit=1000, tol=1e-8, verbose=False, n_orient=1,
        active_set_size=None, debias=False, solver='bcd')
    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
        M, G, alpha, 1, maxit=1000, tol=1e-8,
        active_set_size=None, debias=False, solver='bcd')
    X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
        M, G, alpha, 1, maxit=1000, tol=1e-8,
        active_set_size=None, debias=False, solver='prox')
    assert_allclose(X_hat_bcd, X_hat_l21, rtol=1e-3)
    assert_allclose(X_hat_prox, X_hat_l21, rtol=1e-3)

    # Five reweighting iterations: exact support recovery for all solvers.
    X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
        M, G, alpha, 5, maxit=1000, tol=1e-8,
        active_set_size=None, debias=True, solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
        debias=True, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
        M, G, alpha, 5, maxit=1000, tol=1e-8,
        active_set_size=None, debias=True, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    assert_array_almost_equal(X_hat_prox, X_hat_cd, 5)
    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)

    # n_orient=2: support is recovered as orientation pairs.
    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
        debias=True, n_orient=2, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    # suppress a coordinate-descent warning here
    with warnings.catch_warnings(record=True):
        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
            debias=True, n_orient=2, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    # BUG FIX: this was assert_array_equal(X_hat_bcd, X_hat_cd, 5).  The
    # third positional argument of assert_array_equal is err_msg, so the
    # intended 5-decimal tolerance was silently swallowed and *exact*
    # equality of two float solver outputs was demanded.
    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)

    # n_orient=5: support is recovered as a group of five orientations.
    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
        debias=True, n_orient=5)
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    with warnings.catch_warnings(record=True):  # coordinate-descent warning
        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
            debias=True, n_orient=5, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    # BUG FIX: same err_msg misuse as above — use the almost-equal variant.
    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
plt.title("Feature covariance") ############################################################################### # Simulation of the data with some noise M = G.dot(X_true) M += 0.2 * np.max(np.abs(M)) * rng.randn(n_samples, n_times) ############################################################################### # Define the regularization parameter and run the MM solver # --------------------------------------------------------- lambda_max = np.max(np.linalg.norm(np.dot(G.T, M), axis=1)) lambda_ref = lambda_percent / 100. * lambda_max X_mm, active_set_mm, E = \ iterative_mixed_norm_solver(M, G, lambda_ref, n_mxne_iter=10) pobj_l2half_X_mm = E[-1] print("Found support: %s" % np.where(active_set_mm)[0]) ############################################################################### # Run the solver # -------------- Xs, active_sets, lpp_samples, lpp_Xs, pobj_l2half_Xs = \ mm_mixed_norm_bayes(M, G, lambda_ref, K=K) # Plot if we found better local minima then the first result found be the plt.figure() plt.hist(pobj_l2half_Xs, bins=20, label="Modes obj.")