import numpy as np
from scipy import sparse

# The helpers used below (get_atoms, prox_uv, construct_X_multi, get_lambda_max,
# learn_d_z_multi, update_uv, compute_objective, get_D, get_uv, score_uv,
# check_random_state, assert_allclose) are assumed to be imported from the
# surrounding package and its test utilities.


def get_signals(n_channels=50, n_times_atom=64, n_times_valid=640, sigma=.01,
                random_state=None):
    """Generate signals following the sparse linear model, with rank-1
    triangle and square atoms and Bernoulli-uniform activations."""
    n_atoms = 2
    rng = np.random.RandomState(random_state)

    v0 = get_atoms('triangle', n_times_atom)  # temporal atoms
    v1 = get_atoms('square', n_times_atom)
    u0 = get_atoms('sin', n_channels)  # spatial maps
    u1 = get_atoms('cos', n_channels)
    u0[0] = u1[0] = 1

    # stack the rank-1 atoms uv = [u, v] and normalize them
    uv = np.array([np.r_[u0, v0], np.r_[u1, v1]])
    uv = prox_uv(uv, 'separate', n_channels)

    # add the atoms' sparse activations; `n_trials` is assumed to be a
    # module-level constant of this benchmark script
    z = np.array([sparse.random(n_atoms, n_times_valid, density=.05,
                                random_state=random_state).toarray()
                  for _ in range(n_trials)])
    z = np.swapaxes(z, 0, 1)

    # construct the signals and add Gaussian noise
    X = construct_X_multi(z, uv, n_channels=n_channels)
    X = X + sigma * rng.randn(*X.shape)

    # random, normalized initialization for the dictionary
    uv_init = rng.randn(n_atoms, n_channels + n_times_atom)
    uv_init = prox_uv(uv_init, uv_constraint='separate', n_channels=n_channels)

    return X, uv, uv_init
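def _demo_get_signals():
    # Illustrative sketch, not part of the original benchmark: generate a small
    # synthetic dataset and check the expected shapes, given the conventions
    # used above. Assumes `n_trials` is defined at module scope, since
    # get_signals relies on it.
    X, uv, uv_init = get_signals(n_channels=10, n_times_atom=16,
                                 n_times_valid=128, sigma=.01, random_state=0)
    # X: (n_trials, n_channels, n_times_valid + n_times_atom - 1)
    # uv, uv_init: (n_atoms, n_channels + n_times_atom) with n_atoms = 2
    assert X.shape[1:] == (10, 128 + 16 - 1)
    assert uv.shape == uv_init.shape == (2, 10 + 16)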
def test_update_uv(solver_d, uv_constraint):
    # Generate a synthetic rank-1 dictionary and activations
    n_times_atom, n_times = 10, 100
    n_channels = 5
    n_atoms = 2
    n_trials = 3

    rng = np.random.RandomState()
    z = rng.normal(size=(n_trials, n_atoms, n_times - n_times_atom + 1))
    uv0 = rng.normal(size=(n_atoms, n_channels + n_times_atom))
    uv1 = rng.normal(size=(n_atoms, n_channels + n_times_atom))

    uv0 = prox_uv(uv0)
    uv1 = prox_uv(uv1)

    X = construct_X_multi(z, D=uv0, n_channels=n_channels)

    def objective(uv):
        X_hat = construct_X_multi(z, D=uv, n_channels=n_channels)
        return compute_objective(X, X_hat, loss='l2')

    # Ensure that the known optimal point is stable
    uv = update_uv(X, z, uv0, max_iter=1000, verbose=0)
    cost = objective(uv)

    assert np.isclose(cost, 0), "optimal point not stable"
    assert np.allclose(uv, uv0), "optimal point not stable"

    # Ensure that the objective decreases from a random initialization
    cost0 = objective(uv1)
    uv, pobj = update_uv(X, z, uv1, debug=True, max_iter=5000, verbose=10,
                         solver_d=solver_d, momentum=False, eps=1e-10,
                         uv_constraint=uv_constraint)
    cost1 = objective(uv)

    msg = "Learning is not going down"
    try:
        assert cost1 < cost0, msg
        # assert np.isclose(cost1, 0, atol=1e-7)
    except AssertionError:
        # On failure, plot the objective curve to ease debugging
        import matplotlib.pyplot as plt
        pobj = np.array(pobj)
        plt.semilogy(pobj)
        plt.title(msg)
        plt.show()
        raise
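# Illustrative pytest wiring for the test above. The solver / constraint pairs
# below are an assumption for this sketch, not the project's actual test grid,
# and some solvers may only support specific constraints.
import pytest

_UPDATE_UV_GRID = [
    ('joint', 'joint'),
    ('joint', 'separate'),
    ('alternate_adaptive', 'separate'),
]


@pytest.mark.parametrize('solver_d, uv_constraint', _UPDATE_UV_GRID)
def test_update_uv_grid(solver_d, uv_constraint):
    # Hypothetical wrapper delegating to the test body defined above.
    test_update_uv(solver_d, uv_constraint)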
def run_one(reg, sigma, n_atoms, n_times_atom, max_n_channels, n_times_valid,
            n_iter, run_n_channels, random_state):
    """Run the benchmark for a given set of parameters."""
    X, uv, uv_init = get_signals(max_n_channels, n_times_atom, n_times_valid,
                                 sigma, random_state)

    reg_ = reg * get_lambda_max(X, uv_init).max()
    # reg_ *= run_n_channels

    # restrict the true and initial dictionaries to the first
    # `run_n_channels` channels
    uv_init_ = prox_uv(np.c_[uv_init[:, :run_n_channels],
                             uv_init[:, max_n_channels:]])
    uv_ = prox_uv(np.c_[uv[:, :run_n_channels], uv[:, max_n_channels:]],
                  uv_constraint='separate', n_channels=max_n_channels)

    def cb(X, uv_hat, z_hat, pobj):
        it = len(pobj) // 2
        if it % 10 == 0:
            print("[channels{}] iter{} score sig={:.2e}: {:.3e}".format(
                run_n_channels, it, sigma,
                score_uv(uv_, uv_hat, run_n_channels)))

    pobj, times, uv_hat, z_hat, reg = learn_d_z_multi(
        X[:, :run_n_channels, :], n_atoms, n_times_atom,
        random_state=random_state,
        # callback=cb,
        n_iter=n_iter, n_jobs=1, reg=reg_,
        uv_constraint='separate',
        solver_d='alternate_adaptive',
        solver_d_kwargs={'max_iter': 50},
        solver_z="lgcd",
        solver_z_kwargs=dict(tol=1e-3, maxiter=500),
        use_sparse_z=True,
        D_init=uv_init_,
        verbose=VERBOSE,  # VERBOSE is assumed to be a module-level constant
    )

    score = score_uv(uv_, uv_hat, run_n_channels)
    print("=" * 79 + "\n" +
          "[channels{}-{:.2e}-{}] iter {} score sig={:.2e}: {:.3e}\n".format(
              run_n_channels, reg, random_state, len(pobj) // 2, sigma, score) +
          "=" * 79)

    return random_state, sigma, run_n_channels, score, uv, uv_hat, reg
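def _run_benchmark_sketch():
    # Illustrative driver for run_one, not part of the original benchmark: the
    # parameter grid, the constants and n_jobs below are placeholder
    # assumptions used only to show how run_one could be dispatched.
    from itertools import product
    from joblib import Parallel, delayed

    channel_grid = [1, 5, 25, 50]
    seeds = range(3)
    return Parallel(n_jobs=4)(
        delayed(run_one)(reg=.1, sigma=.01, n_atoms=2, n_times_atom=64,
                         max_n_channels=50, n_times_valid=640, n_iter=50,
                         run_n_channels=n_channels, random_state=seed)
        for n_channels, seed in product(channel_grid, seeds))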
def test_uv_D():
    rng = check_random_state(42)
    n_times_atom = 21
    n_atoms = 15
    n_channels = 30

    uv = rng.randn(n_atoms, n_channels + n_times_atom)
    uv = prox_uv(uv, uv_constraint='separate', n_channels=n_channels)

    ds = get_D(uv, n_channels)
    uv_hat = get_uv(ds)

    # uv is only identifiable from D up to a sign flip per atom, hence abs()
    assert_allclose(abs(uv / uv_hat), 1)
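def _rank1_dictionary_sketch():
    # Conceptual sketch of the uv <-> D round trip exercised by test_uv_D (for
    # illustration only; get_D / get_uv from the library are the reference).
    # Each atom k is stored as a spatial map u_k and a temporal pattern v_k,
    # and the full atom is the rank-1 outer product D_k = u_k v_k^T.
    rng = np.random.RandomState(0)
    n_atoms, n_channels, n_times_atom = 3, 6, 8
    u = rng.randn(n_atoms, n_channels)
    v = rng.randn(n_atoms, n_times_atom)
    D = u[:, :, None] * v[:, None, :]  # shape (n_atoms, n_channels, n_times_atom)

    # Recovering (u_k, v_k) from D_k, e.g. via the leading singular pair, only
    # fixes them up to a per-atom sign flip, hence the abs() in test_uv_D.
    U, s, Vt = np.linalg.svd(D[0])
    assert np.allclose(s[0] * np.outer(U[:, 0], Vt[0]), D[0])
    assert np.allclose(np.abs(U[:, 0]), np.abs(u[0]) / np.linalg.norm(u[0]))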