def run_l_bfgs(X, ds_init, reg, n_iter, random_state, label, factr_d=1e7,
               factr_z=1e14):
    """Benchmark runner: CSC with the L-BFGS solver for the z update."""
    assert X.ndim == 2
    n_atoms, n_times_atom = ds_init.shape
    pobj, times, d_hat, z_hat = learn_d_z(
        X, n_atoms, n_times_atom, func_d=update_d_block, solver_z='l-bfgs',
        solver_z_kwargs=dict(factr=factr_z), reg=reg, n_iter=n_iter,
        solver_d_kwargs=dict(factr=factr_d), random_state=random_state,
        ds_init=ds_init, n_jobs=1, verbose=verbose)
    return pobj[::2], np.cumsum(times)[::2], d_hat, z_hat
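

# ``test_learn_codes_atoms_sample_weights`` below takes ``func_d`` and
# ``solver_z`` as arguments, so pytest needs parametrize decorators to collect
# it. The decorators below are a sketch: the exact parameter grid is an
# assumption, chosen to mirror the loops used in the other tests of this file.
import pytest  # normally imported once at the top of the file

@pytest.mark.parametrize('solver_z', ['l-bfgs', 'ista', 'fista'])
@pytest.mark.parametrize('func_d', [
    partial(update_d_block, projection='dual'),
    partial(update_d_block, projection='primal'),
    update_d,
])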
def test_learn_codes_atoms_sample_weights(func_d, solver_z):
    """Test weighted CSC."""
    rng = check_random_state(42)
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    ds_init = rng.randn(n_atoms, n_times_atom)
    X += 0.1 * rng.randn(*X.shape)
    n_iter = 3
    reg = 0.1

    # sample_weights all equal to one is equivalent to sample_weights=None.
    sample_weights = np.ones_like(X)
    pobj_0, _, _, _ = learn_d_z(
        X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z, reg=reg,
        n_iter=n_iter, random_state=0, verbose=0,
        sample_weights=sample_weights, ds_init=ds_init)
    pobj_1, _, _, _ = learn_d_z(
        X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z, reg=reg,
        n_iter=n_iter, random_state=0, verbose=0, sample_weights=None,
        ds_init=ds_init)
    assert np.allclose(pobj_0, pobj_1)

    if getattr(func_d, "keywords", {}).get("projection") != 'primal':
        # sample_weights equal to 2 is equivalent to having twice the samples
        # (with the regularization equal to zero).
        reg = 0.
        n_iter = 3
        n_duplicated = n_trials // 3
        sample_weights = np.ones_like(X)
        sample_weights[:n_duplicated] = 2
        X_duplicated = np.vstack([X[:n_duplicated], X])
        pobj_0, _, d_hat_0, z_hat_0 = learn_d_z(
            X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
            reg=reg, n_iter=n_iter, random_state=0, verbose=0,
            sample_weights=sample_weights, ds_init=ds_init,
            solver_z_kwargs=dict(factr=1e9))
        pobj_1, _, d_hat_1, z_hat_1 = learn_d_z(
            X_duplicated, n_atoms, n_times_atom, func_d=func_d,
            solver_z=solver_z, reg=reg, n_iter=n_iter, random_state=0,
            verbose=0, sample_weights=None, ds_init=ds_init,
            solver_z_kwargs=dict(factr=1e9))
        pobj_1 /= pobj_0[0]
        pobj_0 /= pobj_0[0]
        assert np.allclose(pobj_0, pobj_1, rtol=0, atol=1e-3)
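

# Note on the ``getattr(func_d, "keywords", {})`` check above: ``partial``
# objects expose the keyword arguments they were constructed with through the
# ``.keywords`` attribute, while a plain function such as ``update_d`` has no
# such attribute, so ``getattr`` with a ``{}`` default keeps the check working
# for both kinds of ``func_d``. For instance:
#
#     partial(update_d_block, projection='dual').keywords  # {'projection': 'dual'}
#     getattr(update_d, "keywords", {})                     # {}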


def run_fista(X, ds_init, reg, n_iter, random_state, label):
    """Benchmark runner: CSC with the FISTA solver for the z update."""
    assert X.ndim == 2
    n_atoms, n_times_atom = ds_init.shape
    pobj, times, d_hat, z_hat = learn_d_z(
        X, n_atoms, n_times_atom, func_d=update_d_block, solver_z='fista',
        solver_z_kwargs=dict(max_iter=2), reg=reg, n_iter=n_iter,
        random_state=random_state, ds_init=ds_init, n_jobs=1, verbose=verbose)
    return pobj[::2], np.cumsum(times)[::2], d_hat, z_hat
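

# A minimal driver sketch (not part of the original benchmark) showing how the
# two runners above could be compared on the same data. It assumes that the
# module-level names ``n_trials``, ``n_times``, ``n_times_atom``, ``n_atoms``
# and the ``simulate_data`` helper used by the tests below are available here;
# the helper name ``_run_benchmark_sketch`` is hypothetical.
def _run_benchmark_sketch(reg=0.1, n_iter=10, random_state=0):
    rng = np.random.RandomState(random_state)
    # Simulated trials of shape (n_trials, n_times), plus a little noise.
    X, _, _ = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    X += 0.1 * rng.randn(*X.shape)
    ds_init = rng.randn(n_atoms, n_times_atom)

    results = {}
    for label, runner in [('l-bfgs', run_l_bfgs), ('fista', run_fista)]:
        pobj, times, _, _ = runner(X, ds_init, reg, n_iter, random_state,
                                   label)
        results[label] = (times, pobj)  # cumulative time vs. objective values
    return results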


def test_learn_codes_atoms():
    """Test that the objective function is decreasing."""
    random_state = 1
    n_iter = 3
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    func_d_0 = partial(update_d_block, projection='dual', n_iter=5)
    func_d_1 = partial(update_d_block, projection='primal', n_iter=5)
    for func_d in [func_d_0, func_d_1, update_d]:
        for solver_z in ('l-bfgs', 'ista', 'fista'):
            pobj, times, d_hat, _ = learn_d_z(
                X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
                reg=reg, n_iter=n_iter, verbose=0, random_state=random_state,
                solver_z_kwargs=dict(factr=1e7, max_iter=200))
            assert np.all(np.diff(pobj) < 0)


def test_n_jobs_larger_than_n_trials():
    """Test that n_jobs can exceed the number of trials."""
    n_trials = 2
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    # learn_d_z returns four values (pobj, times, d_hat, z_hat), as elsewhere
    # in this file; the original five-element unpacking was a bug.
    pobj, times, d_hat, z_hat = learn_d_z(X, n_atoms, n_times_atom, n_iter=3,
                                          n_jobs=3)
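

# A possible companion check (a sketch, not in the original test suite): with a
# fixed ``random_state`` the per-trial z updates should not depend on how the
# trials are dispatched over workers, so the objective is expected to match for
# different values of ``n_jobs``. This determinism is an assumption about
# ``learn_d_z``; the test name is hypothetical.
def test_n_jobs_invariance_sketch():
    n_trials = 2
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    pobj_1, _, _, _ = learn_d_z(X, n_atoms, n_times_atom, n_iter=3, n_jobs=1,
                                random_state=0, verbose=0)
    pobj_2, _, _, _ = learn_d_z(X, n_atoms, n_times_atom, n_iter=3, n_jobs=2,
                                random_state=0, verbose=0)
    assert np.allclose(pobj_1, pobj_2)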


def test_learn_codes_atoms_sample_weights():
    """Test weighted CSC."""
    rng = check_random_state(42)
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    ds_init = rng.randn(n_atoms, n_times_atom)
    X += 0.1 * rng.randn(*X.shape)
    n_iter = 3
    reg = 0.1

    func_d_0 = partial(update_d_block, projection='dual')
    func_d_1 = partial(update_d_block, projection='primal')
    func_d_list = [func_d_0, func_d_1, update_d]

    # sample_weights all equal to one is equivalent to sample_weights=None.
    sample_weights = np.ones_like(X)
    for func_d in func_d_list:
        for solver_z in ('l_bfgs', 'ista', 'fista'):
            pobj_0, _, _, _ = learn_d_z(
                X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
                reg=reg, n_iter=n_iter, random_state=0, verbose=0,
                sample_weights=sample_weights, ds_init=ds_init)
            pobj_1, _, _, _ = learn_d_z(
                X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
                reg=reg, n_iter=n_iter, random_state=0, verbose=0,
                sample_weights=None, ds_init=ds_init)
            assert_allclose(pobj_0, pobj_1)

    # sample_weights equal to 2 is equivalent to having twice the samples
    # (with the regularization equal to zero).
    reg = 0.
    n_iter = 3
    n_duplicated = n_trials // 3
    sample_weights = np.ones_like(X)
    sample_weights[:n_duplicated] = 2
    X_duplicated = np.vstack([X[:n_duplicated], X])
    for func_d in [update_d, update_d_block]:
        for solver_z in ('l_bfgs', 'ista', 'fista'):
            pobj_0, _, d_hat_0, z_hat_0 = learn_d_z(
                X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
                reg=reg, n_iter=n_iter, random_state=0, verbose=0,
                sample_weights=sample_weights, ds_init=ds_init,
                solver_z_kwargs=dict(factr=1e9))
            pobj_1, _, d_hat_1, z_hat_1 = learn_d_z(
                X_duplicated, n_atoms, n_times_atom, func_d=func_d,
                solver_z=solver_z, reg=reg, n_iter=n_iter, random_state=0,
                verbose=0, sample_weights=None, ds_init=ds_init,
                solver_z_kwargs=dict(factr=1e9))
            assert_allclose(pobj_0, pobj_1, rtol=0, atol=1e-3)