def test_window(rank1, solver_d, uv_constraint):
    """Smoke test that the ``window`` parameter changes the learned output."""
    n_trials, n_channels, n_times = 2, 3, 100
    n_times_atom, n_atoms = 10, 4

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)

    *_, uv_constraint_ = check_solver_and_constraints(rank1, solver_d,
                                                      uv_constraint)

    # Same starting dictionary for both runs so only `window` differs.
    D_init = init_dictionary(X, n_atoms, n_times_atom, rank1=rank1,
                             uv_constraint=uv_constraint_, random_state=0)

    common = dict(X=X, n_atoms=n_atoms, n_times_atom=n_times_atom, verbose=0,
                  uv_constraint=uv_constraint, solver_d=solver_d, rank1=rank1,
                  random_state=0, n_iter=1, solver_z='l-bfgs', D_init=D_init)
    res_no_window = learn_d_z_multi(window=False, **common)
    res_window = learn_d_z_multi(window=True, **common)

    # The third return value (the learned dictionary) must differ.
    assert not np.allclose(res_no_window[2], res_window[2])
def test_init_random(rank1, solver_d, uv_constraint):
    """Check that ``D_init='random'`` uses ``random_state`` reproducibly.

    Re-draws the same random dictionary with an identically-seeded RNG and
    checks that both ``init_dictionary`` and ``learn_d_z_multi`` (with
    ``n_iter=0``) produce exactly that (projected) dictionary.
    """
    n_trials, n_channels, n_times = 5, 3, 100
    n_times_atom, n_atoms = 10, 4

    _, uv_constraint_ = check_solver_and_constraints(rank1, solver_d,
                                                     uv_constraint)

    if rank1:
        # Rank-1 atoms are stored flat: n_channels spatial weights followed
        # by n_times_atom temporal weights, per atom.
        expected_shape = (n_atoms, n_channels + n_times_atom)
        prox = functools.partial(prox_uv, uv_constraint=uv_constraint_,
                                 n_channels=n_channels)
    else:
        expected_shape = (n_atoms, n_channels, n_times_atom)
        prox = prox_d

    X = np.random.randn(n_trials, n_channels, n_times)

    # Test that init_dictionary is doing what we expect for D_init random
    random_state = 42
    D_hat = init_dictionary(X, n_atoms, n_times_atom, D_init='random',
                            rank1=rank1, uv_constraint=uv_constraint_,
                            random_state=random_state)
    # Re-seed an identical RNG and reproduce the expected draw + projection.
    rng = check_random_state(random_state)
    D_init = rng.randn(*expected_shape)
    D_init = prox(D_init)
    assert_allclose(D_hat, D_init,
                    err_msg="The random state is not correctly "
                    "used in init_dictionary .")

    # Test that learn_d_z_multi is doing what we expect for D_init random
    random_state = 27
    _, _, D_hat, _, _ = learn_d_z_multi(X, n_atoms, n_times_atom,
                                        D_init='random', n_iter=0,
                                        rank1=rank1, solver_d=solver_d,
                                        uv_constraint=uv_constraint,
                                        random_state=random_state)
    rng = check_random_state(random_state)
    D_init = rng.randn(*expected_shape)
    D_init = prox(D_init)
    assert_allclose(D_hat, D_init,
                    err_msg="The random state is not correctly "
                    "used in learn_d_z_multi.")
def one_run(X, X_shape, random_state, method, n_atoms, n_times_atom, reg):
    """Run one benchmark configuration and collect its results.

    ``method`` is a ``(func, label, n_iter)`` triple.  The selected solver is
    first run for a single iteration (to discard compilation overhead from
    the timings), then for the full ``n_iter``.  Returns a flat tuple of the
    run identifiers, objective/time curves, learned dictionary and sparse
    activations, and the problem dimensions.
    """
    assert X.shape == X_shape
    func, label, n_iter = method

    elapsed = time.time() - START
    print(colorify('%s - %s: started at T=%.0f sec'
                   % (random_state, label, elapsed), BLUE))

    is_2d = len(X_shape) == 2
    if is_2d:
        n_trials, n_times = X.shape
        n_channels = 1
        X_init = X[:, None, :]  # add a singleton channel axis for init
    else:
        n_trials, n_channels, n_times = X.shape
        X_init = X

    # use the same init for all methods
    ds_init = init_dictionary(X_init, n_atoms, n_times_atom, D_init='chunk',
                              rank1=False, uv_constraint='separate',
                              D_init_params=dict(),
                              random_state=random_state)
    if is_2d:
        ds_init = ds_init[:, 0, :]

    # warm-up run with one iteration to remove compilation overhead
    _, _, _, _ = func(X, ds_init, reg, 1, random_state, label)

    # the actual timed run
    pobj, times, d_hat, z_hat = func(X, ds_init, reg, n_iter, random_state,
                                     label)

    # store z_hat in sparse matrices to reduce the result size
    for z in z_hat:
        z[z < 1e-3] = 0
    z_hat = [sp.csr_matrix(z) for z in z_hat]

    duration = time.time() - START - elapsed
    elapsed = time.time() - START
    print(colorify('%s - %s: done in %.0f sec at T=%.0f sec'
                   % (random_state, label, duration, elapsed), GREEN))

    return (random_state, label, np.asarray(pobj), np.asarray(times),
            np.asarray(d_hat), np.asarray(z_hat), n_atoms, n_times_atom,
            n_trials, n_times, n_channels, reg)
def test_init_array(rank1, solver_d, uv_constraint):
    """Check that an explicit ``D_init`` array is used, after projection."""
    n_trials, n_channels, n_times = 5, 3, 100
    n_times_atom, n_atoms = 10, 4

    _, uv_constraint_ = check_solver_and_constraints(rank1, solver_d,
                                                     uv_constraint)

    if rank1:
        # flat (u, v) storage: n_channels spatial + n_times_atom temporal
        shape = (n_atoms, n_channels + n_times_atom)
        project = functools.partial(prox_uv, uv_constraint=uv_constraint_,
                                    n_channels=n_channels)
    else:
        shape = (n_atoms, n_channels, n_times_atom)
        project = prox_d

    X = np.random.randn(n_trials, n_channels, n_times)

    # init_dictionary must return a projected *copy* of the given array.
    D0 = np.random.randn(*shape)
    D_hat = init_dictionary(X, n_atoms, n_times_atom, D_init=D0,
                            rank1=rank1, uv_constraint=uv_constraint_)
    D0 = project(D0)
    assert_allclose(D_hat, D0)
    assert D_hat is not D0

    # learn_d_z_multi with n_iter=0 must return the projected D_init.
    D0 = np.random.randn(*shape)
    _, _, D_hat, _, _ = learn_d_z_multi(X, n_atoms, n_times_atom, D_init=D0,
                                        n_iter=0, rank1=rank1,
                                        solver_d=solver_d,
                                        uv_constraint=uv_constraint)
    D0 = project(D0)
    assert_allclose(D_hat, D0)
def test_init_shape(D_init, rank1):
    """init_dictionary must return an array with the expected shape."""
    n_trials, n_channels, n_times = 5, 3, 100
    n_times_atom, n_atoms = 10, 4
    X = np.random.randn(n_trials, n_channels, n_times)

    # rank-1 dictionaries concatenate u (n_channels) and v (n_times_atom)
    expected_shape = ((n_atoms, n_channels + n_times_atom) if rank1
                      else (n_atoms, n_channels, n_times_atom))

    uv_hat = init_dictionary(X, n_atoms, n_times_atom, D_init=D_init,
                             rank1=rank1, uv_constraint='separate',
                             random_state=42)
    assert uv_hat.shape == expected_shape
def D_hat(X, rank1):
    """Return a dictionary initialized from ``X`` with a fixed random state."""
    dictionary = init_dictionary(X, N_ATOMS, N_TIMES_ATOM, random_state=0,
                                 rank1=rank1)
    return dictionary
# Note the use of reshape to shape the signal as per alphacsc requirements: the # shape of the signal should be (n_trials, n_channels, n_times). Here, we have # a single-channel time series so it is (1, 1, n_times). from alphacsc.init_dict import init_dictionary # set dictionary size n_atoms = 8 # set individual atom (patch) size. n_times_atom = 200 D_init = init_dictionary(X, n_atoms=8, n_times_atom=200, rank1=False, window=True, D_init='chunk', random_state=60) print(D_init.shape) "" from alphacsc import BatchCDL cdl = BatchCDL( # Shape of the dictionary n_atoms, n_times_atom, rank1=False, uv_constraint='auto',