Example #1
def test_learn_codes():
    """Test learning of codes."""
    thresh = 0.25

    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)

    for solver in ('l_bfgs', 'ista', 'fista'):
        z_hat = update_z(X,
                         ds,
                         reg,
                         solver=solver,
                         solver_kwargs=dict(factr=1e11, max_iter=50))

        X_hat = construct_X(z_hat, ds)
        assert np.corrcoef(X.ravel(), X_hat.ravel())[0, 1] > 0.99
        assert np.max(X - X_hat) < 0.1

        # Find position of non-zero entries
        idx = np.ravel_multi_index(z[0].nonzero(), z[0].shape)
        loc_x, loc_y = np.where(z_hat[0] > thresh)
        # convert the detected (atom, time) positions to flat indices
        idx_hat = np.ravel_multi_index((loc_x, loc_y), z_hat[0].shape)
        # make sure that the positions are a subset of the positions
        # in the original z
        mask = np.in1d(idx_hat, idx)
        assert np.sum(mask) == len(mask)
Example #2
def test_learn_atoms():
    """Test learning of atoms."""
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    d_hat, _ = update_d(X, z, n_times_atom)

    assert np.allclose(ds, d_hat)

    X_hat = construct_X(z, d_hat)
    assert np.allclose(X, X_hat, rtol=1e-05, atol=1e-12)
Example #3
def test_update_z_sample_weights():
    """Test z update with weights."""
    rng = check_random_state(42)
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    b_hat_0 = rng.randn(n_atoms * (n_times - n_times_atom + 1))

    # Having sample_weights all identical is equivalent to having
    # sample_weights=None and a scaled regularization
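    # (Reasoning sketch, assuming the weighted data-fit term is
    # 0.5 * sum(sample_weights * (X - X_hat) ** 2): with constant weights
    # equal to `factor` and regularization `reg * factor`, the objective is
    # factor * (0.5 * ||X - X_hat||^2 + reg * ||z||_1), i.e. a rescaling of
    # the unweighted objective, so both problems share the same minimizer.)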
    factor = 1.6
    sample_weights = np.ones_like(X) * factor
    for solver in ('l_bfgs', 'ista', 'fista'):
        z_0 = update_z(X,
                       ds,
                       reg * factor,
                       n_times_atom,
                       solver=solver,
                       solver_kwargs=dict(factr=1e7, max_iter=50),
                       b_hat_0=b_hat_0.copy(),
                       sample_weights=sample_weights)
        z_1 = update_z(X,
                       ds,
                       reg,
                       n_times_atom,
                       solver=solver,
                       solver_kwargs=dict(factr=1e7, max_iter=50),
                       b_hat_0=b_hat_0.copy(),
                       sample_weights=None)
        assert_allclose(z_0, z_1, rtol=1e-4)

    # All solvers should give the same results
    sample_weights = np.abs(rng.randn(*X.shape))
    sample_weights /= sample_weights.mean()
    z_list = []
    for solver in ('l_bfgs', 'ista', 'fista'):
        z_hat = update_z(X,
                         ds,
                         reg,
                         n_times_atom,
                         solver=solver,
                         solver_kwargs=dict(factr=1e7, max_iter=2000),
                         b_hat_0=b_hat_0.copy(),
                         sample_weights=sample_weights)
        z_list.append(z_hat)
    assert_allclose(z_list[0][z != 0], z_list[1][z != 0], rtol=1e-3)
    assert_allclose(z_list[0][z != 0], z_list[2][z != 0], rtol=1e-3)

    # And using no sample weights should give different results
    z_hat = update_z(X,
                     ds,
                     reg,
                     n_times_atom,
                     solver=solver,
                     solver_kwargs=dict(factr=1e7, max_iter=2000),
                     b_hat_0=b_hat_0.copy(),
                     sample_weights=None)
    assert_raises(AssertionError, assert_allclose, z_list[0][z != 0],
                  z_hat[z != 0], 1e-3)
Example #4
def test_learn_codes_atoms():
    """Test that the objective function is decreasing."""
    random_state = 1
    n_iter = 3
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    func_d_0 = partial(update_d_block, projection='dual', n_iter=5)
    func_d_1 = partial(update_d_block, projection='primal', n_iter=5)
    for func_d in [func_d_0, func_d_1, update_d]:
        for solver_z in ('l_bfgs', 'ista', 'fista'):
            pobj, times, d_hat, _ = learn_d_z(
                X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
                reg=reg, n_iter=n_iter, verbose=0, random_state=random_state,
                solver_z_kwargs=dict(factr=1e7, max_iter=200))
            assert np.all(np.diff(pobj) < 0)
Example #5
def test_update_d():
    """Test vanilla d update."""
    rng = check_random_state(42)
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    ds_init = rng.randn(n_atoms, n_times_atom)

    # The number of iterations is 1 in the general case, but it needs to be
    # increased here to compare with update_d.
    n_iter_d_block = 5

    # All solvers should give the same results
    d_hat_0, _ = update_d(X, z, n_times_atom, lambd0=None, ds_init=ds_init)
    d_hat_1, _ = update_d_block(X, z, n_times_atom, lambd0=None,
                                ds_init=ds_init, n_iter=n_iter_d_block)
    assert np.allclose(d_hat_0, d_hat_1, rtol=1e-5)
Example #6
def test_learn_codes_atoms_sample_weights(func_d, solver_z):
    """Test weighted CSC."""
    rng = check_random_state(42)
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    ds_init = rng.randn(n_atoms, n_times_atom)
    X += 0.1 * rng.randn(*X.shape)
    n_iter = 3
    reg = 0.1

    # sample_weights all equal to one is equivalent to sample_weights=None.
    sample_weights = np.ones_like(X)
    pobj_0, _, _, _ = learn_d_z(
        X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
        reg=reg, n_iter=n_iter, random_state=0, verbose=0,
        sample_weights=sample_weights, ds_init=ds_init)
    pobj_1, _, _, _ = learn_d_z(
        X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
        reg=reg, n_iter=n_iter, random_state=0, verbose=0,
        sample_weights=None, ds_init=ds_init)

    assert np.allclose(pobj_0, pobj_1)

    if getattr(func_d, "keywords", {}).get("projection") != 'primal':
        # sample_weights equal to 2 is equivalent to having twice the samples.
        # (with the regularization equal to zero)
        reg = 0.
        n_iter = 3
        n_duplicated = n_trials // 3
        sample_weights = np.ones_like(X)
        sample_weights[:n_duplicated] = 2
        X_duplicated = np.vstack([X[:n_duplicated], X])
        pobj_0, _, d_hat_0, z_hat_0 = learn_d_z(
            X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
            reg=reg, n_iter=n_iter, random_state=0, verbose=0,
            sample_weights=sample_weights, ds_init=ds_init,
            solver_z_kwargs=dict(factr=1e9))
        pobj_1, _, d_hat_1, z_hat_1 = learn_d_z(
            X_duplicated, n_atoms, n_times_atom, func_d=func_d,
            solver_z=solver_z, reg=reg, n_iter=n_iter, random_state=0,
            verbose=0, sample_weights=None, ds_init=ds_init,
            solver_z_kwargs=dict(factr=1e9))

        pobj_1 /= pobj_0[0]
        pobj_0 /= pobj_0[0]
        assert np.allclose(pobj_0, pobj_1, rtol=0, atol=1e-3)
Example #7
def test_update_d_sample_weights():
    """Test d update with weights."""
    rng = check_random_state(42)
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    ds_init = rng.randn(n_atoms, n_times_atom)
    # we need noise to have different results with different sample_weights
    X += 0.1 * rng.randn(*X.shape)

    # The number of iterations is 1 in the general case, but it needs to be
    # increased here to compare with update_d.
    n_iter = 5
    func_d_0 = partial(update_d_block, projection='dual', n_iter=n_iter)
    func_d_1 = partial(update_d_block, projection='primal', n_iter=n_iter,
                       solver_kwargs=dict(factr=1e3))
    func_d_list = [func_d_0, func_d_1, update_d]

    # Having sample_weights all identical is equivalent to having
    # sample_weights=None
    factor = 1.6
    sample_weights = np.ones_like(X) * factor
    for func_d in func_d_list:
        d_hat_0, _ = func_d(X, z, n_times_atom, lambd0=None, ds_init=ds_init,
                            sample_weights=sample_weights)
        d_hat_1, _ = func_d(X, z, n_times_atom, lambd0=None, ds_init=ds_init,
                            sample_weights=None)
        assert np.allclose(d_hat_0, d_hat_1, rtol=1e-5)

    # All solvers should give the same results
    sample_weights = np.abs(rng.randn(*X.shape))
    sample_weights /= sample_weights.mean()
    d_hat_list = []
    for func_d in func_d_list:
        d_hat, _ = func_d(X, z, n_times_atom, lambd0=None, ds_init=ds_init,
                          sample_weights=sample_weights)
        d_hat_list.append(d_hat)
    for d_hat in d_hat_list[1:]:
        assert np.allclose(d_hat, d_hat_list[0], rtol=1e-5)

    # And using no sample weights should give different results
    for func_d in func_d_list:
        d_hat_2, _ = func_d(X, z, n_times_atom, lambd0=None, ds_init=ds_init,
                            sample_weights=None)
        with pytest.raises(AssertionError):
            assert np.allclose(d_hat, d_hat_2, 1e-7)
Example #8
# The algorithm does not naturally lend itself to multiple atoms. Therefore,
# we simulate only one atom.
n_atoms = 1  # K

###############################################################################
# A minimum spacing between the averaged windows must be specified.
min_spacing = 200  # G

###############################################################################
# Now, we can simulate the data.
from alphacsc import check_random_state # noqa
from alphacsc.simulate import simulate_data # noqa

random_state_simulate = 1
X, ds_true, z_true = simulate_data(n_trials, n_times, n_times_atom,
                                   n_atoms, random_state_simulate,
                                   constant_amplitude=True)

rng = check_random_state(random_state_simulate)
X += 0.01 * rng.randn(*X.shape)

###############################################################################
# We expect 10 occurrences of the atom in total, so let us define 10 random
# locations for the algorithm to start with. If this number is not known, we
# will end up estimating more or fewer windows.
import numpy as np # noqa
window_starts = rng.choice(np.arange(n_trials * n_times), size=n_trials)

###############################################################################
# Now, we apply the SWM algorithm.
from alphacsc.other.swm import sliding_window_matching # noqa
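
###############################################################################
# A minimal sketch of how the matching step might be invoked, assuming
# sliding_window_matching operates on the concatenated 1D signal and accepts
# the atom length L, the minimum spacing G and the initial window starts; the
# keyword names, return values and iteration count below are assumptions, not
# the documented API.

X_flat = X.reshape(-1)  # treat the trials as one long 1D time series
d_hat, window_starts_hat, costs = sliding_window_matching(
    X_flat, L=n_times_atom, G=min_spacing,
    window_starts_custom=window_starts, max_iterations=100)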
Example #9
n_times_atom = 64  # L
n_times = 512  # T
n_atoms = 2  # K
n_trials = 100  # N
n_iter = 50

reg = 0.1

###############################################################################
# Here, we simulate the data

from alphacsc.simulate import simulate_data  # noqa

random_state_simulate = 1
X, ds_true, z_true = simulate_data(n_trials, n_times, n_times_atom, n_atoms,
                                   random_state_simulate)

###############################################################################
# Add some noise and corrupt some trials

from scipy.stats import levy_stable  # noqa
from alphacsc import check_random_state  # noqa

# Add stationary noise:
fraction_corrupted = 0.02
n_corrupted_trials = int(fraction_corrupted * n_trials)

rng = check_random_state(random_state_simulate)
X += 0.01 * rng.randn(*X.shape)

idx_corrupted = rng.randint(0, n_trials, size=n_corrupted_trials)
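
# A minimal sketch of the corruption step suggested by the levy_stable import
# above: add heavy-tailed alpha-stable noise to the selected trials. The
# alpha, beta, scale and amplitude values are illustrative assumptions.
noise = levy_stable.rvs(alpha=1.9, beta=0.0, scale=2.0,
                        size=(n_corrupted_trials, n_times),
                        random_state=rng)
X[idx_corrupted] += 0.1 * noise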
Example #10
def test_z0_read_only():
    # If n_atoms == 1, the reshape in update_z does not copy the data (cf #26)
    n_atoms = 1
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    z.flags.writeable = False
    update_z(X, ds, 0.1, z0=z, solver='ista')
Example #11
def test_n_jobs_larger_than_n_trials():
    n_trials = 2
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    pobj, times, d_hat, _, _ = learn_d_z(X, n_atoms, n_times_atom, n_iter=3,
                                         n_jobs=3)
Example #12
def test_learn_codes_atoms_sample_weights():
    """Test weighted CSC."""
    rng = check_random_state(42)
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    ds_init = rng.randn(n_atoms, n_times_atom)
    X += 0.1 * rng.randn(*X.shape)
    n_iter = 3
    reg = 0.1

    func_d_0 = partial(update_d_block, projection='dual')
    func_d_1 = partial(update_d_block, projection='primal')
    func_d_list = [func_d_0, func_d_1, update_d]

    # sample_weights all equal to one is equivalent to sample_weights=None.
    sample_weights = np.ones_like(X)
    for func_d in func_d_list:
        for solver_z in ('l_bfgs', 'ista', 'fista'):
            pobj_0, _, _, _ = learn_d_z(X,
                                        n_atoms,
                                        n_times_atom,
                                        func_d=func_d,
                                        solver_z=solver_z,
                                        reg=reg,
                                        n_iter=n_iter,
                                        random_state=0,
                                        verbose=0,
                                        sample_weights=sample_weights,
                                        ds_init=ds_init)
            pobj_1, _, _, _ = learn_d_z(X,
                                        n_atoms,
                                        n_times_atom,
                                        func_d=func_d,
                                        solver_z=solver_z,
                                        reg=reg,
                                        n_iter=n_iter,
                                        random_state=0,
                                        verbose=0,
                                        sample_weights=None,
                                        ds_init=ds_init)

            assert_allclose(pobj_0, pobj_1)

    # sample_weights equal to 2 is equivalent to having twice the samples.
    # (with the regularization equal to zero)
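    # (Reasoning sketch: duplicating a trial counts its squared-residual term
    # twice in the loss, exactly like giving that trial a sample weight of 2.
    # The l1 penalty on the duplicated codes would also be counted twice,
    # however, which is why the equivalence only holds with reg = 0.)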
    reg = 0.
    n_iter = 3
    n_duplicated = n_trials // 3
    sample_weights = np.ones_like(X)
    sample_weights[:n_duplicated] = 2
    X_duplicated = np.vstack([X[:n_duplicated], X])
    for func_d in [update_d, update_d_block]:
        for solver_z in ('l_bfgs', 'ista', 'fista'):
            pobj_0, _, d_hat_0, z_hat_0 = learn_d_z(
                X,
                n_atoms,
                n_times_atom,
                func_d=func_d,
                solver_z=solver_z,
                reg=reg,
                n_iter=n_iter,
                random_state=0,
                verbose=0,
                sample_weights=sample_weights,
                ds_init=ds_init,
                solver_z_kwargs=dict(factr=1e9))
            pobj_1, _, d_hat_1, z_hat_1 = learn_d_z(
                X_duplicated,
                n_atoms,
                n_times_atom,
                func_d=func_d,
                solver_z=solver_z,
                reg=reg,
                n_iter=n_iter,
                random_state=0,
                verbose=0,
                sample_weights=None,
                ds_init=ds_init,
                solver_z_kwargs=dict(factr=1e9))

            assert_allclose(pobj_0, pobj_1, rtol=0, atol=1e-3)