Example #1
def test_init_random(rank1, solver_d, uv_constraint):
    """"""
    n_trials, n_channels, n_times = 5, 3, 100
    n_times_atom, n_atoms = 10, 4

    _, uv_constraint_ = check_solver_and_constraints(rank1, solver_d,
                                                     uv_constraint)

    if rank1:
        expected_shape = (n_atoms, n_channels + n_times_atom)
        prox = functools.partial(prox_uv,
                                 uv_constraint=uv_constraint_,
                                 n_channels=n_channels)
    else:
        expected_shape = (n_atoms, n_channels, n_times_atom)
        prox = prox_d

    X = np.random.randn(n_trials, n_channels, n_times)

    # Test that init_dictionary is doing what we expect for D_init random
    random_state = 42
    D_hat = init_dictionary(X,
                            n_atoms,
                            n_times_atom,
                            D_init='random',
                            rank1=rank1,
                            uv_constraint=uv_constraint_,
                            random_state=random_state)
    rng = check_random_state(random_state)

    D_init = rng.randn(*expected_shape)
    D_init = prox(D_init)
    assert_allclose(D_hat,
                    D_init,
                    err_msg="The random state is not correctly "
                    "used in init_dictionary .")

    # Test that learn_d_z_multi is doing what we expect for D_init random
    random_state = 27
    _, _, D_hat, _, _ = learn_d_z_multi(X,
                                        n_atoms,
                                        n_times_atom,
                                        D_init='random',
                                        n_iter=0,
                                        rank1=rank1,
                                        solver_d=solver_d,
                                        uv_constraint=uv_constraint,
                                        random_state=random_state)

    rng = check_random_state(random_state)
    D_init = rng.randn(*expected_shape)
    D_init = prox(D_init)
    assert_allclose(D_hat,
                    D_init,
                    err_msg="The random state is not correctly "
                    "used in learn_d_z_multi.")
Example #2
def test_learn_d_z_multi(loss, solver_d, uv_constraint, rank1, window):
    # smoke test for learn_d_z_multi
    n_trials, n_channels, n_times = 2, 3, 30
    n_times_atom, n_atoms = 6, 4

    loss_params = dict(gamma=1, sakoe_chiba_band=10, ordar=10)

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)
    pobj, times, uv_hat, z_hat, reg = learn_d_z_multi(
        X, n_atoms, n_times_atom, uv_constraint=uv_constraint, rank1=rank1,
        solver_d=solver_d, random_state=0, n_iter=30, eps=-np.inf,
        solver_z='l-bfgs', window=window, verbose=0, loss=loss,
        loss_params=loss_params)

    msg = "Cost function does not go down for uv_constraint {}".format(
        uv_constraint)

    try:
        assert np.sum(np.diff(pobj) > 1e-13) == 0, msg
    except AssertionError:
        import matplotlib.pyplot as plt
        plt.semilogy(pobj - np.min(pobj) + 1e-6)
        plt.title(msg)
        plt.show()
        raise
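The assertion reads as follows: np.diff(pobj) holds the successive changes of the objective, and none of them may exceed the 1e-13 tolerance, i.e. the cost is non-increasing up to numerical noise. A self-contained illustration:

# Toy objectives illustrating the monotonicity check used above.
pobj_ok = np.array([10.0, 8.5, 8.5, 7.9])   # non-increasing: passes
pobj_bad = np.array([10.0, 8.5, 9.0, 7.9])  # increases once: fails
assert np.sum(np.diff(pobj_ok) > 1e-13) == 0
assert np.sum(np.diff(pobj_bad) > 1e-13) == 1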
Example #3
def test_patch_reconstruction_error():
    rng = check_random_state(42)
    n_times_atoms, n_times = 21, 128
    n_atoms = 3
    n_trials, n_channels = 29, 7
    n_times_valid = n_times - n_times_atoms + 1
    density = 0.1
    z = sparse.random(n_atoms * n_trials,
                      n_times_valid,
                      density,
                      random_state=rng).toarray().reshape(
                          (n_trials, n_atoms, n_times_valid))
    uv = rng.randn(n_atoms, n_channels + n_times_atoms)

    X = construct_X_multi(z, D=uv, n_channels=n_channels)

    from alphacsc.utils.dictionary import _patch_reconstruction_error

    rec = _patch_reconstruction_error(X, z, uv)
    assert rec.shape == (n_trials, n_times_valid)
    assert_allclose(rec, 0)

    uv = rng.randn(n_atoms, n_channels + n_times_atoms)
    rec = _patch_reconstruction_error(X, z, uv)
    X_hat = construct_X_multi(z, D=uv, n_channels=n_channels)

    for i in range(10):
        for j in range(10):
            assert np.isclose(
                rec[i, j],
                np.sum((X_hat[i, :, j:j + n_times_atoms] -
                        X[i, :, j:j + n_times_atoms])**2))
Example #4
def test_transformers(klass):
    # smoke test for transformer classes
    n_trials, n_channels, n_times = 2, 3, 100
    n_times_atom, n_atoms = 10, 4

    if klass == OnlineCDL:
        kwargs = dict(batch_selection='cyclic')
    else:
        kwargs = dict()

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)
    cdl = klass(n_atoms, n_times_atom, uv_constraint='separate', rank1=True,
                solver_d='alternate_adaptive', random_state=0, n_iter=10,
                eps=-np.inf, solver_z='l-bfgs', window=True, verbose=0,
                **kwargs)
    cdl.fit(X)
    z = cdl.transform(X)
    Xt = cdl.transform_inverse(z)
    assert Xt.shape == X.shape

    msg = "Cost function does not go down for %s" % klass
    assert np.sum(np.diff(cdl.pobj_) > 1e-13) == 0, msg

    attributes = [
        'D_hat_', 'uv_hat_', 'u_hat_', 'v_hat_', 'z_hat_', 'pobj_', 'times_'
    ]
    for attribute in attributes:
        getattr(cdl, attribute)
Example #5
def test_online_partial_fit(rank1, alpha):
    # Ensure that partial_fit reproduces the behavior of the online
    # algorithm when fed the same batch size and order.
    n_trials, n_channels, n_times = 10, 3, 100
    n_times_atom, n_atoms = 6, 4

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)

    # The initial regularization is different for fit and partial_fit. It is
    # computed in batch mode for fit and with the first mini-batch in
    # partial_fit.
    params = dict(n_atoms=n_atoms,
                  n_times_atom=n_times_atom,
                  n_iter=n_trials,
                  D_init="random",
                  lmbd_max="fixed",
                  rank1=rank1,
                  batch_size=1,
                  batch_selection='cyclic',
                  alpha=alpha,
                  random_state=12)

    cdl_fit = OnlineCDL(**params)
    cdl_partial = OnlineCDL(**params)

    cdl_fit.fit(X)
    for x in X:
        cdl_partial.partial_fit(x[None])

    assert np.allclose(cdl_fit._D_hat, cdl_partial._D_hat)
Example #6
def test_online_learning():
    # smoke test for learn_d_z_multi
    n_trials, n_channels, n_times = 2, 3, 100
    n_times_atom, n_atoms = 10, 4

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)
    pobj_0, _, _, _, _ = learn_d_z_multi(X,
                                         n_atoms,
                                         n_times_atom,
                                         uv_constraint="separate",
                                         solver_d="joint",
                                         random_state=0,
                                         n_iter=30,
                                         solver_z='l-bfgs',
                                         algorithm="batch",
                                         loss='l2')

    pobj_1, _, _, _, _ = learn_d_z_multi(X,
                                         n_atoms,
                                         n_times_atom,
                                         uv_constraint="separate",
                                         solver_d="joint",
                                         random_state=0,
                                         n_iter=30,
                                         solver_z='l-bfgs',
                                         algorithm="online",
                                         algorithm_params=dict(
                                             batch_size=n_trials, alpha=0),
                                         loss='l2')

    assert np.allclose(pobj_0, pobj_1)
Example #7
def test_learn_d_z_multi_dicodile(window):
    pytest.importorskip('dicodile')
    # smoke test for learn_d_z_multi
    # XXX For DiCoDiLe, n_trials cannot be >1
    n_trials, n_channels, n_times = 1, 3, 30
    n_times_atom, n_atoms = 6, 4

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)
    pobj, times, uv_hat, z_hat, reg = learn_d_z_multi(X,
                                                      n_atoms,
                                                      n_times_atom,
                                                      uv_constraint='auto',
                                                      rank1=False,
                                                      solver_d='auto',
                                                      random_state=0,
                                                      n_iter=30,
                                                      eps=-np.inf,
                                                      solver_z='dicodile',
                                                      window=window,
                                                      verbose=0,
                                                      loss='l2',
                                                      loss_params=None)

    msg = "Cost function does not go down"

    try:
        assert np.sum(np.diff(pobj) > 1e-13) == 0, msg
    except AssertionError:
        import matplotlib.pyplot as plt
        plt.semilogy(pobj - np.min(pobj) + 1e-6)
        plt.title(msg)
        plt.show()
        raise
Example #8
def test_window(rank1, solver_d, uv_constraint):
    # Smoke test that the parameter window does something
    n_trials, n_channels, n_times = 2, 3, 100
    n_times_atom, n_atoms = 10, 4

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)

    *_, uv_constraint_ = check_solver_and_constraints(rank1, solver_d,
                                                      uv_constraint)

    D_init = init_dictionary(X,
                             n_atoms,
                             n_times_atom,
                             rank1=rank1,
                             uv_constraint=uv_constraint_,
                             random_state=0)

    kwargs = dict(X=X,
                  n_atoms=n_atoms,
                  n_times_atom=n_times_atom,
                  verbose=0,
                  uv_constraint=uv_constraint,
                  solver_d=solver_d,
                  rank1=rank1,
                  random_state=0,
                  n_iter=1,
                  solver_z='l-bfgs',
                  D_init=D_init)
    res_False = learn_d_z_multi(window=False, **kwargs)
    res_True = learn_d_z_multi(window=True, **kwargs)

    assert not np.allclose(res_False[2], res_True[2])
Example #9
def test_linear_operator():
    """Test linear operator."""
    n_times, n_atoms, n_times_atom = 64, 16, 32
    n_times_valid = n_times - n_times_atom + 1

    rng = check_random_state(42)
    ds = rng.randn(n_atoms, n_times_atom)
    some_sample_weights = np.abs(rng.randn(n_times))

    for sample_weights in [None, some_sample_weights]:
        gbc = partial(gram_block_circulant, ds=ds, n_times_valid=n_times_valid,
                      sample_weights=sample_weights)
        DTD_full = gbc(method='full')
        DTD_scipy = gbc(method='scipy')
        DTD_custom = gbc(method='custom')

        z = rng.rand(DTD_full.shape[1])
        assert np.allclose(DTD_full.dot(z), DTD_scipy.dot(z))
        assert np.allclose(DTD_full.dot(z), DTD_custom.dot(z))

        # test power iterations with linear operator
        mu, _ = linalg.eigh(DTD_full)
        for DTD in [DTD_full, DTD_scipy, DTD_custom]:
            mu_hat = power_iteration(DTD)
            assert np.allclose(np.max(mu), mu_hat, rtol=1e-2)
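power_iteration is validated above against a dense eigendecomposition. For reference, here is a generic power-method sketch of what such an estimator computes; this is an illustration assuming a symmetric positive semi-definite operator, not necessarily alphacsc's exact implementation.

def power_method_sketch(A, n_iter=100, rng=np.random):
    """Estimate the dominant eigenvalue of a symmetric PSD operator A."""
    b = rng.randn(A.shape[1])
    for _ in range(n_iter):
        b = A.dot(b)          # works for dense arrays and LinearOperators
        b /= np.linalg.norm(b)
    return b.dot(A.dot(b))    # Rayleigh quotient of the converged vector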
Example #10
def test_solve_unit_norm():
    """Test solving constraint for ||d||^2 <= 1."""
    rng = check_random_state(42)

    n, p = 10, 10
    x = np.zeros(p)
    x[3] = 3.
    A = rng.randn(n, p)
    b = np.dot(A, x)
    lhs = np.dot(A.T, A)
    rhs = np.dot(A.T, b)

    x_hat, lambd_hat = solve_unit_norm_dual(lhs, rhs,
                                            np.array([10.]), debug=True)
    # warm start
    x_hat2, _ = solve_unit_norm_dual(lhs, rhs, lambd0=lambd_hat)

    assert abs(linalg.norm(x_hat) - 1.) < 1e-3
    assert abs(linalg.norm(x_hat2) - 1.) < 1e-3

    x_hat = solve_unit_norm_primal(lhs, rhs, d_hat0=rng.randn(p))
    assert abs(linalg.norm(x_hat) - 1.) < 1e-3

    # back to dual, for more than one atom
    x[7] = 5
    x_hat, lambd_hat = solve_unit_norm_dual(lhs, rhs, np.array([5., 10.]))
    assert abs(linalg.norm(x_hat[:5]) - 1.) < 1e-3
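The dual approach rests on a standard ridge identity: for min_d ||A d - b||^2 subject to ||d||^2 <= 1, the solution takes the form d(lambd) = (A^T A + lambd I)^{-1} A^T b, with the dual variable lambd >= 0 tuned so that ||d(lambd)|| = 1 when the constraint is active. A sketch of that identity, reusing lhs = A^T A and rhs = A^T b from the single-atom setup above (illustrative only, not alphacsc's code path):

def d_of_lambda(lambd):
    # ridge solution for a given value of the dual variable
    return np.linalg.solve(lhs + lambd * np.eye(p), rhs)

# ||d(lambd)|| decreases as lambd grows, so the right lambd can be found
# by a simple one-dimensional search such as bisection.
assert np.linalg.norm(d_of_lambda(10.)) < np.linalg.norm(d_of_lambda(1.))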
Example #11
def test_ztz(use_whitening):
    n_atoms = 7
    n_trials = 3
    n_channels = 5
    n_times_valid = 500
    n_times_atom = 10
    n_times = n_times_valid + n_times_atom - 1
    random_state = None

    rng = check_random_state(random_state)

    X = rng.randn(n_trials, n_channels, n_times)
    z = rng.randn(n_trials, n_atoms, n_times_valid)
    D = rng.randn(n_atoms, n_channels, n_times_atom)

    if use_whitening:
        ar_model, X = whitening(X)
        zw = apply_whitening(ar_model, z, mode="full")
        ztz = compute_ztz(zw, n_times_atom)
        grad = np.zeros(D.shape)
        for t in range(n_times_atom):
            grad[:, :, t] = np.tensordot(ztz[:, :, t:t + n_times_atom],
                                         D[:, :, ::-1],
                                         axes=([1, 2], [0, 2]))
    else:
        ztz = compute_ztz(z, n_times_atom)
        grad = tensordot_convolve(ztz, D)
    cost = np.dot(D.ravel(), grad.ravel())

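    # Sanity of the identity checked below: the objective
    # 0.5 * ||z * D||^2 is 2-homogeneous in D, so <D, grad(D)> equals
    # ||z * D||^2 = ||X_hat||^2 (assuming grad is the gradient of that
    # quadratic objective).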
    X_hat = construct_X_multi(z, D)
    if use_whitening:
        X_hat = apply_whitening(ar_model, X_hat, mode="full")

    assert np.isclose(cost, np.dot(X_hat.ravel(), X_hat.ravel()))
Example #12
def test_dz_opt_sparse_update():
    n_trials = 10
    n_atoms = 10
    n_times_valid = 100

    t0, t1 = 20, 50
    density = 0.05

    rng = check_random_state(42)

    beta = rng.randn(n_atoms, n_times_valid)
    norm_D = rng.randn(n_atoms)
    reg = 1

    for _ in range(n_trials):
        random_state = rng.randint(65565)
        z = sparse.random(n_atoms, n_times_valid, density=density,
                          random_state=random_state, format='lil')
        # Check that we correctly handle the case where there is no tk > t1
        # in the sparse matrix
        z[-2, t1:] = 0
        # Check that we correctly handle the case where there is no nnz value
        # in the segment for z
        z[-1, t0:t1] = 0

        tmp = np.maximum(-beta - reg, 0) / norm_D[:, None]
        dz_opt = rng.randn(n_atoms, n_times_valid)
        dz_opt_expected = dz_opt.copy()

        dz_opt_expected[:, t0:t1] = tmp[:, t0:t1] - z[:, t0:t1]

        dz_opt = update_dz_opt(z, beta, dz_opt, norm_D, reg, t0, t1)

        assert_allclose(dz_opt[:, t0:t1], dz_opt_expected[:, t0:t1])
Example #13
def load_data(n_trials, n_times, std_noise=.3, display=False,
              random_state=None):
    """Simulate a dataset of n_trials oculo signal with facsimile nystagmus.

    Parameters
    ----------
    n_trials: int
        Number of signals generated.
    n_times: int
        Length of the generated signals.
    std_noise: float
        Standard deviation of the additive Gaussian white noise.
    display: boolean (default: False)
        If set to True, displays the resulting signal.
    random_state: int, RandomState or None (default: None)
        random_state for the random number generator

    """

    rng = check_random_state(random_state)

    # Sampling frequency
    s_freq = 1000
    # Mean saccade frequency
    saccad_freq = .5
    # Duration of the saccades, in seconds
    dt_sigm = 0.1

    trends = np.zeros((n_trials, n_times))
    nystagmus = np.zeros((n_trials, n_times))
    nyst_patterns = np.zeros((n_trials, s_freq))
    for i in range(n_trials):
        print(f"Generate signals: {i/n_trials:7.2%}\r", end='', flush=True)
        # Generate the signal parameter
        # Nystagmus type
        nystagmus_type = rng.choice(NYSTAGMUS_TYPES)
        # Nystagmus Frequency
        nystagmus_freq = max(3, min(5+2*rng.randn(), 8))
        # Nystagmus amplitude
        nystagmus_amp = generate_amplitude('nystagmus', random_state=rng)
        # Amplitude of the saccades
        saccad_amp = generate_amplitude('saccad', random_state=rng)
        # Amplitude of the low freq component
        low_freq_amp = generate_amplitude('low_freq', random_state=rng)
        # Curvature of the jerk
        curv = max(0, min(0.5+0.25*rng.randn(), 1))

        args = dict(
            n_times=n_times, nystagmus_type=nystagmus_type, s_freq=s_freq,
            nystagmus_freq=nystagmus_freq, nystagmus_amp=nystagmus_amp,
            saccad_freq=saccad_freq, saccad_amp=saccad_amp,
            dt_sigm=dt_sigm, curv=curv, low_freq_amp=low_freq_amp,
            std_noise=std_noise, display=display, random_state=rng
        )
        # Generate the signal
        trends[i], nystagmus[i], nyst_pattern = generate_signal(**args)
        nyst_patterns[i, :len(nyst_pattern)] = nyst_pattern
    print(f"Generate signals: done".ljust(40))
    return trends, nystagmus, nyst_patterns
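A minimal usage sketch for this simulator (hypothetical parameter values):

trends, nystagmus, patterns = load_data(n_trials=5, n_times=5000,
                                        std_noise=.3, random_state=42)
# trends and nystagmus have shape (5, 5000); patterns has shape (5, 1000)
# since each pattern is stored in a one-second (s_freq samples) buffer.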
Example #14
def test_update_z_sample_weights():
    """Test z update with weights."""
    rng = check_random_state(42)
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    b_hat_0 = rng.randn(n_atoms * (n_times - n_times_atom + 1))

    # Having sample_weights all identical is equivalent to having
    # sample_weights=None and a scaled regularization
    factor = 1.6
    sample_weights = np.ones_like(X) * factor
    for solver in ('l_bfgs', 'ista', 'fista'):
        z_0 = update_z(X,
                       ds,
                       reg * factor,
                       n_times_atom,
                       solver=solver,
                       solver_kwargs=dict(factr=1e7, max_iter=50),
                       b_hat_0=b_hat_0.copy(),
                       sample_weights=sample_weights)
        z_1 = update_z(X,
                       ds,
                       reg,
                       n_times_atom,
                       solver=solver,
                       solver_kwargs=dict(factr=1e7, max_iter=50),
                       b_hat_0=b_hat_0.copy(),
                       sample_weights=None)
        assert_allclose(z_0, z_1, rtol=1e-4)

    # All solvers should give the same results
    sample_weights = np.abs(rng.randn(*X.shape))
    sample_weights /= sample_weights.mean()
    z_list = []
    for solver in ('l_bfgs', 'ista', 'fista'):
        z_hat = update_z(X,
                         ds,
                         reg,
                         n_times_atom,
                         solver=solver,
                         solver_kwargs=dict(factr=1e7, max_iter=2000),
                         b_hat_0=b_hat_0.copy(),
                         sample_weights=sample_weights)
        z_list.append(z_hat)
    assert_allclose(z_list[0][z != 0], z_list[1][z != 0], rtol=1e-3)
    assert_allclose(z_list[0][z != 0], z_list[2][z != 0], rtol=1e-3)

    # And using no sample weights should give different results
    z_hat = update_z(X,
                     ds,
                     reg,
                     n_times_atom,
                     solver=solver,
                     solver_kwargs=dict(factr=1e7, max_iter=2000),
                     b_hat_0=b_hat_0.copy(),
                     sample_weights=None)
    assert_raises(AssertionError, assert_allclose, z_list[0][z != 0],
                  z_hat[z != 0], 1e-3)
Example #15
def test_uv_D():
    rng = check_random_state(42)
    n_times_atoms = 21
    n_atoms = 15
    n_channels = 30

    uv = rng.randn(n_atoms, n_channels + n_times_atoms)
    uv = prox_uv(uv, uv_constraint='separate', n_channels=n_channels)
    ds = get_D(uv, n_channels)
    uv_hat = get_uv(ds)

    assert_allclose(abs(uv / uv_hat), 1)
Example #16
def test_DtD():
    n_atoms = 10
    n_channels = 5
    n_times_atom = 50
    random_state = 42

    rng = check_random_state(random_state)

    uv = rng.randn(n_atoms, n_channels + n_times_atom)
    D = get_D(uv, n_channels)

    assert np.allclose(compute_DtD(uv, n_channels=n_channels), compute_DtD(D))
Example #17
def test_update_d():
    """Test vanilla d update."""
    rng = check_random_state(42)
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    ds_init = rng.randn(n_atoms, n_times_atom)

    # This number of iterations is 1 in the general case, but it needs to
    # be increased here so that update_d_block can be compared to update_d.
    n_iter_d_block = 5

    # All solvers should give the same results
    d_hat_0, _ = update_d(X, z, n_times_atom, lambd0=None, ds_init=ds_init)
    d_hat_1, _ = update_d_block(X, z, n_times_atom, lambd0=None,
                                ds_init=ds_init, n_iter=n_iter_d_block)
    assert np.allclose(d_hat_0, d_hat_1, rtol=1e-5)
Example #18
def test_sparse_convolve():
    rng = check_random_state(42)
    n_times = 128
    n_times_atoms = 21
    n_atoms = 3
    n_times_valid = n_times - n_times_atoms + 1
    density = 0.1
    zi = sparse.random(n_atoms, n_times_valid, density, random_state=rng)
    ds = rng.randn(n_atoms, n_times_atoms)
    zi = zi.toarray().reshape(n_atoms, n_times_valid)

    zd_0 = _dense_convolve(zi, ds)
    zd_1 = _sparse_convolve(zi, ds)
    zd_2 = _choose_convolve(zi, ds)
    assert_allclose(zd_0, zd_1, atol=1e-16)
    assert_allclose(zd_0, zd_2, atol=1e-16)
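The three functions are expected to agree with the naive "sum of per-atom full convolutions" computation. A reference sketch reusing zi, ds and zd_0 from the test above (the convention is an assumption, stated here for illustration):

zd_ref = sum(np.convolve(zk, dk) for zk, dk in zip(zi, ds))
assert np.allclose(zd_0, zd_ref, atol=1e-16)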
Example #19
def test_learn_codes_atoms_sample_weights(func_d, solver_z):
    """Test weighted CSC."""
    rng = check_random_state(42)
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    ds_init = rng.randn(n_atoms, n_times_atom)
    X += 0.1 * rng.randn(*X.shape)
    n_iter = 3
    reg = 0.1

    # sample_weights all equal to one is equivalent to sample_weights=None.
    sample_weights = np.ones_like(X)
    pobj_0, _, _, _ = learn_d_z(
        X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
        reg=reg, n_iter=n_iter, random_state=0, verbose=0,
        sample_weights=sample_weights, ds_init=ds_init)
    pobj_1, _, _, _ = learn_d_z(
        X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
        reg=reg, n_iter=n_iter, random_state=0, verbose=0,
        sample_weights=None, ds_init=ds_init)

    assert np.allclose(pobj_0, pobj_1)

    if getattr(func_d, "keywords", {}).get("projection") != 'primal':
        # sample_weights equal to 2 is equivalent to having twice the samples.
        # (with the regularization equal to zero)
        reg = 0.
        n_iter = 3
        n_duplicated = n_trials // 3
        sample_weights = np.ones_like(X)
        sample_weights[:n_duplicated] = 2
        X_duplicated = np.vstack([X[:n_duplicated], X])
        pobj_0, _, d_hat_0, z_hat_0 = learn_d_z(
            X, n_atoms, n_times_atom, func_d=func_d, solver_z=solver_z,
            reg=reg, n_iter=n_iter, random_state=0, verbose=0,
            sample_weights=sample_weights, ds_init=ds_init,
            solver_z_kwargs=dict(factr=1e9))
        pobj_1, _, d_hat_1, z_hat_1 = learn_d_z(
            X_duplicated, n_atoms, n_times_atom, func_d=func_d,
            solver_z=solver_z, reg=reg, n_iter=n_iter, random_state=0,
            verbose=0, sample_weights=None, ds_init=ds_init,
            solver_z_kwargs=dict(factr=1e9))

        pobj_1 /= pobj_0[0]
        pobj_0 /= pobj_0[0]
        assert np.allclose(pobj_0, pobj_1, rtol=0, atol=1e-3)
Example #20
def test_update_d_sample_weights():
    """Test d update with weights."""
    rng = check_random_state(42)
    X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
    ds_init = rng.randn(n_atoms, n_times_atom)
    # we need noise to have different results with different sample_weights
    X += 0.1 * rng.randn(*X.shape)

    # This number of iterations is 1 in the general case, but it needs to
    # be increased here so that update_d_block can be compared to update_d.
    n_iter = 5
    func_d_0 = partial(update_d_block, projection='dual', n_iter=n_iter)
    func_d_1 = partial(update_d_block, projection='primal', n_iter=n_iter,
                       solver_kwargs=dict(factr=1e3))
    func_d_list = [func_d_0, func_d_1, update_d]

    # Having sample_weights all identical is equivalent to having
    # sample_weights=None
    factor = 1.6
    sample_weights = np.ones_like(X) * factor
    for func_d in func_d_list:
        d_hat_0, _ = func_d(X, z, n_times_atom, lambd0=None, ds_init=ds_init,
                            sample_weights=sample_weights)
        d_hat_1, _ = func_d(X, z, n_times_atom, lambd0=None, ds_init=ds_init,
                            sample_weights=None)
        assert np.allclose(d_hat_0, d_hat_1, rtol=1e-5)

    # All solvers should give the same results
    sample_weights = np.abs(rng.randn(*X.shape))
    sample_weights /= sample_weights.mean()
    d_hat_list = []
    for func_d in func_d_list:
        d_hat, _ = func_d(X, z, n_times_atom, lambd0=None, ds_init=ds_init,
                          sample_weights=sample_weights)
        d_hat_list.append(d_hat)
    for d_hat in d_hat_list[1:]:
        assert np.allclose(d_hat, d_hat_list[0], rtol=1e-5)

    # And using no sample weights should give different results
    for func_d in func_d_list:
        d_hat_2, _ = func_d(X, z, n_times_atom, lambd0=None, ds_init=ds_init,
                            sample_weights=None)
        with pytest.raises(AssertionError):
            assert np.allclose(d_hat, d_hat_2, 1e-7)
Example #21
def test_construct_X():
    rng = check_random_state(42)
    n_times_atoms, n_times = 21, 128
    n_atoms = 3
    n_trials, n_channels = 29, 7
    n_times_valid = n_times - n_times_atoms + 1
    density = 0.1
    zi = sparse.random(n_atoms * n_trials,
                       n_times_valid,
                       density,
                       random_state=rng).toarray().reshape(
                           (n_trials, n_atoms, n_times_valid))
    uv = rng.randn(n_atoms, n_channels + n_times_atoms)
    ds = get_D(uv, n_channels)

    X_uv = construct_X_multi(zi, D=uv, n_channels=n_channels)
    X_ds = construct_X_multi(zi, D=ds)

    assert_allclose(X_uv, X_ds, atol=1e-16)
Example #22
def test_unbiased_z_hat(solver_z):
    n_trials, n_channels, n_times = 2, 3, 30
    n_times_atom, n_atoms = 6, 4

    loss_params = dict(gamma=1, sakoe_chiba_band=10, ordar=10)

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)

    _, _, _, z_hat, _ = learn_d_z_multi(X,
                                        n_atoms,
                                        n_times_atom,
                                        uv_constraint='auto',
                                        rank1=False,
                                        solver_d='auto',
                                        random_state=0,
                                        unbiased_z_hat=False,
                                        n_iter=1,
                                        eps=-np.inf,
                                        solver_z=solver_z,
                                        window=False,
                                        verbose=0,
                                        loss='l2',
                                        loss_params=loss_params)

    _, _, _, z_hat_unbiased, _ = learn_d_z_multi(X,
                                                 n_atoms,
                                                 n_times_atom,
                                                 uv_constraint='auto',
                                                 rank1=False,
                                                 solver_d='auto',
                                                 random_state=0,
                                                 unbiased_z_hat=True,
                                                 n_iter=1,
                                                 eps=-np.inf,
                                                 solver_z=solver_z,
                                                 window=False,
                                                 verbose=0,
                                                 loss='l2',
                                                 loss_params=loss_params)

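    # unbiased_z_hat is expected to refit the activations on the support
    # selected by the l1-penalized solve, so the support can only shrink:
    # entries that are zero in z_hat must remain zero in z_hat_unbiased.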
    assert np.all(z_hat_unbiased[z_hat == 0] == 0)
Example #23
def test_sparse_convolve_uv():
    rng = check_random_state(42)
    n_times = 128
    n_channels = 5
    n_times_atom = 21
    n_atoms = 3
    n_times_valid = n_times - n_times_atom + 1
    density = 0.1
    zi_lil = sparse.random(n_atoms,
                           n_times_valid,
                           density,
                           format='lil',
                           random_state=rng)
    ds = rng.randn(n_atoms, n_channels + n_times_atom)
    zi = zi_lil.toarray().reshape(n_atoms, n_times_valid)

    zd_0 = _dense_convolve_multi_uv(zi, ds, n_channels)
    zd_1 = np.zeros_like(zd_0)
    zd_1 = cython_code._fast_sparse_convolve_multi_uv(zi_lil, ds, n_channels)
    assert_allclose(zd_0, zd_1, atol=1e-16)
Example #24
def rng():
    return check_random_state(42)
Example #25
def generate_signal(n_times=5000, s_freq=1000, nystagmus_type="pendular",
                    nystagmus_freq=4, curv=0, saccad_freq=.5, dt_sigm=0.1,
                    std_noise=.3, nystagmus_amp=MEAN_AMPLITUDES['nystagmus'],
                    saccad_amp=MEAN_AMPLITUDES['saccad'],
                    low_freq_amp=MEAN_AMPLITUDES['low_freq'],
                    display=False, random_state=None):
    """Generate a fac-simile of a nystagmus signal

    In this helper, all amplitudes are measured as the standard deviation of
    the corresponding signal.

    Parameters
    ----------
    n_times: int (default: 5000)
        Length of the generated signal.
    s_freq: float (default: 1000)
        Sampling frequency for the signals
    nystagmus_type: str (default: 'pendular')
        Type of nystagmic pattern included in the signal. This should be one
        of {NYSTAGMUS_TYPES}.
    nystagmus_freq: float (default: 4)
        Frequency of the nystagmus signal.
    curv: float (default: 0)
        Curvature of the jerk patterns for the nystagmus.
    saccad_freq: float (default: .5)
        Frequency of the saccade signal.
    dt_sigm: float (default: .1)
        Deviation of the sigmoid used to generate the saccade signal. Each
        saccade lasts dt_sigm seconds.
    std_noise: float (default: .3)
        Standard deviation of the additive noise.
    nystagmus_amp: float (default: MEAN_AMPLITUDES['nystagmus'])
        Amplitude of the nystagmus signal.
    saccad_amp: float (default: MEAN_AMPLITUDES['saccad'])
        Amplitude of the saccade signal.
    low_freq_amp: float (default: MEAN_AMPLITUDES['low_freq'])
        Amplitude of the generated low frequency signal.
    display: boolean (default: False)
        If set to True, displays the resulting signal.
    random_state: int, RandomState or None (default: None)
        random_state for the random number generator

    """

    rng = check_random_state(random_state)

    t = np.arange(n_times) / s_freq

    # Generate a low frequency signal by generating low frequency Fourier
    # coefficients.
    max_freq_idx = max(1, int(n_times / (2 * s_freq)))
    freq_signal = np.zeros(n_times // 2 + 1, dtype=complex)
    for i in range(max_freq_idx):
        freq_signal[i] = rng.randn() + rng.randn() * 1j
    signal = np.fft.irfft(freq_signal)
    signal -= signal.mean()

    if np.std(signal) > 1e-5:
        signal *= low_freq_amp / np.std(signal)

    # Generate saccade locations, amplitudes, and signs
    end_last_sacc = 0
    saccs = []
    while end_last_sacc < n_times / s_freq:
        sign = 2 * (rng.rand() > .5) - 1
        amp = 1.5 + .5 * rng.randn()
        dt = rng.exponential(saccad_freq)
        start = end_last_sacc + dt
        end_last_sacc = start + dt_sigm
        saccs.append((start, end_last_sacc, sign, amp))

    # Generate the saccade signal as a sum of sigmoids with random signs
    # and amplitudes
    saccsignal = np.zeros(n_times)
    for i, tt in enumerate(t):
        for start, end, sign, amp in saccs:
            if start < tt:
                saccsignal[i] += sign * amp * sigmoid(i-start-50, dt_sigm)

    if np.std(saccsignal) > 1e-5:
        saccsignal *= saccad_amp / np.std(saccsignal)

    # Create the gaze signal as the sum of the low-freq signal, the saccade
    # signal and a noise term.
    signal = signal + saccsignal
    signal += std_noise * rng.randn(n_times)

    # Create the nystagmus signal depending on its type.
    t_pattern = np.arange(s_freq/nystagmus_freq) / s_freq * nystagmus_freq
    if nystagmus_type == "pendular":
        nyst = np.cos(2 * np.pi * t * nystagmus_freq)
        nyst_pattern = np.cos(2 * np.pi * t_pattern)
    elif "jerk" in nystagmus_type:
        sign = -1 if "_down_" in nystagmus_type else 1
        nyst = sign * jerk((t * nystagmus_freq % 1), curv)
        nyst_pattern = sign * jerk((t_pattern % 1), curv)
        if "_fs_" in nystagmus_type:
            nyst = nyst[::-1]
            nyst_pattern = nyst_pattern[::-1]
    else:
        raise NotImplementedError(f"Unknown nystagmus type {nystagmus_type}. "
                                  f"Should be one of {NYSTAGMUS_TYPES}.")
    nyst *= nystagmus_amp / np.std(nyst)

    if display:
        print(nystagmus_freq)
        plt.plot(nyst)
        # plt.plot(signal + nyst)
        plt.plot(nyst_pattern)
        plt.show()

    return signal, nyst, nyst_pattern
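A minimal usage sketch (hypothetical parameter values):

signal, nyst, nyst_pattern = generate_signal(n_times=2000,
                                             nystagmus_type="pendular",
                                             nystagmus_freq=4,
                                             random_state=0)
x = signal + nyst  # the observable trace combines both components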
Example #26
def load_data(n_trials=40,
              n_channels=1,
              T=4,
              sigma=.05,
              sfreq=300,
              f_noise=True,
              random_state=None,
              n_jobs=4):
    """Simulate data following the convolutional model

    Parameters
    ----------
    n_trials : int
        Number of simulated signals
    n_channels : int
        Number of channels in the signals
    T : float
        Length of the generated signals, in seconds. The generated signal
        will have length T * sfreq
    sigma : float
        Additive noise level in the signal
    sfreq : int
        Sampling rate for the signal
    f_noise : boolean
        If set to True, use MNE empty room data as a noise in the signal
    random_state : int or None
        State to seed the random number generator
    n_jobs : int
        Number of processes used to filter the f_noise

    Returns
    -------
    signal : (n_trials, n_channels, n_times)
    """
    rng = check_random_state(random_state)

    freq = 10  # Generate 10Hz mu-wave
    phase_shift = rng.rand(n_trials, 1) * sfreq * np.pi
    t0 = 1.8 + .4 * rng.rand(n_trials, 1)
    L = 1. + .5 * rng.rand(n_trials, 1)
    t = (np.linspace(0., T, int(T * sfreq)))
    mask = (t[None] > t0) * (t[None] < t0 + L)
    t *= 2 * np.pi * freq
    t = t[None] + phase_shift
    # plt.plot(t.T)
    noisy_phase = .5 * np.sin(t / (3 * np.sqrt(2)))
    phi_t = t + noisy_phase
    signal = np.sin(phi_t + np.cos(phi_t) * mask)

    U = rng.randn(1, n_channels, 1)
    U_mu = rng.randn(1, n_channels, 1)
    U /= np.sqrt((U * U).sum()) * np.sign(U.sum())
    U_mu /= np.sqrt((U_mu * U_mu).sum()) * np.sign(U_mu.sum())
    signal = (U + (U_mu - U) * mask[:, None]) * signal[:, None]

    # signal += sigma * rng.randn(*signal.shape)

    # generate noise
    if f_noise:
        data_path = os.path.join(mne.datasets.sample.data_path(), 'MEG',
                                 'sample')
        raw = mne.io.read_raw_fif(os.path.join(data_path, 'ernoise_raw.fif'),
                                  preload=True)

        raw.pick_types(meg='mag')
        nyquist = raw.info['sfreq'] / 2.
        raw.notch_filter(np.arange(60, nyquist - 10., 60), n_jobs=n_jobs)
        raw.filter(.5, None, n_jobs=n_jobs)
        X = raw[:][0]
        X /= X.std(axis=1, keepdims=True)
        max_channels, T_max = X.shape

        channels = rng.choice(max_channels, n_channels)
        L_sig = int(T * sfreq)
        for i in range(n_trials):
            t = rng.choice(T_max - L_sig)
            signal[i] += sigma * X[channels, t:t + L_sig]
    else:
        signal += sigma * rng.randn(*signal.shape)

    signal *= tukey(signal.shape[-1], alpha=0.05)[None, None, :]

    info = {}
    info['u'] = np.r_[U[:, :, 0], U_mu[:, :, 0]]

    return signal, info
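A minimal usage sketch (hypothetical parameter values; f_noise=False avoids downloading the MNE sample dataset):

X, info = load_data(n_trials=4, n_channels=3, T=2.,
                    f_noise=False, random_state=0)
# X has shape (4, 3, 600) since sfreq defaults to 300; info['u'] stacks
# the two simulated topomaps.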
Example #27
def generate_D_init(n_atoms, n_channels, n_times_atom, random_state):
    rng = check_random_state(random_state)
    return rng.randn(n_atoms, n_channels + n_times_atom)
Example #28
File: swm.py Project: wmvanvliet/alphacsc
def sliding_window_matching(x,
                            L,
                            G,
                            max_iterations=500,
                            T=1,
                            window_starts_custom=None,
                            random_state=None):
    """Find recurring patterns in a time series using SWM algorithm.

    Parameters
    ----------
    x : array-like 1d
        voltage time series
    L : int
        window length (in samples)
    G : int
        minimum window spacing (in samples)
    T : float
        temperature parameter. Controls probability of accepting a new window
    max_iterations : int
        Maximum number of iterations of potential changes in window placement
    window_starts_custom : np.ndarray (1d)
        Pre-set locations of initial windows (instead of evenly spaced by 2G)
    random_state : int
        The random state

    Returns
    -------
    avg_window : ndarray (1d)
        The average waveform in x.
    window_starts : ndarray (1d)
        Indices at which each window begins for the final set of windows
    J : np.ndarray (1d)
        Cost function value at each iteration

    References
    ----------
    Gips, B., Bahramisharif, A., Lowet, E., Roberts, M. J., de Weerd, P.,
    Jensen, O., & van der Eerden, J. (2017). Discovering recurring
    patterns in electrophysiological recordings.
    Journal of Neuroscience Methods, 275, 66-79.
    MATLAB code: https://github.com/bartgips/SWM

    Notes
    -----
    * Apply a highpass filter if looking at high frequency activity,
      so that it does not converge on a low frequency motif
    * L and G should be chosen to be about the size of the motif of interest
    """
    rng = check_random_state(random_state)

    # Initialize window positions, separated by 2*G
    if window_starts_custom is None:
        window_starts = np.arange(0, len(x) - L, 2 * G)
    else:
        window_starts = window_starts_custom
    N_windows = len(window_starts)

    # Calculate initial cost
    J = np.zeros(max_iterations)
    J[0] = _compute_J(x, window_starts, L)

    # Randomly sample windows with replacement
    random_window_idx = rng.choice(range(N_windows), size=max_iterations)

    # For each iteration, randomly replace a window with a new window
    # to improve cross-window similarity
    for idx in range(1, max_iterations):
        # Pick a random window position
        window_idx_replace = random_window_idx[idx]

        # Find a new allowed position for the window
        window_starts_temp = np.copy(window_starts)
        window_starts_temp[window_idx_replace] = _find_new_windowidx(
            window_starts, G, L,
            len(x) - L, rng)

        # Calculate the cost with replaced windows
        J_temp = _compute_J(x, window_starts_temp, L)

        # Calculate the change in cost function
        deltaJ = J_temp - J[idx - 1]

        # Calculate the acceptance probability
        p_accept = np.exp(-deltaJ / float(T))

        # Accept update to J with a certain probability
        if rng.rand() < p_accept:
            J[idx] = J_temp
            # Update the window positions
            window_starts = window_starts_temp
        else:
            J[idx] = J[idx - 1]

        print('[iter %03d] Cost function: %s' % (idx, J[idx]))

    # Calculate average window
    avg_window = np.zeros(L)
    for w in range(N_windows):
        avg_window += x[window_starts[w]:window_starts[w] + L]
    avg_window = avg_window / float(N_windows)

    return avg_window, window_starts, J
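A minimal usage sketch on a synthetic rhythmic signal (hypothetical parameter values; L and G are in samples, see the docstring):

rng = check_random_state(0)
x = np.sin(2 * np.pi * np.arange(4000) / 100.) + .1 * rng.randn(4000)
avg_window, window_starts, J = sliding_window_matching(
    x, L=100, G=50, max_iterations=200, random_state=0)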
Example #29
def learn_atoms(X,
                n_atoms,
                n_times_atom,
                n_iter=10,
                max_shift=11,
                random_state=None):
    """Learn atoms using the MoTIF algorithm.

    Parameters
    ----------
    X : array, shape (n_trials, n_times)
        The data on which to apply MoTIF.
    n_atoms : int
        The number of atoms.
    n_times_atom : int
        The support of the atoms
    n_iter : int
        The number of iterations
    max_shift : int
        The maximum allowable shift for the atoms.
    random_state : int | None
        The random initialization.
    """

    rng = check_random_state(random_state)

    n_trials, n_times = X.shape

    atoms = rng.rand(n_atoms, n_times_atom)
    corrs = np.zeros(n_trials)
    match = np.zeros((n_atoms, n_trials), dtype=int)

    # loop through atoms
    for k in range(n_atoms):

        aligned_data = np.zeros((n_times_atom, n_trials))

        # compute Bk
        B = np.zeros((n_times_atom, n_times_atom), order='F')
        for l in range(k):
            for p in np.arange(max_shift):
                atom_shifted = np.roll(atoms[l], -p)[np.newaxis, :]
                # B += np.dot(atom_shifted.T, atom_shifted)
                B = blas.dger(1,
                              atom_shifted,
                              atom_shifted,
                              a=B,
                              overwrite_a=1)

        # make B invertible by adding a full-rank matrix
        B += np.eye(B.shape[0]) * np.finfo(np.float32).eps

        for i in range(n_iter):
            print('[seed %s] Atom %d Iteration %d' % (random_state, k, i))
            # loop through training data
            for n in range(n_trials):
                # ### STEP 1: Find out where the data and atom align ####

                # which of these to use for template matching?
                vec1 = (X[n] - np.mean(X[n])) / (np.std(X[n]) * len(X[n]))
                vec2 = (atoms[k] - np.mean(atoms[k])) / np.std(atoms[k])
                tmp = np.abs(correlate(vec1, vec2, 'same'))

                offset = n_times_atom // 2
                match[k, n] = tmp[offset:-offset].argmax() + offset
                corrs[n] = tmp[match[k, n]]

                # aligned_data[:, n] = np.roll(X[n], -shift[n])[:n_times_atom]
                aligned_data[:, n] = X[n, match[k, n] - offset:match[k, n] +
                                       offset].copy()

            # ### STEP 2: Solve the generalized eigenvalue problem ####
            A = np.dot(aligned_data, aligned_data.T).copy()
            if k == 0:
                B = None
            e, U = eigh(A, B)
            # e, U = eigh(A)

            atoms[k, :] = U[:, -1] / np.linalg.norm(U[:, -1])
    return atoms
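A minimal usage sketch (hypothetical shapes; X must be 2d, trials by times):

rng = check_random_state(0)
X = rng.randn(20, 512)
atoms = learn_atoms(X, n_atoms=2, n_times_atom=64, n_iter=3,
                    max_shift=11, random_state=0)  # atoms: (2, 64)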
Example #30
def load_data(n_trials=40,
              n_channels=1,
              n_times=6,
              sigma=.05,
              sfreq=300,
              f_noise=True,
              random_state=None,
              n_jobs=4):
    """Simulate data with 10Hz mu-wave and 10Hz oscillations.

    Parameters
    ----------
    n_trials : int
        Number of simulated signals
    n_channels : int
        Number of channels in the signals
    n_times : float
        Length of the generated signals, in seconds. The generated signal
        will have length n_times * sfreq
    sigma : float
        Additive noise level in the signal
    sfreq : int
        Sampling rate for the signal
    f_noise : boolean
        If set to True, use MNE empty room data as a noise in the signal
    n_jobs : int
        Number of processes used to filter the f_noise
    random_state : int | None
        State to seed the random number generator

    Returns
    -------
    X : ndarray, shape (n_trials, n_channels, n_times)
        Simulated 10Hz sinusoidal signals with a 10Hz mu-wave between 2 s and
        [3, 3.5] s. Random phases are applied to the different parts of the
        signal.
    info : dict
        Contains the topomap 'u' associated to each component of the signal.
    """
    rng = check_random_state(random_state)

    freq = 10  # Generate 10Hz mu-wave
    phase_shift = rng.rand(n_trials, 1) * sfreq * np.pi
    t0 = 2
    L = 1. + .5 * rng.rand(n_trials, 1)
    t = (np.linspace(0., n_times, int(n_times * sfreq)))
    mask = (t[None] > t0) * (t[None] < t0 + L)
    t *= 2 * np.pi * freq
    t = t[None] + phase_shift
    noisy_phase = .5 * np.sin(t / (3 * np.sqrt(2)))
    phi_t = t + noisy_phase
    signal = np.sin(phi_t + np.cos(phi_t) * mask)

    U = rng.randn(1, n_channels, 1)
    U_mu = rng.randn(1, n_channels, 1)
    U /= np.sqrt((U * U).sum()) * np.sign(U.sum())
    U_mu /= np.sqrt((U_mu * U_mu).sum()) * np.sign(U_mu.sum())
    signal = (U + (U_mu - U) * mask[:, None]) * signal[:, None]

    # signal += sigma * rng.randn(*signal.shape)

    # generate noise from empty room noise in the mne.sample dataset
    if f_noise:
        data_path = os.path.join(mne.datasets.sample.data_path(), 'MEG',
                                 'sample')
        raw = mne.io.read_raw_fif(os.path.join(data_path, 'ernoise_raw.fif'),
                                  preload=True)

        raw.pick_types(meg='mag')
        nyquist = raw.info['sfreq'] / 2.
        raw.notch_filter(np.arange(60, nyquist - 10., 60), n_jobs=n_jobs)
        raw.filter(.5, None, n_jobs=n_jobs)
        X = raw[:][0]
        X /= X.std(axis=1, keepdims=True)
        max_channels, T_max = X.shape

        channels = rng.choice(max_channels, n_channels)
        L_sig = int(n_times * sfreq)
        for i in range(n_trials):
            t = rng.choice(T_max - L_sig)
            signal[i] += sigma * X[channels, t:t + L_sig]
    else:
        signal += sigma * rng.randn(*signal.shape)

    signal *= tukey(signal.shape[-1], alpha=0.05)[None, None, :]

    info = {}
    info['u'] = np.r_[U[:, :, 0], U_mu[:, :, 0]]

    return signal, info