Example #1
def test_window(rank1, solver_d, uv_constraint):
    # Smoke test that the parameter window does something
    n_trials, n_channels, n_times = 2, 3, 100
    n_times_atom, n_atoms = 10, 4

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)

    *_, uv_constraint_ = check_solver_and_constraints(rank1, solver_d,
                                                      uv_constraint)

    D_init = init_dictionary(X,
                             n_atoms,
                             n_times_atom,
                             rank1=rank1,
                             uv_constraint=uv_constraint_,
                             random_state=0)

    kwargs = dict(X=X,
                  n_atoms=n_atoms,
                  n_times_atom=n_times_atom,
                  verbose=0,
                  uv_constraint=uv_constraint,
                  solver_d=solver_d,
                  rank1=rank1,
                  random_state=0,
                  n_iter=1,
                  solver_z='l-bfgs',
                  D_init=D_init)
    res_False = learn_d_z_multi(window=False, **kwargs)
    res_True = learn_d_z_multi(window=True, **kwargs)

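    # The learned dictionaries (third return value) should differ once windowing is enabled.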
    assert not np.allclose(res_False[2], res_True[2])
Example #2
def test_online_learning():
    # smoke test for learn_d_z_multi
    n_trials, n_channels, n_times = 2, 3, 100
    n_times_atom, n_atoms = 10, 4

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)
    pobj_0, _, _, _, _ = learn_d_z_multi(X,
                                         n_atoms,
                                         n_times_atom,
                                         uv_constraint="separate",
                                         solver_d="joint",
                                         random_state=0,
                                         n_iter=30,
                                         solver_z='l-bfgs',
                                         algorithm="batch",
                                         loss='l2')

    pobj_1, _, _, _, _ = learn_d_z_multi(X,
                                         n_atoms,
                                         n_times_atom,
                                         uv_constraint="separate",
                                         solver_d="joint",
                                         random_state=0,
                                         n_iter=30,
                                         solver_z='l-bfgs',
                                         algorithm="online",
                                         algorithm_params=dict(
                                             batch_size=n_trials, alpha=0),
                                         loss='l2')

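    # With batch_size=n_trials and alpha=0, the online algorithm should reproduce
    # the batch objective values.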
    assert np.allclose(pobj_0, pobj_1)
Example #3
def test_learn_d_z_multi_dicodile(window):
    pytest.importorskip('dicodile')
    # smoke test for learn_d_z_multi
    # XXX For DiCoDiLe, n_trials cannot be >1
    n_trials, n_channels, n_times = 1, 3, 30
    n_times_atom, n_atoms = 6, 4

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)
    pobj, times, uv_hat, z_hat, reg = learn_d_z_multi(X,
                                                      n_atoms,
                                                      n_times_atom,
                                                      uv_constraint='auto',
                                                      rank1=False,
                                                      solver_d='auto',
                                                      random_state=0,
                                                      n_iter=30,
                                                      eps=-np.inf,
                                                      solver_z='dicodile',
                                                      window=window,
                                                      verbose=0,
                                                      loss='l2',
                                                      loss_params=None)

    msg = "Cost function does not go down"

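    # The objective must be non-increasing (up to a 1e-13 tolerance); on failure,
    # plot it to ease debugging.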
    try:
        assert np.sum(np.diff(pobj) > 1e-13) == 0, msg
    except AssertionError:
        import matplotlib.pyplot as plt
        plt.semilogy(pobj - np.min(pobj) + 1e-6)
        plt.title(msg)
        plt.show()
        raise
Example #4
def test_learn_d_z_multi(loss, solver_d, uv_constraint, rank1, window):
    # smoke test for learn_d_z_multi
    n_trials, n_channels, n_times = 2, 3, 30
    n_times_atom, n_atoms = 6, 4

    loss_params = dict(gamma=1, sakoe_chiba_band=10, ordar=10)

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)
    pobj, times, uv_hat, z_hat, reg = learn_d_z_multi(
        X, n_atoms, n_times_atom, uv_constraint=uv_constraint, rank1=rank1,
        solver_d=solver_d, random_state=0, n_iter=30, eps=-np.inf,
        solver_z='l-bfgs', window=window, verbose=0, loss=loss,
        loss_params=loss_params)

    msg = "Cost function does not go down for uv_constraint {}".format(
        uv_constraint)

    try:
        assert np.sum(np.diff(pobj) > 1e-13) == 0, msg
    except AssertionError:
        import matplotlib.pyplot as plt
        plt.semilogy(pobj - np.min(pobj) + 1e-6)
        plt.title(msg)
        plt.show()
        raise
Example #5
def run_multichannel_gcd(X, ds_init, reg, n_iter, random_state, label):
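    # Accept both univariate (n_trials, n_times) and multichannel
    # (n_trials, n_channels, n_times) input.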
    if X.ndim == 2:
        n_atoms, n_times_atom = ds_init.shape
        ds_init = np.c_[np.ones((n_atoms, 1)), ds_init]
        X = X[:, None, :]
    else:
        n_atoms, n_channels, n_times_atom = ds_init.shape
        ds_init = get_uv(ds_init)  # project init to rank 1

    solver_z_kwargs = dict(max_iter=2, tol=1e-3)
    pobj, times, d_hat, z_hat, reg = learn_d_z_multi(
        X,
        n_atoms,
        n_times_atom,
        solver_d='alternate_adaptive',
        solver_z="lgcd",
        uv_constraint='separate',
        eps=-np.inf,
        solver_z_kwargs=solver_z_kwargs,
        reg=reg,
        solver_d_kwargs=dict(max_iter=100),
        n_iter=n_iter,
        random_state=random_state,
        raise_on_increase=False,
        D_init=ds_init,
        n_jobs=1,
        verbose=verbose)

    # remove the ds init duration
    times[0] = 0

    return pobj[::2], np.cumsum(times)[::2], d_hat, z_hat
Example #6
def run_multichannel_gcd_fullrank(X, ds_init, reg, n_iter, random_state,
                                  label):
    assert X.ndim == 3
    n_atoms, n_channels, n_times_atom = ds_init.shape

    solver_z_kwargs = dict(max_iter=2, tol=1e-3)
    pobj, times, d_hat, z_hat, reg = learn_d_z_multi(
        X,
        n_atoms,
        n_times_atom,
        solver_d='fista',
        solver_z="lgcd",
        uv_constraint='separate',
        eps=-np.inf,
        solver_z_kwargs=solver_z_kwargs,
        reg=reg,
        solver_d_kwargs=dict(max_iter=100),
        n_iter=n_iter,
        random_state=random_state,
        raise_on_increase=False,
        D_init=ds_init,
        n_jobs=1,
        verbose=verbose,
        rank1=False)

    # remove the ds init duration
    times[0] = 0

    return pobj[::2], np.cumsum(times)[::2], d_hat, z_hat
Example #7
def test_init_random(rank1, solver_d, uv_constraint):
    """"""
    n_trials, n_channels, n_times = 5, 3, 100
    n_times_atom, n_atoms = 10, 4

    _, uv_constraint_ = check_solver_and_constraints(rank1, solver_d,
                                                     uv_constraint)

    if rank1:
        expected_shape = (n_atoms, n_channels + n_times_atom)
        prox = functools.partial(prox_uv,
                                 uv_constraint=uv_constraint_,
                                 n_channels=n_channels)
    else:
        expected_shape = (n_atoms, n_channels, n_times_atom)
        prox = prox_d

    X = np.random.randn(n_trials, n_channels, n_times)

    # Test that init_dictionary is doing what we expect for D_init random
    random_state = 42
    D_hat = init_dictionary(X,
                            n_atoms,
                            n_times_atom,
                            D_init='random',
                            rank1=rank1,
                            uv_constraint=uv_constraint_,
                            random_state=random_state)
    rng = check_random_state(random_state)

    D_init = rng.randn(*expected_shape)
    D_init = prox(D_init)
    assert_allclose(D_hat,
                    D_init,
                    err_msg="The random state is not correctly "
                    "used in init_dictionary .")

    # Test that learn_d_z_multi is doing what we expect for D_init random
    random_state = 27
    _, _, D_hat, _, _ = learn_d_z_multi(X,
                                        n_atoms,
                                        n_times_atom,
                                        D_init='random',
                                        n_iter=0,
                                        rank1=rank1,
                                        solver_d=solver_d,
                                        uv_constraint=uv_constraint,
                                        random_state=random_state)

    rng = check_random_state(random_state)
    D_init = rng.randn(*expected_shape)
    D_init = prox(D_init)
    assert_allclose(D_hat,
                    D_init,
                    err_msg="The random state is not correctly "
                    "used in learn_d_z_multi.")
Example #8
def test_unbiased_z_hat(solver_z):
    n_trials, n_channels, n_times = 2, 3, 30
    n_times_atom, n_atoms = 6, 4

    loss_params = dict(gamma=1, sakoe_chiba_band=10, ordar=10)

    rng = check_random_state(42)
    X = rng.randn(n_trials, n_channels, n_times)

    _, _, _, z_hat, _ = learn_d_z_multi(X,
                                        n_atoms,
                                        n_times_atom,
                                        uv_constraint='auto',
                                        rank1=False,
                                        solver_d='auto',
                                        random_state=0,
                                        unbiased_z_hat=False,
                                        n_iter=1,
                                        eps=-np.inf,
                                        solver_z=solver_z,
                                        window=False,
                                        verbose=0,
                                        loss='l2',
                                        loss_params=loss_params)

    _, _, _, z_hat_unbiased, _ = learn_d_z_multi(X,
                                                 n_atoms,
                                                 n_times_atom,
                                                 uv_constraint='auto',
                                                 rank1=False,
                                                 solver_d='auto',
                                                 random_state=0,
                                                 unbiased_z_hat=True,
                                                 n_iter=1,
                                                 eps=-np.inf,
                                                 solver_z=solver_z,
                                                 window=False,
                                                 verbose=0,
                                                 loss='l2',
                                                 loss_params=loss_params)

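    # The unbiased estimate must keep the same support: entries that are zero
    # in z_hat must stay zero.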
    assert np.all(z_hat_unbiased[z_hat == 0] == 0)
Example #9
def run_multichannel(X, D_init, reg, n_iter, random_state,
                     label, n_channels):
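    # Rank-1 atoms are stored as uv vectors: n_channels spatial weights
    # followed by n_times_atom temporal samples.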
    n_atoms, n_channels_n_times_atom = D_init.shape
    n_times_atom = n_channels_n_times_atom - n_channels

    solver_z_kwargs = dict(max_iter=500, tol=1e-1)
    return learn_d_z_multi(
        X, n_atoms, n_times_atom, reg=reg, n_iter=n_iter,
        uv_constraint='separate', rank1=True, D_init=D_init,
        solver_d='alternate_adaptive', solver_d_kwargs=dict(max_iter=50),
        solver_z="lgcd", solver_z_kwargs=solver_z_kwargs, use_sparse_z=False,
        name="rank1-{}-{}".format(n_channels, random_state),
        random_state=random_state, n_jobs=1, verbose=VERBOSE)
Example #10
def run_one(reg, sigma, n_atoms, n_times_atom, max_n_channels, n_times_valid,
            n_iter, run_n_channels, random_state):
    """Run the benchmark for a given set of parameter."""

    X, uv, uv_init = get_signals(max_n_channels, n_times_atom, n_times_valid,
                                 sigma, random_state)

    reg_ = reg * get_lambda_max(X, uv_init).max()
    # reg_ *= run_n_channels

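    # Keep only the first run_n_channels spatial weights and the full temporal patterns.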
    uv_init_ = prox_uv(np.c_[uv_init[:, :run_n_channels],
                             uv_init[:, max_n_channels:]])
    uv_ = prox_uv(np.c_[uv[:, :run_n_channels], uv[:, max_n_channels:]],
                  uv_constraint='separate',
                  n_channels=max_n_channels)

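    # Optional progress callback (left disabled below; see the commented-out
    # callback=cb argument).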
    def cb(X, uv_hat, z_hat, pobj):
        it = len(pobj) // 2
        if it % 10 == 0:
            print("[channels{}] iter{} score sig={:.2e}: {:.3e}".format(
                run_n_channels, it, sigma, score_uv(uv_, uv_hat,
                                                    run_n_channels)))

    pobj, times, uv_hat, z_hat, reg = learn_d_z_multi(
        X[:, :run_n_channels, :],
        n_atoms,
        n_times_atom,
        random_state=random_state,
        # callback=cb,
        n_iter=n_iter,
        n_jobs=1,
        reg=reg_,
        uv_constraint='separate',
        solver_d='alternate_adaptive',
        solver_d_kwargs={'max_iter': 50},
        solver_z="lgcd",
        solver_z_kwargs=dict(tol=1e-3, maxiter=500),
        use_sparse_z=True,
        D_init=uv_init_,
        verbose=VERBOSE,
    )

    score = score_uv(uv_, uv_hat, run_n_channels)
    print("=" * 79 + "\n" +
          "[channels{}-{:.2e}-{}] iter {} score sig={:.2e}: {:.3e}\n".format(
              run_n_channels, reg, random_state,
              len(pobj) // 2, sigma, score) + "=" * 79)

    return random_state, sigma, run_n_channels, score, uv, uv_hat, reg
Example #11
def run_multivariate(X, D_init, reg, n_iter, random_state,
                     label, n_channels):
    n_atoms, n_channels_n_times_atom = D_init.shape
    n_times_atom = n_channels_n_times_atom - n_channels
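    # Expand the rank-1 uv initialization into a full (n_atoms, n_channels,
    # n_times_atom) dictionary, since rank1=False below.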
    D_init = get_D(D_init, n_channels)

    solver_z_kwargs = dict(max_iter=500, tol=1e-1)
    return learn_d_z_multi(
        X, n_atoms, n_times_atom, reg=reg, n_iter=n_iter,
        uv_constraint='separate', rank1=False, D_init=D_init,
        solver_d='l-bfgs', solver_d_kwargs=dict(max_iter=50),
        solver_z="lgcd", solver_z_kwargs=solver_z_kwargs, use_sparse_z=False,
        name="dense-{}-{}".format(n_channels, random_state),
        random_state=random_state, n_jobs=1, verbose=VERBOSE,
        raise_on_increase=False)
Example #12
def test_init_array(rank1, solver_d, uv_constraint):
    n_trials, n_channels, n_times = 5, 3, 100
    n_times_atom, n_atoms = 10, 4

    _, uv_constraint_ = check_solver_and_constraints(rank1, solver_d,
                                                     uv_constraint)

    if rank1:
        expected_shape = (n_atoms, n_channels + n_times_atom)
        prox = functools.partial(prox_uv,
                                 uv_constraint=uv_constraint_,
                                 n_channels=n_channels)
    else:
        expected_shape = (n_atoms, n_channels, n_times_atom)
        prox = prox_d

    X = np.random.randn(n_trials, n_channels, n_times)

    # Test that init_dictionary is doing what we expect for D_init array
    D_init = np.random.randn(*expected_shape)
    D_hat = init_dictionary(X,
                            n_atoms,
                            n_times_atom,
                            D_init=D_init,
                            rank1=rank1,
                            uv_constraint=uv_constraint_)

    D_init = prox(D_init)
    assert_allclose(D_hat, D_init)
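    # The returned dictionary must be a copy, not the same array object.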
    assert id(D_hat) != id(D_init)

    # Test that learn_d_z_multi is doing what we expect for D_init array
    D_init = np.random.randn(*expected_shape)
    _, _, D_hat, _, _ = learn_d_z_multi(X,
                                        n_atoms,
                                        n_times_atom,
                                        D_init=D_init,
                                        n_iter=0,
                                        rank1=rank1,
                                        solver_d=solver_d,
                                        uv_constraint=uv_constraint)

    D_init = prox(D_init)
    assert_allclose(D_hat, D_init)
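The snippets above come from the alphacsc test and benchmark suites and depend on helpers defined in those files. As a point of reference, below is a minimal, self-contained sketch of a direct call to learn_d_z_multi. It only uses parameter names that appear in the examples above; the import path and the concrete values are illustrative assumptions, not a canonical recipe.

import numpy as np

# Assumed import path; adjust to wherever learn_d_z_multi is defined in your
# installation of alphacsc.
from alphacsc import learn_d_z_multi

# Small synthetic dataset: 2 trials, 3 channels, 100 time points.
rng = np.random.RandomState(42)
X = rng.randn(2, 3, 100)

# Learn 4 rank-1 atoms of length 10 (illustrative settings only).
pobj, times, uv_hat, z_hat, reg = learn_d_z_multi(
    X, n_atoms=4, n_times_atom=10,
    rank1=True, uv_constraint='separate',
    solver_d='alternate_adaptive', solver_z='lgcd',
    n_iter=10, random_state=0, verbose=0)

# For rank-1 atoms, uv_hat stacks the spatial and temporal parts:
# (n_atoms, n_channels + n_times_atom).
print(uv_hat.shape)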