Example #1
def test_warm_start(valid_support, atom_support, reg):
    tol = 1
    n_atoms = 7
    n_channels = 5
    random_state = 36

    rng = check_random_state(random_state)

    D = rng.randn(n_atoms, n_channels, *atom_support)
    D /= np.sqrt(np.sum(D * D, axis=(1, 2), keepdims=True))
    z = rng.randn(n_atoms, *valid_support)
    z *= (rng.rand(n_atoms, *valid_support) > .7)

    X = reconstruct(z, D)

    # With reg=0 and a warm start at the exact codes z, dicod should leave
    # the solution unchanged: the reconstruction is already exact.
    z_hat, *_ = dicod(X, D, reg=0, z0=z, tol=tol, n_workers=N_WORKERS,
                      max_iter=10000, verbose=VERBOSE)
    assert np.allclose(z_hat, z)

    # Warm start from z on an unrelated random signal: at convergence no
    # single coordinate update should exceed tol.
    X = rng.randn(*X.shape)

    z_hat, *_ = dicod(X, D, reg, z0=z, tol=tol, n_workers=N_WORKERS,
                      max_iter=100000, verbose=VERBOSE)
    beta, dz_opt, _ = _init_beta(X, D, reg, z_i=z_hat)
    assert np.all(dz_opt <= tol)
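These snippets come from the dicodile test-suite and rely on module-level fixtures that the listing omits. Below is a minimal sketch of the preamble they assume; the import paths, the N_WORKERS/VERBOSE defaults and the parametrized problem sizes are assumptions modeled on the dicodile package layout, not the upstream values.

# Hypothetical test-module preamble for the examples above; import paths
# and constant values are assumptions, not the upstream code.
import os
import numpy as np
import pytest

from dicodile.update_z.dicod import dicod
from dicodile.update_z.coordinate_descent import _init_beta
from dicodile.utils import check_random_state
from dicodile.utils.csc import reconstruct, compute_objective
from dicodile.utils.shape_helpers import get_full_support

N_WORKERS = int(os.environ.get("N_WORKERS", 4))   # workers used by the tests
VERBOSE = int(os.environ.get("VERBOSE", 0))       # verbosity passed to dicod


# The (valid_support, atom_support, reg) arguments are pytest parameters,
# e.g. one 1D and one 2D problem size (values here are illustrative only).
@pytest.mark.parametrize('valid_support, atom_support',
                         [((500,), (30,)), ((72, 60), (10, 8))])
@pytest.mark.parametrize('reg', [0, .5])
def test_warm_start(valid_support, atom_support, reg):
    ...  # body as in Example #1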
Example #2
def test_freeze_support(valid_support, atom_support):
    tol = .5
    reg = 0
    n_atoms = 7
    n_channels = 5
    random_state = None

    sig_support = get_full_support(valid_support, atom_support)

    rng = check_random_state(random_state)

    D = rng.randn(n_atoms, n_channels, *atom_support)
    D /= np.sqrt(np.sum(D * D, axis=(1, 2), keepdims=True))
    z = rng.randn(n_atoms, *valid_support)
    z *= rng.rand(n_atoms, *valid_support) > .5

    X = rng.randn(n_channels, *sig_support)

    # With freeze_support=True and an all-zero warm start, the support stays
    # empty, so the solution must remain zero.
    z_hat, *_ = dicod(X, D, reg, z0=0 * z, tol=tol, n_workers=N_WORKERS,
                      max_iter=1000, freeze_support=True, verbose=VERBOSE)
    assert np.all(z_hat == 0)

    # Starting from z, only coefficients inside the support of z may change.
    z_hat, *_ = dicod(X, D, reg, z0=z, tol=tol, n_workers=N_WORKERS,
                      max_iter=1000, freeze_support=True, verbose=VERBOSE)

    assert np.all(z_hat[z == 0] == 0)
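get_full_support maps the valid support of the codes and the atom support to the full signal support. An equivalent one-liner is sketched below; the name comes from the examples, the body is an assumption consistent with the inline computation in Example #6.

def get_full_support(valid_support, atom_support):
    # Full convolution support: each axis grows by (atom size - 1).
    return tuple(v + a - 1 for v, a in zip(valid_support, atom_support))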
Example #3
def run_without_soft_lock(n_atoms=25,
                          atom_support=(12, 12),
                          reg=.01,
                          tol=5e-2,
                          n_workers=100,
                          random_state=60):
    rng = np.random.RandomState(random_state)

    X = get_mandril()
    D_init = init_dictionary(X, n_atoms, atom_support, random_state=rng)
    lmbd_max = get_lambda_max(X, D_init).max()
    reg_ = reg * lmbd_max

    z_hat, *_ = dicod(X,
                      D_init,
                      reg_,
                      max_iter=1000000,
                      n_workers=n_workers,
                      tol=tol,
                      strategy='greedy',
                      verbose=1,
                      soft_lock='none',
                      z_positive=False,
                      timing=False)
    pobj = compute_objective(X, z_hat, D_init, reg_)
    z_hat = np.clip(z_hat, -1e3, 1e3)
    print("[DICOD] final cost : {}".format(pobj))

    X_hat = reconstruct(z_hat, D_init)
    X_hat = np.clip(X_hat, 0, 1)
    return X_hat, pobj
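A possible way to call the helper above; this driver is illustrative and not part of the original script, and it assumes the channel-first (n_channels, height, width) layout for the image returned by get_mandril.

import numpy as np
import matplotlib.pyplot as plt

# Run with a modest number of workers and show the clipped reconstruction.
X_hat, pobj = run_without_soft_lock(n_workers=4)
plt.imshow(np.transpose(X_hat, (1, 2, 0)))
plt.title(f"DICOD without soft-lock, cost = {pobj:.2e}")
plt.axis('off')
plt.show()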
Example #4
def run_one(T, L, K, d, noise_level, seed_pb, n_jobs, reg, tol, strategy,
            common_args):

    X, D_hat = simulate_data(T, L, K, d, noise_level, seed=seed_pb)
    lmbd_max = get_lambda_max(X, D_hat)
    reg_ = reg * lmbd_max

    n_seg = 1
    strategy_ = strategy
    if strategy == 'lgcd':
        n_seg = 'auto'
        strategy_ = "greedy"

    *_, pobj, _ = dicod(X,
                        D_hat,
                        reg_,
                        n_jobs=n_jobs,
                        tol=tol,
                        strategy=strategy_,
                        n_seg=n_seg,
                        **common_args)
    print(pobj)

    return ResultItem(reg=reg,
                      n_jobs=n_jobs,
                      strategy=strategy,
                      tol=tol,
                      seed=seed_pb,
                      pobj=pobj)
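Helpers like run_one are usually mapped over a parameter grid by the enclosing benchmark script. The driver below is a hypothetical sketch using joblib; the grid values and the content of common_args are assumptions.

import itertools
import pandas as pd
from joblib import Parallel, delayed

common_args = dict(timing=False, verbose=1)        # assumed extra dicod args
grid = itertools.product([1e-1, 1e-2],             # reg
                         ['greedy', 'lgcd'],       # strategy
                         range(5))                 # seed_pb

results = Parallel(n_jobs=4)(
    delayed(run_one)(T=10_000, L=64, K=10, d=5, noise_level=.1,
                     seed_pb=seed, n_jobs=4, reg=reg, tol=1e-4,
                     strategy=strategy, common_args=common_args)
    for reg, strategy, seed in grid
)
df = pd.DataFrame(results)   # works if ResultItem is a namedtuple (assumed)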
Example #5
def run_one(n_workers, strategy, reg, n_times, tol, soft_lock, dicod_args,
            n_times_atom, n_atoms, n_channels, noise_level, random_state):

    tag = f"[{strategy} - {n_times} - {reg:.0e} - {random_state[0]}]"
    random_state = random_state[1]

    # Generate a problem
    t_start_generation = time.time()
    print(colorify(f"{tag} Signal generation..."), end='', flush=True)
    X, D_hat, lmbd_max = simulate_data(
        n_times=n_times, n_times_atom=n_times_atom, n_atoms=n_atoms,
        n_channels=n_channels, noise_level=noise_level,
        random_state=random_state)
    reg_ = reg * lmbd_max
    print(colorify(f"done ({time.time() - t_start_generation:.3f}s)."))

    *_, run_statistics = dicod(X, D_hat, reg_, n_workers=n_workers, tol=tol,
                               strategy=strategy, soft_lock=soft_lock,
                               **dicod_args)
    meta = dicod_args.copy()
    meta.update(n_atoms=n_atoms, n_times_atom=n_times_atom,
                n_channels=n_channels, noise_level=noise_level)
    runtime = run_statistics['runtime']

    print(colorify('=' * 79 +
                   f"\n{tag} End with {n_workers} workers in {runtime:.1e}\n" +
                   "=" * 79, color=GREEN))

    return ResultItem(n_workers=n_workers, strategy=strategy, reg=reg,
                      n_times=n_times, tol=tol, soft_lock=soft_lock, meta=meta,
                      random_state=random_state, **run_statistics)
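Note the (tag, seed) convention: random_state arrives as a pair whose first element only labels the log lines and whose second element seeds the data generation. A hypothetical way the enclosing grid could build such pairs:

import numpy as np

# Hypothetical: one reproducible seed per run, enumerated so each run can
# tag its output with its position in the grid.
seeds = np.random.RandomState(42).randint(int(1e6), size=5)
random_states = list(enumerate(seeds))   # [(0, s0), (1, s1), ...]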
Example #6
def test_ztz(valid_shape, atom_shape):
    tol = .5
    reg = .1
    n_atoms = 7
    n_channels = 5
    random_state = None

    sig_shape = tuple([
        (size_valid_ax + size_atom_ax - 1)
        for size_atom_ax, size_valid_ax in zip(atom_shape, valid_shape)])

    rng = check_random_state(random_state)

    X = rng.randn(n_channels, *sig_shape)
    D = rng.randn(n_atoms, n_channels, *atom_shape)
    D /= np.sqrt(np.sum(D * D, axis=(1, 2), keepdims=True))

    z_hat, ztz, ztX, *_ = dicod(X, D, reg, tol=tol, n_jobs=N_WORKERS,
                                return_ztz=True, verbose=VERBOSE)

    ztz_full = compute_ztz(z_hat, atom_shape)
    assert np.allclose(ztz_full, ztz)

    ztX_full = compute_ztX(z_hat, X)
    assert np.allclose(ztX_full, ztX)
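For context, ztz is the auto-correlation tensor of the activations restricted to lags smaller than the atom length, as used in the dictionary-update step; the test checks that the pieces aggregated by the workers match the value recomputed from the full z_hat. A 1D sketch of that quantity follows; the lag orientation used by compute_ztz itself may differ, so treat it as illustrative.

import numpy as np

def ztz_1d(z, atom_length):
    # Illustrative: ztz[k, p, tau] = sum_t z[k, t] * z[p, t + tau - (L - 1)].
    n_atoms, T = z.shape
    L = atom_length
    ztz = np.empty((n_atoms, n_atoms, 2 * L - 1))
    for k in range(n_atoms):
        for p in range(n_atoms):
            full = np.correlate(z[p], z[k], mode='full')   # length 2*T - 1
            ztz[k, p] = full[T - L:T + L - 1]              # keep |lag| < L
    return ztz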
Example #7
def run_one_grid(n_atoms, atom_support, reg, n_workers, grid, tol, soft_lock,
                 dicod_args, random_state):

    tag = f"[{soft_lock} - {reg:.0e} - {random_state[0]}]"
    random_state = random_state[1]

    # Generate a problem
    print(
        colorify(79 * "=" + f"\n{tag} Start with {n_workers} workers\n" +
                 79 * "="))
    X = get_mandril()
    D = init_dictionary(X, n_atoms, atom_support, random_state=random_state)
    reg_ = reg * get_lambda_max(X, D).max()

    if grid:
        w_world = 'auto'
    else:
        w_world = n_workers

    z_hat, *_, run_statistics = dicod(X,
                                      D,
                                      reg=reg_,
                                      n_seg='auto',
                                      strategy='greedy',
                                      w_world=w_world,
                                      n_workers=n_workers,
                                      timing=False,
                                      tol=tol,
                                      soft_lock=soft_lock,
                                      **dicod_args)

    runtime = run_statistics['runtime']
    sparsity = len(z_hat.nonzero()[0]) / z_hat.size

    print(
        colorify("=" * 79 + f"\n{tag} End for {n_workers} workers "
                 f"in {runtime:.1e}\n" + "=" * 79,
                 color=GREEN))

    return ResultItem(n_atoms=n_atoms,
                      atom_support=atom_support,
                      reg=reg,
                      n_workers=n_workers,
                      grid=grid,
                      tol=tol,
                      soft_lock=soft_lock,
                      random_state=random_state,
                      dicod_args=dicod_args,
                      sparsity=sparsity,
                      **run_statistics)
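When grid=True, passing w_world='auto' lets dicod lay the workers out on a 2D grid of patches instead of a single row (w_world=n_workers). The helper below is purely hypothetical, only to illustrate the near-square factorization a 2D layout implies; it is not dicod's actual placement logic.

def balanced_grid(n_workers):
    # Largest divisor pair of n_workers that is closest to a square.
    w_world = int(n_workers ** .5)
    while n_workers % w_world:
        w_world -= 1
    return w_world, n_workers // w_world

balanced_grid(36)   # -> (6, 6)
balanced_grid(40)   # -> (5, 8)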
Example #8
def run_one(n_times, n_times_atom, n_atoms, n_channels, noise_level,
            random_state, reg, tol, strategy, dicod_args):

    threadpool_limits(1)

    tag = f"[{strategy} - {n_times} - {reg}]"
    current_time = time.time() - START
    msg = f"\r{tag} started at T={current_time:.0f} sec"
    print(colorify(msg, BLUE))

    X, D_hat, lmbd_max = simulate_data(n_times,
                                       n_times_atom,
                                       n_atoms,
                                       n_channels,
                                       noise_level,
                                       random_state=random_state)
    reg_ = reg * lmbd_max

    n_seg = 1
    if strategy == 'lgcd':
        n_seg = 'auto'

    *_, pobj, run_statistics = dicod(X,
                                     D_hat,
                                     reg_,
                                     n_workers=1,
                                     tol=tol,
                                     strategy=strategy,
                                     n_seg=n_seg,
                                     **dicod_args)
    meta = dicod_args.copy()
    meta.update(n_times_atom=n_times_atom,
                n_atoms=n_atoms,
                n_channels=n_channels,
                noise_level=noise_level)

    duration = time.time() - START - current_time
    msg = (f"\r{tag} done in {duration:.0f} sec "
           f"at T={time.time() - START:.0f} sec")
    print(colorify(msg, GREEN))

    return ResultItem(reg=reg,
                      strategy=strategy,
                      tol=tol,
                      n_times=n_times,
                      meta=meta,
                      random_state=random_state,
                      pobj=pobj,
                      **run_statistics)
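threadpool_limits(1) pins the BLAS/OpenMP thread pools to a single thread so that this single-worker benchmark is not silently parallelized by the linear-algebra backend; the call comes from threadpoolctl:

from threadpoolctl import threadpool_limits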
Example #9
def test_stopping_criterion(n_workers, signal_support, atom_support):
    tol = 1
    reg = 1
    n_atoms = 10
    n_channels = 3

    rng = check_random_state(42)

    X = rng.randn(n_channels, *signal_support)
    D = rng.randn(n_atoms, n_channels, *atom_support)
    sum_axis = tuple(range(1, D.ndim))
    D /= np.sqrt(np.sum(D * D, axis=sum_axis, keepdims=True))

    z_hat, *_ = dicod(X, D, reg, tol=tol, n_workers=n_workers, verbose=VERBOSE)

    beta, dz_opt, _ = _init_beta(X, D, reg, z_i=z_hat)
    assert abs(dz_opt).max() < tol
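The criterion checked here is that, once dicod has stopped, no single coordinate update would exceed tol. For reference, the scalar subproblem solved for each coordinate is a soft-thresholding; a generic sketch is given below (the sign convention of dicodile's internal beta may differ).

import numpy as np

def scalar_cd_update(beta, reg, norm_d=1.):
    # Minimizer of  0.5 * norm_d * z**2 + beta * z + reg * |z|  over z.
    return -np.sign(beta) * np.maximum(np.abs(beta) - reg, 0) / norm_d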
Example #10
def run_one_scaling_2d(n_atoms, atom_support, reg, n_workers, strategy, tol,
                       dicod_args, random_state):
    tag = f"[{strategy} - {reg:.0e} - {random_state[0]}]"
    random_state = random_state[1]

    # Generate a problem
    print(
        colorify(79 * "=" + f"\n{tag} Start with {n_workers} workers\n" +
                 79 * "="))
    X = get_mandril()
    D = init_dictionary(X, n_atoms, atom_support, random_state=random_state)
    reg_ = reg * get_lambda_max(X, D).max()

    z_hat, *_, run_statistics = dicod(X,
                                      D,
                                      reg=reg_,
                                      strategy=strategy,
                                      n_workers=n_workers,
                                      tol=tol,
                                      **dicod_args)

    runtime = run_statistics['runtime']
    sparsity = len(z_hat.nonzero()[0]) / z_hat.size
    print(
        colorify('=' * 79 + f"\n{tag} End with {n_workers} workers for reg="
                 f"{reg:.0e} in {runtime:.1e}\n" + "=" * 79,
                 color=GREEN))

    return ResultItem(n_atoms=n_atoms,
                      atom_support=atom_support,
                      reg=reg,
                      n_workers=n_workers,
                      strategy=strategy,
                      tol=tol,
                      dicod_args=dicod_args,
                      random_state=random_state,
                      sparsity=sparsity,
                      **run_statistics)
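In all these scripts reg is given as a fraction of lambda_max, the smallest regularization for which the all-zero code is optimal, so the sparsity level stays comparable across problems. A hypothetical sanity check reusing the helpers from the examples above:

import numpy as np

X = get_mandril()
D = init_dictionary(X, n_atoms=25, atom_support=(12, 12), random_state=0)
lmbd_max = get_lambda_max(X, D).max()

# Well above lambda_max, greedy coordinate descent should return an empty code.
z_hat, *_ = dicod(X, D, reg=2 * lmbd_max, tol=1e-3, n_workers=4)
assert np.all(z_hat == 0)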
Example #11
def test_ztz(valid_support, atom_support):
    tol = .5
    reg = .1
    n_atoms = 7
    n_channels = 5
    random_state = None

    sig_support = get_full_support(valid_support, atom_support)

    rng = check_random_state(random_state)

    X = rng.randn(n_channels, *sig_support)
    D = rng.randn(n_atoms, n_channels, *atom_support)
    D /= np.sqrt(np.sum(D * D, axis=(1, 2), keepdims=True))

    z_hat, ztz, ztX, *_ = dicod(X, D, reg, tol=tol, n_workers=N_WORKERS,
                                return_ztz=True, verbose=VERBOSE)

    ztz_full = compute_ztz(z_hat, atom_support)
    assert np.allclose(ztz_full, ztz)

    ztX_full = compute_ztX(z_hat, X)
    assert np.allclose(ztX_full, ztX)
Example #12
def test_cost(valid_support, atom_support):

    tol = .5
    reg = 0
    n_atoms = 7
    n_channels = 5
    random_state = None

    sig_support = get_full_support(valid_support, atom_support)

    rng = check_random_state(random_state)

    D = rng.randn(n_atoms, n_channels, *atom_support)
    D /= np.sqrt(np.sum(D * D, axis=(1, 2), keepdims=True))
    z = rng.randn(n_atoms, *valid_support)
    z *= rng.rand(n_atoms, *valid_support) > .5

    X = rng.randn(n_channels, *sig_support)

    z_hat, *_, pobj, _ = dicod(X, D, reg, z0=z, tol=tol, n_workers=N_WORKERS,
                               max_iter=1000, freeze_support=True,
                               verbose=VERBOSE)
    # The last objective value reported by dicod should match the cost
    # recomputed directly from z_hat.
    cost = pobj[-1][2]
    assert np.isclose(cost, compute_objective(X, z_hat, D, reg))
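For reference, the objective compared here is the standard convolutional sparse-coding cost, 0.5 * ||X - sum_k z_k * D_k||^2 + reg * ||z||_1. A self-contained sketch of an equivalent computation (illustrative; compute_objective itself may differ in its details):

import numpy as np
from scipy.signal import fftconvolve

def csc_objective(X, z, D, reg):
    # X: (n_channels, *sig_support), z: (n_atoms, *valid_support),
    # D: (n_atoms, n_channels, *atom_support).
    X_hat = np.sum([[fftconvolve(z_k, d_kp) for d_kp in d_k]
                    for z_k, d_k in zip(z, D)], axis=0)
    return .5 * np.sum((X - X_hat) ** 2) + reg * np.abs(z).sum()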