Example #1
0
def run_dicodile_hubble(size, reg, L):
    """Run DiCoDiLe on the Hubble image, then save and plot the result.

    Parameters
    ----------
    size : str
        Size tag forwarded to ``get_hubble`` to select the image.
    reg : float
        Regularization parameter for the sparse coding.
    L : int
        Side length of the (square) atom support.

    Notes
    -----
    ``n_atoms`` and ``random_state`` are read from module-level globals.
    The learned dictionary and (sparsified) activations are written under
    the ``hubble/`` directory.
    """
    image = get_hubble(size=size)

    D_init = init_dictionary(image, n_atoms, (L, L),
                             random_state=random_state)

    solver_params = dict(
        reg=reg,
        z_positive=True,
        n_iter=100,
        n_workers=400,
        eps=1e-5,
        tol=1e-3,
        verbose=2,
        dicod_kwargs=dict(soft_lock='border'),
    )
    pobj, times, D_hat, z_hat = dicodile(image, D_init, **solver_params)

    # Save the atoms
    prefix = (f"K{n_atoms}_L{L}_reg{reg}"
              f"_seed{random_state}_dicodile_{size}_")
    prefix = prefix.replace(" ", "")
    np.save(f"hubble/{prefix}D_hat.npy", D_hat)

    # Zero-out tiny activations and store each map as a CSR sparse matrix.
    z_hat[z_hat < 1e-2] = 0
    sparse_activations = [sparse.csr_matrix(z_k) for z_k in z_hat]
    np.save(f"hubble/{prefix}z_hat.npy", sparse_activations)

    plot_atom_and_coefs(D_hat, z_hat, prefix)
Example #2
0
def test_dicodile():
    """Smoke test: the objective must decrease on small simulated data."""
    signals, atoms = simulate_data(n_times=100, n_times_atom=10, n_atoms=2,
                                   n_channels=3, noise_level=1e-5, seed=42)

    result = dicodile(signals, atoms, reg=.1, z_positive=True, n_iter=10,
                      eps=1e-4, n_jobs=1, verbose=2, tol=1e-10)
    pobj, times, D_hat, z_hat = result

    # NOTE(review): `is_deacreasing` is misspelled but matches the helper's
    # actual name elsewhere in the project — renaming it here would break.
    assert is_deacreasing(pobj)
Example #3
0
def compute_cdl(X,
                n_atoms,
                atom_support,
                D_init,
                reg=.2,
                window=False,
                n_jobs=10):
    """Compute dictionary using Dicodile.

    Parameters
    ----------
    X : ndarray, shape (n_channels, *signal_support)
        Signal from which the patterns are extracted. Note that this
        function is only working for a single image and a single channel.
    n_atoms : int
        Number of pattern to learn form the data
    atom_support : tuple(int, int)
        Support of the patterns that are learned.
    D_init: ndarray, shape (n_atoms, n_channels, *atom_support)
        Initial dictionary, used to start the algorithm.
    reg: float (default: .2)
        Regularization parameter for the sparse coding step.
    window: boolean (default: False)
        If set to True, use a window to force dictionary boundaries to zero.
    n_jobs: int (default: 10)
        Number of CPUs that can be used for the computations.

    Returns
    -------
    D_hat: ndarray, shape (n_atoms, n_channels, *atom_support)
        The learned dictionary, ordered by decreasing l1 norm of the
        associated activations.
    meta: dict
        The solver parameters used (reg, tol, z_positive, n_iter, window).
    """

    # Add a small noise to avoid having coefficients that are equals. They
    # might make the distributed optimization complicated.
    X_0 = X.copy()
    X_0 += X_0.std() * 1e-8 * np.random.randn(*X.shape)

    meta = dict(reg=reg, tol=1e-3, z_positive=True, n_iter=100, window=window)

    # fit the dictionary with dicodile
    pobj, times, D_hat, z_hat = dicodile(
        X_0,
        D_init,
        n_workers=n_jobs,
        w_world='auto',
        **meta,
        raise_on_increase=True,
        verbose=1,
    )

    # Order the dictionary based on the l1 norm of its activation
    i0 = abs(z_hat).sum(axis=(1, 2)).argsort()[::-1]
    return D_hat[i0], meta
Example #4
0
def run_one(method, n_atoms, atom_support, reg, z_positive, n_jobs, n_iter,
            tol, eps, random_state):
    """Run one CDL solver on a 512x512 crop of the Hubble image.

    Dispatches on ``method`` ('wohlberg' for sporco's consensus ADMM,
    'dicodile' for the distributed solver) and returns a ``ResultItem``
    bundling the run parameters, objective curve and timings.
    """
    X = get_hubble()[:, 512:1024, 512:1024]
    D_init = init_dictionary(X,
                             n_atoms,
                             atom_support,
                             random_state=random_state)

    if method == 'wohlberg':
        ################################################################
        #            Run parallel consensus ADMM
        #
        lmbd_max = get_lambda_max(X, D_init).max()
        print("Lambda max = {}".format(lmbd_max))
        scaled_reg = reg * lmbd_max

        # Reverse all four axes: sporco expects the transposed layout.
        D_wohlberg = np.transpose(D_init, axes=(3, 2, 1, 0))
        X_wohlberg = np.transpose(X[None], axes=(3, 2, 1, 0))

        opt = ConvBPDNDictLearn_Consensus.Options({
            'Verbose': True,
            'StatusHeader': False,
            'MaxMainIter': n_iter,
            'CCMOD': {'rho': 1.0, 'ZeroMean': False},
            'CBPDN': {'rho': 50.0 * scaled_reg + 0.5,
                      'NonNegCoef': z_positive},
            'DictSize': D_wohlberg.shape,
        })
        solver = ConvBPDNDictLearn_Consensus(D_wohlberg,
                                             X_wohlberg,
                                             lmbda=scaled_reg,
                                             nproc=n_jobs,
                                             opt=opt,
                                             dimK=1,
                                             dimN=2)

        _, pobj = solver.solve()
        print(pobj)

        times = solver.getitstat().Time

    elif method == "dicodile":
        pobj, times, D_hat, z_hat = dicodile(X,
                                             D_init,
                                             reg=reg,
                                             z_positive=z_positive,
                                             n_iter=n_iter,
                                             eps=eps,
                                             n_jobs=n_jobs,
                                             verbose=2,
                                             tol=tol)
        # Keep every other point of the curves; accumulate per-step times.
        pobj = pobj[::2]
        times = np.cumsum(times)[::2]

    else:
        raise NotImplementedError()

    return ResultItem(n_atoms=n_atoms,
                      atom_support=atom_support,
                      reg=reg,
                      n_jobs=n_jobs,
                      random_state=random_state,
                      method=method,
                      z_positive=z_positive,
                      times=times,
                      pobj=pobj)