Example No. 1
def plot_atom_usage(X, kernels, n_nonzero_coefs, n_jobs, figname):
    r, code = multivariate_sparse_encode(X,
                                         kernels,
                                         n_nonzero_coefs=n_nonzero_coefs,
                                         n_jobs=n_jobs,
                                         verbose=2)
    n_kernels = len(kernels)
    amplitudes = zeros(n_kernels)
    # Each row of code[i] stores one atom activation: column 0 holds the
    # amplitude and column 2 the kernel index (cast to int for indexing).
    for i in range(len(code)):
        for s in range(n_nonzero_coefs):
            amplitudes[int(code[i][s, 2])] += abs(code[i][s, 0])

    decomposition_weight = hstack([code[i][:, 2] for i in range(len(code))])
    decomposition_weight.sort()
    # `normed` has been removed from numpy.histogram; plain counts are wanted here
    weight, _ = histogram(decomposition_weight, len(kernels))
    order = weight.argsort()
    plot_kernels(kernels,
                 len(kernels),
                 order=order,
                 label=weight,
                 amp=amplitudes,
                 figname='EEG-kernels' + figname,
                 row=6)
    plot_coef_hist(decomposition_weight, figname)
    plot_weight_hist(amplitudes, figname)
    plot_reconstruction_samples(X, r, code, kernels, 3, figname)
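A note on the structure assumed above: judging from how `code` is indexed here and from the structured view `'float, int, int'` used in the OMP tests below, each `code[i]` appears to be an array of triplets whose first column is the atom amplitude and whose last column is the kernel index (the middle, integer column is presumably the shift/offset). A minimal sketch of the same per-kernel tally in plain NumPy, under that assumption (the helper name `tally_atom_usage` is invented for illustration):

import numpy as np

def tally_atom_usage(code, n_kernels):
    # Assumed layout: each row of code[i] is (amplitude, offset, kernel_index).
    counts = np.zeros(n_kernels, dtype=int)
    amplitudes = np.zeros(n_kernels)
    for sample_code in code:
        for amplitude, _offset, kernel_idx in sample_code:
            counts[int(kernel_idx)] += 1
            amplitudes[int(kernel_idx)] += abs(amplitude)
    return counts, amplitudes

Under the same assumption, `counts` plays the role of the `weight` histogram and `amplitudes` matches the tally computed in the loop above.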
Example No. 2
def test_multivariate_OMP():
    n_samples = 10
    n_features = 100
    n_dims = 90
    n_kernels = 8
    n_nonzero_coefs = 3
    kernel_init_len = n_features
    verbose = False

    dico, signals, decomposition = _generate_testbed(
        kernel_init_len, n_nonzero_coefs, n_kernels, n_samples, n_features, n_dims
    )
    r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs, n_jobs=1)
    if verbose:
        for i in range(n_samples):
            # original signal decomposition, sorted by amplitude
            sorted_decomposition = np.zeros_like(decomposition[i]).view("float, int, int")
            for j in range(decomposition[i].shape[0]):
                sorted_decomposition[j] = tuple(decomposition[i][j, :].tolist())
            sorted_decomposition.sort(order=["f0"], axis=0)
            for j in reversed(sorted_decomposition):
                print(j)

            # decomposition found by OMP, also sorted
            sorted_d = np.zeros_like(d[i]).view("float, int, int")
            for j in range(d[i].shape[0]):
                sorted_d[j] = tuple(d[i][j, :].tolist())
            sorted_d.sort(order=["f0"], axis=0)
            for j in reversed(sorted_d):
                print(j)

    assert_array_almost_equal(
        reconstruct_from_code(d, dico, n_features), signals, decimal=3
    )
Example No. 3
def _verif_OMP():
    n_samples = 1000
    n_nonzero_coefs = 3

    for n_features in range(5, 50, 5):
        # integer division keeps the generated sizes integral
        kernel_init_len = n_features - n_features // 2
        n_dims = n_features // 2
        n_kernels = n_features * 5
        dico, signals, _ = _generate_testbed(
            kernel_init_len, n_nonzero_coefs, n_kernels, n_samples, n_features, n_dims
        )
        r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs, n_jobs=1)
        reconstructed = reconstruct_from_code(d, dico, n_features)

        residual_energy = 0.0
        for sig, rec in zip(signals, reconstructed):
            residual_energy += ((sig - rec) ** 2).sum(1).mean()

        print(
            "Mean energy of the",
            n_samples,
            "residuals for",
            (n_features, n_dims),
            "features and",
            n_kernels,
            "kernels of",
            (kernel_init_len, n_dims),
            " is",
            residual_energy / n_samples,
        )
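In formula form, the mean residual energy printed for each (n_features, n_dims) setting is obtained by summing each squared residual over the D = n_dims columns (axis 1), averaging over the T = n_features rows, and then averaging over the N = n_samples signals; with x_i the i-th signal and \hat{x}_i its reconstruction:

E = \frac{1}{N} \sum_{i=1}^{N} \frac{1}{T} \sum_{t=1}^{T} \sum_{d=1}^{D} \bigl( x_i[t, d] - \hat{x}_i[t, d] \bigr)^2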
Example No. 4
def test_multivariate_OMP():
    n_samples = 10
    n_features = 100
    n_dims = 90
    n_kernels = 8
    n_nonzero_coefs = 3
    kernel_init_len = n_features
    verbose = False

    dico, signals, decomposition = _generate_testbed(kernel_init_len,
                                                     n_nonzero_coefs,
                                                     n_kernels,
                                                     n_samples, n_features,
                                                     n_dims)
    r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs,
                                      n_jobs=1)
    if verbose:
        for i in range(n_samples):
            # original signal decomposition, sorted by amplitude
            sorted_decomposition = np.zeros_like(decomposition[i]).view('float, int, int')
            for j in range(decomposition[i].shape[0]):
                sorted_decomposition[j] = tuple(decomposition[i][j, :].tolist())
            sorted_decomposition.sort(order=['f0'], axis=0)
            for j in reversed(sorted_decomposition):
                print(j)

            # decomposition found by OMP, also sorted
            sorted_d = np.zeros_like(d[i]).view('float, int, int')
            for j in range(d[i].shape[0]):
                sorted_d[j] = tuple(d[i][j, :].tolist())
            sorted_d.sort(order=['f0'], axis=0)
            for j in reversed(sorted_d):
                print(j)

    assert_array_almost_equal(reconstruct_from_code(d, dico, n_features),
                              signals, decimal=3)
Example No. 5
def test_sparse_encode():
    n_kernels = 8
    dico = MultivariateDictLearning(n_kernels=n_kernels, random_state=0,
                                    max_iter=2, n_nonzero_coefs=1)
    dico = dico.fit(X)
    _, code = multivariate_sparse_encode(X, dico, n_nonzero_coefs=1,
                                        n_jobs=-1, verbose=3)
    assert_true(len(code[0]) <= 1)
Example No. 6
def test_sparse_encode():
    n_samples, n_features, n_dims = 10, 5, 3
    X = [rng_global.randn(n_features, n_dims) for i in range(n_samples)]
    n_kernels = 8
    dico = MultivariateDictLearning(
        n_kernels=n_kernels, random_state=0, max_iter=2, n_nonzero_coefs=1
    )
    dico = dico.fit(X)
    _, code = multivariate_sparse_encode(X, dico, n_nonzero_coefs=1, n_jobs=-1, verbose=3)
    assert len(code[0]) <= 1
Example No. 7
def _test_with_pydico_reload():
    import pickle

    n_nonzero_coefs = 3
    with open("skmdla.pck", "w") as f:
        o = pickle.load(f)
    f.close()
    dico = o["dico"]
    signals = o["signals"]
    _ = o["decomposition"]

    r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs, n_jobs=1, verbose=4)
Example No. 8
def _test_with_pydico_reload():
    import pickle
    n_nonzero_coefs = 3
    # the pickle written by _test_with_pydico is read back in binary mode
    with open('skmdla.pck', 'rb') as f:
        o = pickle.load(f)
    dico = o['dico']
    signals = o['signals']
    decomposition = o['decomposition']

    r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs,
                                      n_jobs=1, verbose=4)
Example No. 9
def _test_with_pydico_reload():
    import pickle
    n_nonzero_coefs = 3
    # the pickle written by _test_with_pydico is read back in binary mode
    with open('skmdla.pck', 'rb') as f:
        o = pickle.load(f)
    dico = o['dico']
    signals = o['signals']
    decomposition = o['decomposition']

    r, d = multivariate_sparse_encode(signals,
                                      dico,
                                      n_nonzero_coefs,
                                      n_jobs=1,
                                      verbose=4)
Example No. 10
def _test_with_pydico():
    import pickle
    import shutil

    n_features = 5
    n_kernels = 8
    n_nonzero_coefs = 3
    kernel_init_len = n_features
    dico, signals, decomposition = _generate_testbed(kernel_init_len,
                                                     n_nonzero_coefs, n_kernels)
    o = {'signals': signals, 'dico': dico, 'decomposition': decomposition}
    # pickle files must be opened in binary mode for dump
    with open('skmdla.pck', 'wb') as f:
        pickle.dump(o, f)
    shutil.copy('skmdla.pck', '../RC/skmdla.pck')

    print(signals)
    print(dico)

    r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs,
                                      n_jobs=1, verbose=4)
Example No. 11
def decomposition_random_dictionary(Gaussian=True, rng=None, n_features=65, n_dims=1):
    """Generate a dataset from a random dictionary and compute decomposition

    A dataset of n_samples examples is generated from a random dictionary,
    each sample containing a random mixture of n_nonzero_coef atoms and has
    a dimension of n_features by n_dims. All the examples are decomposed with
    sparse multivariate OMP, written as:
    (Eq. 1) min_a ||x - Da ||^2 s.t. ||a||_0 <= k
    with x in R^(n_features x n_dims), D in R^(n_features x n_kernels) and
    a in R^n_kernels.

    Returns a ndarray of (n_nonzero_coefs, n_samples) containing all the
    root mean square error (RMSE) computed as the residual of the decomposition
    for all samples for sparsity constraint values of (Eq. 1) going from 1
    to n_nonzero_coefs.
    """
    n_samples = 100
    kernel_init_len = n_features
    n_kernels = 50
    n_jobs = 1

    dictionary, X, code = _generate_testbed(
        kernel_init_len=kernel_init_len,
        n_nonzero_coefs=n_nonzero_coefs,
        n_kernels=n_kernels,
        n_samples=n_samples,
        n_features=n_features,
        n_dims=n_dims,
        rng=rng_global,
        Gaussian=Gaussian,
    )
    rmse = zeros(shape=(n_nonzero_coefs, n_samples))
    for k in range(n_nonzero_coefs):
        for idx, s in enumerate(X):
            r, _ = multivariate_sparse_encode(
                array(s, ndmin=3),
                dictionary,
                n_nonzero_coefs=k + 1,
                n_jobs=n_jobs,
                verbose=1,
            )
            rmse[k, idx] = norm(r[0], "fro") / norm(s, "fro") * 100
    return rmse
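Written out, the sparse coding problem of Eq. 1 in the docstring and the error actually stored by the loop are

\min_{a} \lVert x - D a \rVert_2^2 \quad \text{s.t.} \quad \lVert a \rVert_0 \le k,
\qquad
\mathrm{rmse}[k, i] = 100 \, \frac{\lVert r_i \rVert_F}{\lVert x_i \rVert_F},

where r_i is the residual returned by multivariate_sparse_encode for sample x_i with the sparsity constraint set to k + 1. The stored value is therefore the Frobenius norm of the residual relative to that of the signal, expressed as a percentage.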
Example No. 12
def plot_atom_usage(X, kernels, n_nonzero_coefs, n_jobs, figname):
    r, code = multivariate_sparse_encode(X, kernels,
                                         n_nonzero_coefs=n_nonzero_coefs,
                                         n_jobs=n_jobs, verbose=2)
    n_kernels = len(kernels)
    amplitudes = zeros(n_kernels)
    for i in range(len(code)):
        for s in range(n_nonzero_coefs):
            amplitudes[int(code[i][s, 2])] += abs(code[i][s, 0])

    decomposition_weight = hstack([code[i][:, 2] for i in range(len(code))])
    decomposition_weight.sort()
    # `normed` has been removed from numpy.histogram; plain counts are wanted here
    weight, _ = histogram(decomposition_weight, len(kernels))
    order = weight.argsort()
    plot_kernels(kernels, len(kernels), order=order, label=weight,
                 amp=amplitudes, figname='EEG-kernels' + figname, row=6)
    plot_coef_hist(decomposition_weight, figname)
    plot_weight_hist(amplitudes, figname)
    plot_reconstruction_samples(X, r, code, kernels, 3, figname)
Example No. 13
def _test_with_pydico():
    import pickle
    import shutil

    n_features = 5
    n_kernels = 8
    n_nonzero_coefs = 3
    kernel_init_len = n_features
    dico, signals, decomposition = _generate_testbed(
        kernel_init_len, n_nonzero_coefs, n_kernels
    )
    o = {"signals": signals, "dico": dico, "decomposition": decomposition}
    with open("skmdla.pck", "w") as f:
        pickle.dump(o, f)
    f.close()
    shutil.copy("skmdla.pck", "../RC/skmdla.pck")

    print(signals)
    print(dico)

    r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs, n_jobs=1, verbose=4)
Example No. 14
def decomposition_random_dictionary(Gaussian=True, rng=None, n_features=65, n_dims=1):
    """Generate a dataset from a random dictionary and compute decomposition

    A dataset of n_samples examples is generated from a random dictionary,
    each sample containing a random mixture of n_nonzero_coef atoms and has
    a dimension of n_features by n_dims. All the examples are decomposed with 
    sparse multivariate OMP, written as:
    (Eq. 1) min_a ||x - Da ||^2 s.t. ||a||_0 <= k 
    with x in R^(n_features x n_dims), D in R^(n_features x n_kernels) and
    a in R^n_kernels. 

    Returns a ndarray of (n_nonzero_coefs, n_samples) containing all the
    root mean square error (RMSE) computed as the residual of the decomposition
    for all samples for sparsity constraint values of (Eq. 1) going from 1
    to n_nonzero_coefs.
    """
    n_samples = 100
    kernel_init_len = n_features
    n_kernels = 50
    n_jobs = 1

    dictionary, X, code = _generate_testbed(
        kernel_init_len=kernel_init_len,
        n_nonzero_coefs=n_nonzero_coefs,
        n_kernels=n_kernels,
        n_samples=n_samples,
        n_features=n_features,
        n_dims=n_dims,
        rng=rng_global,
        Gaussian=Gaussian,
    )
    rmse = zeros(shape=(n_nonzero_coefs, n_samples))
    for k in range(n_nonzero_coefs):
        for idx, s in enumerate(X):
            r, _ = multivariate_sparse_encode(
                array(s, ndmin=3), dictionary, n_nonzero_coefs=k + 1, n_jobs=n_jobs, verbose=1
            )
            rmse[k, idx] = norm(r[0], "fro") / norm(s, "fro") * 100
    return rmse
Example No. 15
def _verif_OMP():
    n_samples = 1000
    n_nonzero_coefs = 3

    for n_features in range(5, 50, 5):
        # integer division keeps the generated sizes integral
        kernel_init_len = n_features - n_features // 2
        n_dims = n_features // 2
        n_kernels = n_features * 5
        dico, signals, decomposition = _generate_testbed(kernel_init_len,
                                                         n_nonzero_coefs,
                                                         n_kernels,
                                                         n_samples, n_features,
                                                         n_dims)
        r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs,
                                          n_jobs=1)
        reconstructed = reconstruct_from_code(d, dico, n_features)

        residual_energy = 0.
        for sig, rec in zip(signals, reconstructed):
            residual_energy += ((sig - rec) ** 2).sum(1).mean()

        print('Mean energy of the', n_samples, 'residuals for',
              (n_features, n_dims), 'features and', n_kernels, 'kernels of',
              (kernel_init_len, n_dims), 'is', residual_energy / n_samples)
Example No. 16
def _test_with_pydico():
    import pickle
    import shutil

    n_samples, n_features, n_dims = 10, 5, 3
    n_kernels = 8
    n_nonzero_coefs = 3
    kernel_init_len = n_features
    dico, signals, decomposition = _generate_testbed(kernel_init_len,
                                                     n_nonzero_coefs,
                                                     n_kernels)
    o = {'signals': signals, 'dico': dico, 'decomposition': decomposition}
    # pickle files must be opened in binary mode for dump
    with open('skmdla.pck', 'wb') as f:
        pickle.dump(o, f)
    shutil.copy('skmdla.pck', '../RC/skmdla.pck')

    print(signals)
    print(dico)

    r, d = multivariate_sparse_encode(signals,
                                      dico,
                                      n_nonzero_coefs,
                                      n_jobs=1,
                                      verbose=4)
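For reference, the save/reload pair used by _test_with_pydico and _test_with_pydico_reload boils down to a plain pickle round trip. A minimal sketch, assuming only the dictionary layout used above (the helper names save_testbed and load_testbed are invented for illustration):

import pickle


def save_testbed(path, signals, dico, decomposition):
    # pickle files must be written in binary mode
    with open(path, 'wb') as f:
        pickle.dump({'signals': signals, 'dico': dico,
                     'decomposition': decomposition}, f)


def load_testbed(path):
    # ...and read back in binary mode
    with open(path, 'rb') as f:
        o = pickle.load(f)
    return o['signals'], o['dico'], o['decomposition']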