Example 1
def test_multivariate_OMP():
    n_samples = 10
    n_features = 100
    n_dims = 90
    n_kernels = 8
    n_nonzero_coefs = 3
    kernel_init_len = n_features
    verbose = False

    dico, signals, decomposition = _generate_testbed(
        kernel_init_len, n_nonzero_coefs, n_kernels, n_samples, n_features, n_dims
    )
    r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs, n_jobs=1)
    if verbose:
        for i in range(n_samples):
            # original signal decomposition, sorted by amplitude
            sorted_decomposition = np.zeros_like(decomposition[i]).view("float, int, int")
            for j in range(decomposition[i].shape[0]):
                sorted_decomposition[j] = tuple(decomposition[i][j, :].tolist())
            sorted_decomposition.sort(order=["f0"], axis=0)
            for j in reversed(sorted_decomposition):
                print(j)

            # decomposition found by OMP, also sorted
            sorted_d = np.zeros_like(d[i]).view("float, int, int")
            for j in range(d[i].shape[0]):
                sorted_d[j] = tuple(d[i][j, :].tolist())
            sorted_d.sort(order=["f0"], axis=0)
            for j in reversed(sorted_d):
                print(j)

    assert_array_almost_equal(
        reconstruct_from_code(d, dico, n_features), signals, decimal=3
    )
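
For readers who find the structured-array view in the verbose branch above hard to follow, here is a minimal sketch that prints one signal's decomposition sorted by amplitude using plain Python tuples. It assumes each row of a decomposition is a (float amplitude, int, int) triple, as the 'float, int, int' view implies; the meaning of the two integer fields is not spelled out in this test.

# Hedged sketch: equivalent amplitude-sorted printing with plain tuples.
# Assumes each row of code_i is (amplitude: float, int, int); the two integer
# fields are left uninterpreted here (an assumption, not package documentation).
def print_decomposition_by_amplitude(code_i):
    rows = [tuple(row) for row in code_i.tolist()]
    for row in sorted(rows, key=lambda t: t[0], reverse=True):
        print(row)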
Example 2
def _verif_OMP():
    n_samples = 1000
    n_nonzero_coefs = 3

    for n_features in range(5, 50, 5):
        # integer sizes: "/" would give floats on Python 3 and break array shapes
        kernel_init_len = n_features - n_features // 2
        n_dims = n_features // 2
        n_kernels = n_features * 5
        dico, signals, _ = _generate_testbed(
            kernel_init_len, n_nonzero_coefs, n_kernels, n_samples, n_features, n_dims
        )
        r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs, n_jobs=1)
        reconstructed = reconstruct_from_code(d, dico, n_features)

        residual_energy = 0.0
        for sig, rec in zip(signals, reconstructed):
            residual_energy += ((sig - rec) ** 2).sum(1).mean()

        print(
            "Mean energy of the",
            n_samples,
            "residuals for",
            (n_features, n_dims),
            "features and",
            n_kernels,
            "kernels of",
            (kernel_init_len, n_dims),
            " is",
            residual_energy / n_samples,
        )
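
The residual-energy accumulation above can be expressed more compactly; a minimal sketch, assuming signals and reconstructed are parallel lists of 2-D per-sample arrays as in this test:

# Hedged sketch: same mean residual energy as the explicit loop above.
# Assumes signals and reconstructed are parallel lists of 2-D numpy arrays.
import numpy as np

def mean_residual_energy(signals, reconstructed):
    per_sample = [((sig - rec) ** 2).sum(1).mean()
                  for sig, rec in zip(signals, reconstructed)]
    return np.mean(per_sample)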
Example 3
def test_multivariate_OMP():
    n_samples = 10
    n_features = 100
    n_dims = 90
    n_kernels = 8
    n_nonzero_coefs = 3
    kernel_init_len = n_features
    verbose = False

    dico, signals, decomposition = _generate_testbed(kernel_init_len,
                                                     n_nonzero_coefs,
                                                     n_kernels,
                                                     n_samples, n_features,
                                                     n_dims)
    r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs,
                                      n_jobs=1)
    if verbose:
        for i in range(n_samples):
            # original signal decomposition, sorted by amplitude
            sorted_decomposition = np.zeros_like(decomposition[i]).view('float, int, int')
            for j in range(decomposition[i].shape[0]):
                sorted_decomposition[j] = tuple(decomposition[i][j, :].tolist())
            sorted_decomposition.sort(order=['f0'], axis=0)
            for j in reversed(sorted_decomposition):
                print(j)

            # decomposition found by OMP, also sorted
            sorted_d = np.zeros_like(d[i]).view('float, int, int')
            for j in range(d[i].shape[0]):
                sorted_d[j] = tuple(d[i][j, :].tolist())
            sorted_d.sort(order=['f0'], axis=0)
            for j in reversed(sorted_d):
                print(j)

    assert_array_almost_equal(reconstruct_from_code(d, dico, n_features),
                              signals, decimal=3)
Example 4
def test_mdla_reconstruction():
    n_samples, n_features, n_dims = 10, 5, 3
    n_kernels = 8
    n_nonzero_coefs = 3
    kernel_init_len = n_features
    dico, signals, decomposition = _generate_testbed(kernel_init_len,
                                                     n_nonzero_coefs,
                                                     n_kernels)

    assert_array_almost_equal(
        reconstruct_from_code(decomposition, dico, n_features), signals)
Example 5
def test_mdla_reconstruction():
    n_samples, n_features, n_dims = 10, 5, 3
    n_kernels = 8
    n_nonzero_coefs = 3
    kernel_init_len = n_features
    dico, signals, decomposition = _generate_testbed(kernel_init_len,
                                                     n_nonzero_coefs,
                                                     n_kernels)
    
    assert_array_almost_equal(reconstruct_from_code(decomposition,
                                                    dico, n_features),
                              signals)
Example 6
def plot_reconstruction_samples(X, r, code, kernels, n, figname):
    n_features = X[0].shape[0]
    energy_residual = zeros(len(r))
    for i in range(len(r)):
        energy_residual[i] = norm(r[i], 'fro')
    energy_sample = zeros(len(X))
    for i in range(len(X)):
        energy_sample[i] = norm(X[i], 'fro')

    # fraction of each sample's energy left in the residual (lower = better fit)
    energy_explained = energy_residual / energy_sample
    index = argsort(energy_explained)  # index[0] = best reconstruction, index[-1] = worst
    fig = plt.figure(figsize=(15, 9))
    k = fig.add_subplot(3, 2 * n, 1)
    k.set_xticklabels([])
    k.set_yticklabels([])
    for i in range(n):
        if i != 0:
            ka = fig.add_subplot(3, 2 * n, i + 1, sharex=k, sharey=k)
        else:
            ka = k
        ka.plot(X[index[i]])
        ka.set_title('s%d: %.1f%%' % (index[i], 100. *
                                      (1 - energy_explained[index[i]])))
        ka = fig.add_subplot(3, 2 * n, 2 * n + i + 1, sharex=k, sharey=k)
        ka.plot(r[index[i]])
        ka = fig.add_subplot(3, 2 * n, 4 * n + i + 1, sharex=k, sharey=k)
        s = reconstruct_from_code([code[index[i]]], kernels, n_features)
        ka.plot(s[0, :, :])
    for j, i in zip(range(n, 2 * n), range(n, 0, -1)):
        ka = fig.add_subplot(3, 2 * n, j + 1, sharex=k, sharey=k)
        ka.plot(X[index[-i]])
        ka.set_title('s%d: %.1f%%' % (index[-i], 100. *
                                      (1 - energy_explained[index[-i]])))
        ka = fig.add_subplot(3, 2 * n, 2 * n + j + 1, sharex=k, sharey=k)
        ka.plot(r[index[-i]])
        ka = fig.add_subplot(3, 2 * n, 4 * n + j + 1, sharex=k, sharey=k)
        s = reconstruct_from_code([code[index[-i]]], kernels, n_features)
        ka.plot(s[0, :, :])
    plt.tight_layout(pad=0.5)
    plt.savefig('EEG-reconstruction' + figname + '.png')
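
The sample ranking embedded in the plotting code above can be isolated for reuse; a minimal sketch, assuming X (signals) and r (residuals) are parallel lists of 2-D arrays as in the function above:

# Hedged sketch: same best/worst ranking as the plotting code, without matplotlib.
# Assumes X and r are parallel lists of 2-D arrays (one residual per sample).
from numpy import argsort, zeros
from numpy.linalg import norm

def rank_reconstructions(X, r, n):
    residual_fraction = zeros(len(X))
    for i in range(len(X)):
        residual_fraction[i] = norm(r[i], 'fro') / norm(X[i], 'fro')
    order = argsort(residual_fraction)   # ascending: best fit first
    explained_pct = 100. * (1. - residual_fraction)
    return order[:n], order[-n:], explained_pct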
Example 7
def plot_reconstruction_samples(X, r, code, kernels, n, figname):
    n_features = X[0].shape[0]
    energy_residual = zeros(len(r))
    for i in range(len(r)):
        energy_residual[i] = norm(r[i], 'fro')
    energy_sample = zeros(len(X))
    for i in range(len(X)):
        energy_sample[i] = norm(X[i], 'fro')

    # fraction of each sample's energy left in the residual (lower = better fit)
    energy_explained = energy_residual / energy_sample
    index = argsort(energy_explained)  # index[0] = best reconstruction, index[-1] = worst
    fig = plt.figure(figsize=(15, 9))
    k = fig.add_subplot(3, 2*n, 1)
    k.set_xticklabels([])
    k.set_yticklabels([])
    for i in range(n):
        if i != 0:
            ka = fig.add_subplot(3, 2*n, i+1, sharex=k, sharey=k)
        else:
            ka = k
        ka.plot(X[index[i]])
        ka.set_title('s%d: %.1f%%' % (index[i], 100.*(1-energy_explained[index[i]])))
        ka = fig.add_subplot(3, 2*n, 2*n+i+1, sharex=k, sharey=k)
        ka.plot(r[index[i]])
        ka = fig.add_subplot(3, 2*n, 4*n+i+1, sharex=k, sharey=k)
        s = reconstruct_from_code([code[index[i]]], kernels, n_features)  
        ka.plot(s[0,:,:])
    for j, i in zip(range(n, 2*n), range(n, 0, -1)):
        ka = fig.add_subplot(3, 2*n, j+1, sharex=k, sharey=k)
        ka.plot(X[index[-i]])
        ka.set_title('s%d: %.1f%%' % (index[-i], 100.*(1-energy_explained[index[-i]])))
        ka = fig.add_subplot(3, 2*n, 2*n+j+1, sharex=k, sharey=k)
        ka.plot(r[index[-i]])
        ka = fig.add_subplot(3, 2*n, 4*n+j+1, sharex=k, sharey=k)
        s = reconstruct_from_code([code[index[-i]]], kernels, n_features)  
        ka.plot(s[0,:,:])
    plt.tight_layout(pad=0.5)
    plt.savefig('EEG-reconstruction'+figname+'.png')
Example 8
def _verif_OMP():
    n_samples = 1000
    n_nonzero_coefs = 3

    for n_features in range(5, 50, 5):
        # integer sizes: "/" would give floats on Python 3 and break array shapes
        kernel_init_len = n_features - n_features // 2
        n_dims = n_features // 2
        n_kernels = n_features * 5
        dico, signals, decomposition = _generate_testbed(kernel_init_len,
                                                        n_nonzero_coefs,
                                                        n_kernels,
                                                        n_samples, n_features,
                                                        n_dims)
        r, d = multivariate_sparse_encode(signals, dico, n_nonzero_coefs,
                                          n_jobs=1)
        reconstructed = reconstruct_from_code(d, dico, n_features)

        residual_energy = 0.
        for sig, rec in zip(signals, reconstructed):
            residual_energy += ((sig-rec)**2).sum(1).mean()

        print('Mean energy of the', n_samples, 'residuals for',
              (n_features, n_dims), 'features and', n_kernels, 'kernels of',
              (kernel_init_len, n_dims), 'is', residual_energy / n_samples)