# --- Code example #1 (fragment boundary) ---
    # NOTE(review): this fragment is the tail of a CDL estimator constructor
    # call whose opening line lies outside this view — confirm against caller.
    # Solver used for the d-step (dictionary/atom update).
    solver_d='alternate_adaptive',
    solver_d_kwargs={'max_iter': 300},  # cap on inner d-step iterations
    # Technical parameters
    verbose=1,       # print progress information during fitting
    random_state=0,  # seed for reproducibility
    n_jobs=6)        # number of parallel workers

###############################################################################
# Here, we load the data from the somato-sensory dataset and preprocess them
# in epochs. The epochs are selected around the stim, starting 2 seconds
# before and finishing 4 seconds after.

from alphacsc.datasets.somato import load_data
# Epoch window around the stimulus, in seconds: 2 s before to 4 s after.
t_lim = (-2, 4)
# X: epoched data; info: measurement-info structure.
# NOTE(review): `sfreq` is defined elsewhere in the full script — confirm.
X, info = load_data(epoch=t_lim, sfreq=sfreq)

###############################################################################
# Fit the model on X: this learns the rank-1 atoms and their sparse
# activations. `cdl` is the estimator constructed earlier in the script.
cdl.fit(X)

###############################################################################
# Display the 4-th atom, which displays a :math:`\mu`-waveform in its temporal
# pattern.

import mne
import numpy as np
import matplotlib.pyplot as plt

i_atom = 4   # index of the atom to display (0-based, i.e. the fifth atom)
n_plots = 3  # number of panels in the figure
# --- Code example #2 (fragment boundary) ---
    # Cartesian product over every benchmark configuration to run.
    out_iterator = itertools.product(n_times_atom_list, n_atoms_list,
                                     n_channel_list, reg_list)

    for params in out_iterator:
        n_times_atom, n_atoms, n_channels, reg = params
        msg = 'n_times_atom, n_atoms, n_channels, reg = ' + str(params)
        # Print a colored banner so each configuration is easy to spot in logs.
        print(colorify(msg, RED))
        print(colorify('-' * len(msg), RED))

        # File name under which the figures for this configuration are saved.
        save_name = base_name + str(params)
        save_name = os.path.join('figures', save_name)

        all_results = []

        # Load raw (non-epoched) recordings; only 2 trials to keep it fast.
        X, info = load_data(epoch=False, n_jobs=n_jobs, n_trials=2)

        # Restrict the data to the channel count for this configuration.
        if n_channels == 1:
            X = X[:, 0, :]  # take only one channel
        elif n_channels is not None:
            X = X[:, :n_channels, :]

        assert X.shape[0] > 1  # we need at least two trials for sporco
        X_shape = X.shape

        # Univariate solvers only apply to single-channel data.
        if n_channels == 1:
            methods = methods_univariate
        else:
            methods = methods_multivariate

        # One run per (method, random state) pair.
        iterator = itertools.product(methods, range(n_states))
# --- Code example #3 (fragment boundary) ---
                        help='run the experiment for multivariate')
    parser.add_argument('--wohlberg',
                        action="store_true",
                        help='run the experiment for wohlberg')

    args = parser.parse_args()

    # Use the caching utilities from joblib to save intermediate results and
    # avoid losing computations when the interpreter crashes.
    # NOTE(review): joblib >= 0.12 renamed `cachedir` to `location` — confirm
    # the pinned joblib version still accepts `cachedir`.
    mem = Memory(cachedir='.', verbose=0)
    cached_one_run = mem.cache(func=one_run)
    delayed_one_run = delayed(cached_one_run)

    # Load the raw (non-epoched) somato-sensory recordings.
    from alphacsc.datasets.somato import load_data
    X, info = load_data(epoch=False, n_jobs=args.njobs)

    # Set dictionary learning parameters
    n_atoms = 2  # K: number of atoms in the dictionary
    n_times_atom = 128  # L: temporal length of each atom, in samples

    # Set the benchmarking parameters.
    reg = .005    # sparsity regularization strength
    n_iter = 50   # optimization iterations per run
    n_states = 5  # random initializations per method

    # Select the method to run and the range of n_channels
    n_channels = X.shape[1]
    methods = [(run_multichannel, 'rank1', n_iter)]
    # Roughly log-spaced channel counts from 1 to n_channels, deduplicated.
    span_channels = np.unique(
        np.floor(np.logspace(0, np.log10(n_channels), 10)).astype(int))
# --- Code example #4 (fragment boundary) ---
    # Plot the power spectral density (PSD) of the temporal atom.
    ax = axes[0, 2]
    psd = np.abs(np.fft.rfft(v_hat))**2
    # rfft bins span 0 Hz up to the Nyquist frequency (sfreq / 2).
    frequencies = np.linspace(0, sfreq / 2.0, len(psd))
    ax.semilogy(frequencies, psd)
    ax.set(xlabel='Frequencies (Hz)', title='Power Spectral Density')
    ax.grid(True)
    ax.set_xlim(0, 30)  # focus on the low-frequency band of interest

    plt.tight_layout()
    plt.show()


# Define the parameters
sfreq = 150.  # sampling frequency used to (re)load the data, in Hz
X, info = load_data(epoch=True, sfreq=sfreq)
n_trials, n_channels, n_times = X.shape

# Define the shape of the dictionary
n_atoms = 25
n_times_atom = int(round(sfreq * 1.0))  # 1000. ms

# Set parameter for our dictionary learning algorithm
reg = .2     # sparsity regularization strength
n_iter = 50  # number of optimization iterations

# Batch convolutional dictionary learning estimator.
# NOTE(review): this constructor call continues beyond this fragment.
cdl = BatchCDL(n_atoms=n_atoms,
               n_times_atom=n_times_atom,
               reg=reg,
               n_iter=n_iter,
               eps=2e3,
# --- Code example #5 (fragment boundary) ---
    # Solver used for the d-step (dictionary/atom update); the opening of this
    # constructor call lies outside this fragment.
    solver_d='alternate_adaptive',
    solver_d_kwargs={'max_iter': 300},  # cap on inner d-step iterations
    # sort atoms by explained variances
    sort_atoms=True,
    # Technical parameters
    verbose=1,       # print progress information during fitting
    random_state=0,  # seed for reproducibility
    n_jobs=n_jobs)   # number of parallel workers

###############################################################################
# Here, we load the data from the sample dataset. The data is not epoched yet,
# but we split it into ``2 * n_jobs`` parts to make the most of multiple
# processors during the model fitting.

from alphacsc.datasets.somato import load_data
X_split, info = load_data(sfreq=sfreq, dataset='sample', n_splits=2 * n_jobs)

###############################################################################
# Fit the model and learn rank1 atoms
cdl.fit(X_split)

###############################################################################
# To avoid artifacts due to the splitting, we can optionally reload the data.
X, info = load_data(sfreq=sfreq, dataset='sample', n_splits=1)

###############################################################################
# Then we call the `transform` method, which returns the sparse codes
# associated with X, without changing the dictionary learned during the `fit`.
z_hat = cdl.transform(X)

###############################################################################