Example #1
def test_time_mask():
    """Test safe time masking."""
    N = 10
    x = np.arange(N).astype(float)
    assert_equal(_time_mask(x, 0, N - 1).sum(), N)
    assert_equal(_time_mask(x - 1e-10, 0, N - 1).sum(), N)
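For orientation, the behavior these assertions exercise can be approximated with plain NumPy comparisons. This is a simplified sketch, not MNE's actual _time_mask (which additionally handles sample-accurate rounding via sfreq and a raise_error flag):

import numpy as np

def simple_time_mask(times, tmin=None, tmax=None):
    # Boolean mask selecting samples with tmin <= t <= tmax.
    times = np.asarray(times)
    tmin = -np.inf if tmin is None else tmin
    tmax = np.inf if tmax is None else tmax
    return (times >= tmin) & (times <= tmax)

x = np.arange(10).astype(float)
assert simple_time_mask(x, 0, 9).sum() == 10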
Example #2
    def spectral_ratio_ssd(self, ssd_sources):
        """Spectral ratio measure for best n_components selection.

        See Nikulin et al. 2011, Eq. (24).

        Parameters
        ----------
        ssd_sources : array
            The data projected onto the SSD space (the output of transform).
        """

        psd, freqs = psd_array_welch(ssd_sources,
                                     sfreq=self.sampling_freq,
                                     n_fft=self.n_fft)
        sig_idx = _time_mask(freqs, *self.freqs_signal)
        noise_idx = _time_mask(freqs, *self.freqs_noise)
        if psd.ndim == 3:
            spec_ratio = (psd[:, :, sig_idx].mean(axis=2).mean(axis=0) /
                          psd[:, :, noise_idx].mean(axis=2).mean(axis=0))
        else:
            spec_ratio = (psd[:, sig_idx].mean(axis=1) /
                          psd[:, noise_idx].mean(axis=1))
        sorter_spec = spec_ratio.argsort()[::-1]
        return spec_ratio, sorter_spec
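The spectral-ratio idea itself needs nothing SSD-specific and can be tried standalone. A minimal sketch on synthetic sources, assuming mne is installed; the 8-13 Hz signal band and flanking noise bands are illustrative choices, not values taken from the example above:

import numpy as np
from mne.time_frequency import psd_array_welch

rng = np.random.default_rng(0)
sources = rng.standard_normal((4, 2048))    # (n_components, n_times)
psd, freqs = psd_array_welch(sources, sfreq=250., n_fft=512)
sig = (freqs >= 8) & (freqs <= 13)                              # signal band
noise = ((freqs >= 5) & (freqs <= 7)) | ((freqs >= 14) & (freqs <= 16))
spec_ratio = psd[:, sig].mean(axis=1) / psd[:, noise].mean(axis=1)
sorter_spec = spec_ratio.argsort()[::-1]    # best components first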
Example #3
def epochs_compute_pe(epochs,
                      kernel,
                      tau,
                      tmin=None,
                      tmax=None,
                      backend='python',
                      method_params=None):
    """Compute Permutation Entropy (PE)

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs on which to compute the PE.
    kernel : int
        The number of samples to use to transform to a symbol
    tau : int
        The number of samples left between the ones that defines a symbol.
    backend : {'python', 'c'}
        The backend to be used. Defaults to 'python'.
    """
    if method_params is None:
        method_params = {}

    freq = epochs.info['sfreq']

    picks = mne.io.pick.pick_types(epochs.info, meg=True, eeg=True)

    data = epochs.get_data()[:, picks, ...]
    n_epochs = len(data)

    data = np.hstack(data)

    if 'filter_freq' in method_params:
        filter_freq = method_params['filter_freq']
    else:
        filter_freq = np.double(freq) / kernel / tau
    logger.info('Filtering at %.2f Hz' % filter_freq)
    b, a = butter(6, 2.0 * filter_freq / np.double(freq), 'lowpass')

    fdata = np.transpose(
        np.array(np.split(filtfilt(b, a, data), n_epochs, axis=1)), [1, 2, 0])

    time_mask = _time_mask(epochs.times, tmin, tmax)
    fdata = fdata[:, time_mask, :]

    if backend == 'python':
        logger.info("Performing symbolic transformation")
        sym, count = _symb_python(fdata, kernel, tau)
        pe = np.nan_to_num(-np.nansum(count * np.log(count), axis=1))
    elif backend == 'c':
        from ..optimizations.jivaro import pe as jpe
        pe, sym = jpe(fdata, kernel, tau)
    else:
        raise ValueError('backend %s not supported for PE' % backend)
    nsym = math.factorial(kernel)
    pe = pe / np.log(nsym)
    return pe, sym
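As a reference for what the symbolic transform and the log(kernel!) normalization compute, here is a tiny self-contained permutation-entropy sketch; it is illustrative only, not the optimized _symb_python/C backend used above:

import math
from itertools import permutations
import numpy as np

def perm_entropy(x, kernel=3, tau=1):
    # Count ordinal patterns of `kernel` samples spaced `tau` apart.
    patterns = list(permutations(range(kernel)))
    counts = np.zeros(len(patterns))
    for i in range(len(x) - (kernel - 1) * tau):
        window = x[i:i + kernel * tau:tau]
        counts[patterns.index(tuple(np.argsort(window)))] += 1
    p = counts[counts > 0] / counts.sum()
    # Normalize by the entropy of a uniform distribution over kernel! symbols.
    return -np.sum(p * np.log(p)) / np.log(math.factorial(kernel))

print(perm_entropy(np.sin(np.linspace(0, 8 * np.pi, 500))))  # low PE: regular signal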
Example #4
def epochs_compute_komplexity(epochs,
                              nbins,
                              tmin=None,
                              tmax=None,
                              backend='python',
                              method_params=None):
    """Compute complexity (K)

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs on which to compute the wSMI.
    nbins : int
        Number of bins to use for symbolic transformation
    method_params : dictionary.
        Overrides default parameters for the backend used.
        OpenMP specific {'nthreads'}
    backend : {'python', 'openmp'}
        The backend to be used. Defaults to 'python'.
    """
    picks = pick_types(epochs.info, meg=True, eeg=True)

    if method_params is None:
        method_params = {}

    data = epochs.get_data()[:, picks if picks is not None else Ellipsis]
    time_mask = _time_mask(epochs.times, tmin, tmax)
    data = data[:, :, time_mask]
    logger.info("Running KolmogorovComplexity")

    if backend == 'python':
        start_time = time.time()
        komp = _komplexity_python(data, nbins)
        elapsed_time = time.time() - start_time
        logger.info("Elapsed time {} sec".format(elapsed_time))
    elif backend == 'openmp':
        from ..optimizations.ompk import komplexity as _ompk_k
        nthreads = (method_params['nthreads']
                    if 'nthreads' in method_params else 1)
        if nthreads == 'auto':
            try:
                import mkl
                nthreads = mkl.get_max_threads()
                logger.info(
                    'Autodetected number of threads {}'.format(nthreads))
            except Exception:
                logger.info('Cannot autodetect number of threads')
                nthreads = 1
        start_time = time.time()
        komp = _ompk_k(data, nbins, nthreads)
        elapsed_time = time.time() - start_time
        logger.info("Elapsed time {} sec".format(elapsed_time))
    else:
        raise ValueError('backend %s not supported for KolmogorovComplexity' %
                         backend)
    return komp
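The K measure above approximates Kolmogorov complexity by compressing a symbolized signal. A minimal sketch of that idea using zlib, assuming nothing about the real _komplexity_python internals:

import zlib
import numpy as np

def komplexity_sketch(signal, nbins):
    # Discretize into nbins symbols, then measure compressibility.
    edges = np.linspace(signal.min(), signal.max(), nbins + 1)[1:-1]
    symbols = np.digitize(signal, edges).astype(np.uint8).tobytes()
    return len(zlib.compress(symbols)) / len(symbols)

rng = np.random.default_rng(0)
print(komplexity_sketch(rng.standard_normal(1000), nbins=32))         # noise: high
print(komplexity_sketch(np.sin(np.linspace(0, 20, 1000)), nbins=32))  # regular: lower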
Example #5
def compute_auc(dip, tmin=-np.inf, tmax=np.inf):
    """Compute the AUC values for a DipoleFixed object."""
    from mne.utils import _time_mask
    if not isinstance(dip, DipoleFixed):
        raise TypeError('dip must be a DipoleFixed, got "%s"' % (type(dip),))
    pick = pick_types(dip.info, meg=False, dipole=True)
    if len(pick) != 1:
        raise RuntimeError('Could not find dipole data')
    time_mask = _time_mask(dip.times, tmin, tmax, dip.info['sfreq'])
    data = dip.data[pick[0], time_mask]
    return np.sum(np.abs(data)) * len(data) * (1. / dip.info['sfreq'])
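A worked numeric check of the expression above: the sum of absolute amplitudes is scaled by n_samples / sfreq. The values here are made up:

import numpy as np

sfreq = 1000.
data = np.array([1e-9, -2e-9, 3e-9])     # 3 samples of dipole amplitude
auc = np.sum(np.abs(data)) * len(data) * (1. / sfreq)
print(auc)                               # 6e-9 * 3 / 1000 = 1.8e-11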
Example #6
def epochs_compute_cnv(epochs, tmin=None, tmax=None):
    """Compute contingent negative variation (CNV)

    Parameters
    ----------
    epochs : instance of Epochs
        The input data.
    tmin : float | None
        The first time point to include, if None, all samples form the first
        sample of the epoch will be used. Defaults to None.
    tmax : float | None
        The last time point to include, if None, all samples up to the last
        sample of the epoch wi  ll be used. Defaults to None.
    return_epochs : bool
        Whether to compute an average or not. If False, data will be
        averaged and put in an Evoked object. Defaults to False.

    Returns
    -------
    cnv : ndarray of float (n_channels, n_epochs) | instance of Evoked
        The regression slopes (betas) represewnting contingent negative
        variation.
    """
    picks = mne.pick_types(epochs.info, meg=True, eeg=True)
    n_epochs = len(epochs.events)
    n_channels = len(picks)
    # we reduce over time samples
    slopes = np.zeros((n_epochs, n_channels))
    intercepts = np.zeros((n_epochs, n_channels))
    if tmax is None:
        tmax = epochs.times[-1]
    if tmin is None:
        tmin = epochs.times[0]

    fit_range = np.where(_time_mask(epochs.times, tmin, tmax))[0]

    # design: intercept + increasing time
    design_matrix = np.c_[np.ones(len(fit_range)),
                          epochs.times[fit_range] - tmin]

    # estimate single trial regression over time samples
    scales = np.zeros(n_channels)
    info_ = pick_info(epochs.info, picks)
    for this_type, this_picks in _picks_by_type(info_):
        scales[this_picks] = _handle_default('scalings')[this_type]

    for ii, epoch in enumerate(epochs):
        y = epoch[picks][:, fit_range].T  # time is samples
        betas, _, _, _ = linalg.lstsq(a=design_matrix, b=y * scales)
        intercepts[ii] = betas[0]
        slopes[ii] = betas[1]

    return slopes, intercepts
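The per-epoch regression step is ordinary least squares against an intercept-plus-time design. A standalone sketch with known slopes, using the same linalg.lstsq call:

import numpy as np
from scipy import linalg

times = np.linspace(0., 1., 100)
design_matrix = np.c_[np.ones(len(times)), times - times[0]]
# Two synthetic channels with slopes 2 and -1 plus a little noise.
y = (np.outer(np.array([2., -1.]), times) +
     0.01 * np.random.randn(2, len(times))).T    # time is samples
betas, _, _, _ = linalg.lstsq(a=design_matrix, b=y)
print(betas[1])                                  # recovered slopes ~ [2., -1.]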
Example #7
def time_mask(times, tmin, tmax):
    """Return a boolean mask of times according to a tmin/tmax.

    Parameters
    ----------
    times : array, shape (n_times,)
        The times (in seconds) to mask.
    tmin : float or int
        The minimum time to include in the mask.
    tmax : float or int
        The maximum time to include in the mask.

    Returns
    -------
    mask : array, dtype bool, shape (n_times,)
        A boolean mask with values True between tmin and tmax.
    """
    return _time_mask(times, tmin, tmax)
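A short usage sketch for the wrapper above; it assumes the MNE version these snippets target, where _time_mask is importable from mne.utils (as in the compute_auc example earlier):

import numpy as np
from mne.utils import _time_mask

times = np.linspace(-0.2, 0.8, 101)
mask = time_mask(times, tmin=0., tmax=0.5)    # wrapper defined above
print(times[mask].min(), times[mask].max())   # ~0.0 ... ~0.5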
Example #8
    def _prepare_data(self, picks, target):
        this_picks = {k: None for k in ['times', 'channels', 'epochs']}
        if picks is not None:
            if any(x not in this_picks for x in picks):
                raise ValueError('Picking is not compatible for {}'.format(
                    self._get_title()))
        if picks is None:
            picks = {}
        this_picks.update(picks)
        to_preserve = self._get_preserve_axis(target)
        if len(to_preserve) > 0:
            for axis in to_preserve:
                this_picks[axis] = None

        # Pick Times based on original times
        time_picks = this_picks['times']
        time_mask = _time_mask(self.epochs_.times, self.tmin, self.tmax)
        if time_picks is not None:
            picks_mask = np.zeros(len(time_mask), dtype=bool)
            picks_mask[time_picks] = True
            time_mask = np.logical_and(time_mask, picks_mask)

        # Pick epochs based on original indices
        epochs_picks = this_picks['epochs']
        this_epochs = self.epochs_
        if epochs_picks is not None:
            this_epochs = this_epochs[epochs_picks]

        # Pick channels based on original indices
        ch_picks = this_picks['channels']
        if ch_picks is None:
            ch_picks = pick_types(this_epochs.info, eeg=True, meg=True)

        if (self.subset and self.missing_nan
                and not epochs_has_event(this_epochs, self.subset)):
            data = np.array([[[np.nan]]])
        else:
            if self.subset:
                this_epochs = this_epochs[self.subset]
            data = this_epochs.get_data()[:, ch_picks][..., time_mask]

        return data
Example #9
def _build_design_matrix(X, y, sfreq, times, delays, tmin, tmax, names):
    X, y, tmin, tmax = _check_inputs(X, y, times, delays, tmin, tmax)
    if names is None:
        names = [str(i) for i in range(X.shape[1])]

    # Iterate through epochs with custom tmin/tmax if necessary
    X_out, y_out, lab_out = [[] for _ in range(3)]
    for i, (epX, epy, itmin, itmax) in enumerate(zip(X, y, tmin, tmax)):
        # Create delays
        epX_del = delay_timeseries(epX, sfreq, delays)

        # pull times of interest
        msk_time = _time_mask(times, itmin, itmax)
        epX_out = epX_del[:, msk_time]
        epy_out = epy[msk_time]

        # Unique labels for this epoch
        ep_lab = np.repeat(i + 1, epy_out.shape[-1])

        X_out.append(epX_out)
        y_out.append(epy_out)
        lab_out.append(ep_lab)
    return np.hstack(X_out), np.hstack(y_out), np.hstack(lab_out), names
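The delay_timeseries helper is not shown here; as an assumption about its role, it plausibly stacks time-shifted copies of each channel so a linear model can fit temporal filters. A hypothetical sketch of such a helper:

import numpy as np

def delay_timeseries_sketch(X, sfreq, delays):
    # X: (n_channels, n_times) -> (n_channels * n_delays, n_times).
    out = []
    for delay in delays:
        shift = int(round(delay * sfreq))
        rolled = np.roll(X, shift, axis=-1)
        rolled[..., :shift] = 0          # zero the samples that wrapped around
        out.append(rolled)
    return np.vstack(out)

X = np.random.randn(2, 100)
print(delay_timeseries_sketch(X, sfreq=100., delays=[0., .01, .02]).shape)  # (6, 100)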
Example #10
def test_time_mask():
    """Test safe time masking."""
    N = 10
    x = np.arange(N).astype(float)
    assert_equal(_time_mask(x, 0, N - 1).sum(), N)
    assert_equal(_time_mask(x - 1e-10, 0, N - 1, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, None, N - 1, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, None, None, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, -np.inf, None, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, None, np.inf, sfreq=1000.).sum(), N)
    # non-uniformly spaced inputs
    x = np.array([4, 10])
    assert_equal(
        _time_mask(x[:1], tmin=10, sfreq=1, raise_error=False).sum(), 0)
    assert_equal(
        _time_mask(x[:1], tmin=11, tmax=12, sfreq=1, raise_error=False).sum(),
        0)
    assert_equal(_time_mask(x, tmin=10, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=6, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=5, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=4.5001, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=4.4999, sfreq=1).sum(), 2)
    assert_equal(_time_mask(x, tmin=4, sfreq=1).sum(), 2)
    # degenerate cases
    pytest.raises(ValueError, _time_mask, x[:1], tmin=11, tmax=12)
    pytest.raises(ValueError, _time_mask, x[:1], tmin=10, sfreq=1)
Example #11
def main():

    #################################################
    ## SETUP

    ## Get list of subject files
    subj_files = listdir(DAT_PATH)
    subj_files = [file for file in subj_files if EXT.lower() in file.lower()]

    ## Set up FOOOF Objects
    # Initialize FOOOF settings & objects
    fooof_settings = FOOOFSettings(peak_width_limits=PEAK_WIDTH_LIMITS, max_n_peaks=MAX_N_PEAKS,
                                   min_peak_amplitude=MIN_PEAK_AMP, peak_threshold=PEAK_THRESHOLD,
                                   aperiodic_mode=APERIODIC_MODE)
    fm = FOOOF(*fooof_settings, verbose=False)
    fg = FOOOFGroup(*fooof_settings, verbose=False)

    # Save out a settings file
    fg.save('0-FOOOF_Settings', pjoin(RES_PATH, 'FOOOF'), save_settings=True)

    # Set up the dictionary to store all the FOOOF results
    fg_dict = dict()
    for load_label in LOAD_LABELS:
        fg_dict[load_label] = dict()
        for side_label in SIDE_LABELS:
            fg_dict[load_label][side_label] = dict()
            for seg_label in SEG_LABELS:
                fg_dict[load_label][side_label][seg_label] = []

    ## Initialize group level data stores
    n_subjs, n_conds, n_times = len(subj_files), 3, N_TIMES
    group_fooofed_alpha_freqs = np.zeros(shape=[n_subjs])
    dropped_components = np.ones(shape=[n_subjs, 50]) * 999
    dropped_trials = np.ones(shape=[n_subjs, 1500]) * 999
    canonical_group_avg_dat = np.zeros(shape=[n_subjs, n_conds, n_times])
    fooofed_group_avg_dat = np.zeros(shape=[n_subjs, n_conds, n_times])

    # Set channel types
    ch_types = {'LHor' : 'eog', 'RHor' : 'eog', 'IVer' : 'eog', 'SVer' : 'eog',
                'LMas' : 'misc', 'RMas' : 'misc', 'Nose' : 'misc', 'EXG8' : 'misc'}

    #################################################
    ## RUN ACROSS ALL SUBJECTS

    # Run analysis across each subject
    for s_ind, subj_file in enumerate(subj_files):

        # Get subject label and print status
        subj_label = subj_file.split('.')[0]
        print('\nCURRENTLY RUNNING SUBJECT: ', subj_label, '\n')

        #################################################
        ## LOAD / ORGANIZE / SET-UP DATA

        # Load subject data, apply fixes for channels, etc.
        eeg_dat = mne.io.read_raw_edf(pjoin(DAT_PATH, subj_file),
                                      preload=True, verbose=False)

        # Fix channel name labels
        eeg_dat.info['ch_names'] = [chl[2:] for chl in \
            eeg_dat.ch_names[:-1]] + [eeg_dat.ch_names[-1]]
        for ind, chi in enumerate(eeg_dat.info['chs']):
            eeg_dat.info['chs'][ind]['ch_name'] = eeg_dat.info['ch_names'][ind]

        # Update channel types
        eeg_dat.set_channel_types(ch_types)

        # Set reference - average reference
        eeg_dat = eeg_dat.set_eeg_reference(ref_channels='average',
                                            projection=False, verbose=False)

        # Set channel montage
        chs = mne.channels.read_montage('standard_1020', eeg_dat.ch_names)
        eeg_dat.set_montage(chs)

        # Get event information & check all used event codes
        evs = mne.find_events(eeg_dat, shortest_event=1, verbose=False)

        # Pull out sampling rate
        srate = eeg_dat.info['sfreq']

        #################################################
        ## Pre-Processing: ICA

        # High-pass filter data for running ICA
        eeg_dat.filter(l_freq=1., h_freq=None, fir_design='firwin')

        if RUN_ICA:

            print("\nICA: CALCULATING SOLUTION\n")

            # ICA settings
            method = 'fastica'
            n_components = 0.99
            random_state = 47
            reject = {'eeg': 20e-4}

            # Initialize ICA object
            ica = ICA(n_components=n_components, method=method,
                      random_state=random_state)

            # Fit ICA
            ica.fit(eeg_dat, reject=reject)

            # Save out ICA solution
            ica.save(pjoin(RES_PATH, 'ICA', subj_label + '-ica.fif'))

        # Otherwise: load previously saved ICA to apply
        else:
            print("\nICA: USING PRECOMPUTED\n")
            ica = read_ica(pjoin(RES_PATH, 'ICA', subj_label + '-ica.fif'))

        # Find components to drop, based on correlation with EOG channels
        drop_inds = []
        for chi in EOG_CHS:
            inds, _ = ica.find_bads_eog(eeg_dat, ch_name=chi, threshold=2.5,
                                             l_freq=1, h_freq=10, verbose=False)
            drop_inds.extend(inds)
        drop_inds = list(set(drop_inds))

        # Set which components to drop, and collect record of this
        ica.exclude = drop_inds
        dropped_components[s_ind, 0:len(drop_inds)] = drop_inds

        # Apply ICA to data
        eeg_dat = ica.apply(eeg_dat)

        #################################################
        ## SORT OUT EVENT CODES

        # Extract a list of all the event labels
        all_trials = [it for it2 in EV_DICT.values() for it in it2]

        # Create list of new event codes to be used to label correct trials (300s)
        all_trials_new = [it + 100 for it in all_trials]
        # This is an annoying way to collapse across the doubled event markers from above
        all_trials_new = [it - 1 if not ind%2 == 0 else it for ind, it in enumerate(all_trials_new)]
        # Get labelled dictionary of new event names
        ev_dict2 = {k:v for k, v in zip(EV_DICT.keys(), set(all_trials_new))}

        # Initialize variables to store new event definitions
        evs2 = np.empty(shape=[0, 3], dtype='int64')
        lags = np.array([])

        # Loop through, creating new events for all correct trials
        t_min, t_max = -0.4, 3.0
        for ref_id, targ_id, new_id in zip(all_trials, CORR_CODES * 6, all_trials_new):

            t_evs, t_lags = mne.event.define_target_events(evs, ref_id, targ_id, srate,
                                                           t_min, t_max, new_id)

            if len(t_evs) > 0:
                evs2 = np.vstack([evs2, t_evs])
                lags = np.concatenate([lags, t_lags])

        #################################################
        ## FOOOF

        # Set channel of interest
        ch_ind = eeg_dat.ch_names.index(CHL)

        # Calculate PSDs over ~ first 2 minutes of data, for specified channel
        fmin, fmax = 1, 50
        tmin, tmax = 5, 125
        psds, freqs = mne.time_frequency.psd_welch(eeg_dat, fmin=fmin, fmax=fmax,
                                                   tmin=tmin, tmax=tmax,
                                                   n_fft=int(2*srate), n_overlap=int(srate),
                                                   n_per_seg=int(2*srate),
                                                   verbose=False)

        # Fit FOOOF across all channels
        fg.fit(freqs, psds, FREQ_RANGE, n_jobs=-1)

        # Save out FOOOF results
        fg.save(subj_label + '_fooof', pjoin(RES_PATH, 'FOOOF'), save_results=True)

        # Extract individualized CF from specified channel, add to group collection
        fm = fg.get_fooof(ch_ind, False)
        fooof_freq, _, _ = get_band_peak(fm.peak_params_, [7, 14])
        group_fooofed_alpha_freqs[s_ind] = fooof_freq

        # If no FOOOF alpha was extracted, reset to 10
        if np.isnan(fooof_freq):
            fooof_freq = 10

        #################################################
        ## ALPHA FILTERING

        # CANONICAL: Filter data to canonical alpha band: 8-12 Hz
        alpha_dat = eeg_dat.copy()
        alpha_dat.filter(8, 12, fir_design='firwin', verbose=False)
        alpha_dat.apply_hilbert(envelope=True, verbose=False)

        # FOOOF: Filter data to FOOOF derived alpha band
        fooof_dat = eeg_dat.copy()
        fooof_dat.filter(fooof_freq-2, fooof_freq+2, fir_design='firwin')
        fooof_dat.apply_hilbert(envelope=True)

        #################################################
        ## EPOCH TRIALS

        # Set epoch timings
        tmin, tmax = -0.85, 1.1

        # Epoch trials - raw data for trial rejection
        epochs = mne.Epochs(eeg_dat, evs2, ev_dict2, tmin=tmin, tmax=tmax,
                            baseline=None, preload=True, verbose=False)

        # Epoch trials - filtered version
        epochs_alpha = mne.Epochs(alpha_dat, evs2, ev_dict2, tmin=tmin, tmax=tmax,
                                  baseline=(-0.5, -0.35), preload=True, verbose=False)
        epochs_fooof = mne.Epochs(fooof_dat, evs2, ev_dict2, tmin=tmin, tmax=tmax,
                                  baseline=(-0.5, -0.35), preload=True, verbose=False)

        #################################################
        ## PRE-PROCESSING: AUTO-REJECT
        if RUN_AUTOREJECT:

            print('\nAUTOREJECT: CALCULATING SOLUTION\n')

            # Initialize and run autoreject across epochs
            ar = AutoReject(n_jobs=4, verbose=False)
            ar.fit(epochs)

            # Save out AR solution
            ar.save(pjoin(RES_PATH, 'AR', subj_label + '-ar.hdf5'), overwrite=True)

        # Otherwise: load & apply previously saved AR solution
        else:
            print('\nAUTOREJECT: USING PRECOMPUTED\n')
            ar = read_auto_reject(pjoin(RES_PATH, 'AR', subj_label + '-ar.hdf5'))
            ar.verbose = 'tqdm'

        # Apply autoreject to the original epochs object it was learnt on
        epochs, rej_log = ar.transform(epochs, return_log=True)

        # Apply autoreject to the copies of the data - apply interpolation, then drop same epochs
        _apply_interp(rej_log, epochs_alpha, ar.threshes_, ar.picks_, ar.verbose)
        epochs_alpha.drop(rej_log.bad_epochs)
        _apply_interp(rej_log, epochs_fooof, ar.threshes_, ar.picks_, ar.verbose)
        epochs_fooof.drop(rej_log.bad_epochs)

        # Collect which epochs were dropped
        dropped_trials[s_ind, 0:sum(rej_log.bad_epochs)] = np.where(rej_log.bad_epochs)[0]

        #################################################
        ## SET UP CHANNEL CLUSTERS

        # Set channel clusters - take channels contralateral to stimulus presentation
        #  Note: channels will be used to extract data contralateral to stimulus presentation
        le_chs = ['P3', 'P5', 'P7', 'P9', 'O1', 'PO3', 'PO7']       # Left Side Channels
        le_inds = [epochs.ch_names.index(chn) for chn in le_chs]
        ri_chs = ['P4', 'P6', 'P8', 'P10', 'O2', 'PO4', 'PO8']      # Right Side Channels
        ri_inds = [epochs.ch_names.index(chn) for chn in ri_chs]

        #################################################
        ## TRIAL-RELATED ANALYSIS: CANONICAL vs. FOOOF

        ## Pull out channels of interest for each load level
        #  Channels extracted are those contralateral to stimulus presentation

        # Canonical Data
        lo1_a = np.concatenate([epochs_alpha['LeLo1']._data[:, ri_inds, :],
                                epochs_alpha['RiLo1']._data[:, le_inds, :]], 0)
        lo2_a = np.concatenate([epochs_alpha['LeLo2']._data[:, ri_inds, :],
                                epochs_alpha['RiLo2']._data[:, le_inds, :]], 0)
        lo3_a = np.concatenate([epochs_alpha['LeLo3']._data[:, ri_inds, :],
                                epochs_alpha['RiLo3']._data[:, le_inds, :]], 0)

        # FOOOFed data
        lo1_f = np.concatenate([epochs_fooof['LeLo1']._data[:, ri_inds, :],
                                epochs_fooof['RiLo1']._data[:, le_inds, :]], 0)
        lo2_f = np.concatenate([epochs_fooof['LeLo2']._data[:, ri_inds, :],
                                epochs_fooof['RiLo2']._data[:, le_inds, :]], 0)
        lo3_f = np.concatenate([epochs_fooof['LeLo3']._data[:, ri_inds, :],
                                epochs_fooof['RiLo3']._data[:, le_inds, :]], 0)

        ## Calculate average across trials and channels - add to group data collection

        # Canonical data
        canonical_group_avg_dat[s_ind, 0, :] = np.mean(lo1_a, 1).mean(0)
        canonical_group_avg_dat[s_ind, 1, :] = np.mean(lo2_a, 1).mean(0)
        canonical_group_avg_dat[s_ind, 2, :] = np.mean(lo3_a, 1).mean(0)

        # FOOOFed data
        fooofed_group_avg_dat[s_ind, 0, :] = np.mean(lo1_f, 1).mean(0)
        fooofed_group_avg_dat[s_ind, 1, :] = np.mean(lo2_f, 1).mean(0)
        fooofed_group_avg_dat[s_ind, 2, :] = np.mean(lo3_f, 1).mean(0)

        #################################################
        ## FOOOFING TRIAL AVERAGED DATA

        # Loop across loads & trial segments
        for seg_label, seg_time in zip(SEG_LABELS, SEG_TIMES):
            tmin, tmax = seg_time[0], seg_time[1]

            # Calculate PSDs across trials, fit FOOOF models to averages
            for le_label, ri_label, load_label in zip(['LeLo1', 'LeLo2', 'LeLo3'],
                                                      ['RiLo1', 'RiLo2', 'RiLo3'],
                                                      LOAD_LABELS):

                ## Calculate trial wise PSDs for left & right side trials
                trial_freqs, le_trial_psds = periodogram(
                    epochs[le_label]._data[:, :, _time_mask(epochs.times, tmin, tmax, srate)],
                    srate, window='hann', nfft=4*srate)
                trial_freqs, ri_trial_psds = periodogram(
                    epochs[ri_label]._data[:, :, _time_mask(epochs.times, tmin, tmax, srate)],
                    srate, window='hann', nfft=4*srate)

                ## FIT ALL CHANNELS VERSION
                if FIT_ALL_CHANNELS:

                    ## Average spectra across trials within a given load & side
                    le_avg_psd_contra = avg_func(le_trial_psds[:, ri_inds, :], 0)
                    le_avg_psd_ipsi = avg_func(le_trial_psds[:, le_inds, :], 0)
                    ri_avg_psd_contra = avg_func(ri_trial_psds[:, le_inds, :], 0)
                    ri_avg_psd_ipsi = avg_func(ri_trial_psds[:, ri_inds, :], 0)

                    ## Combine spectra across left & right trials for given load
                    ch_psd_contra = np.vstack([le_avg_psd_contra, ri_avg_psd_contra])
                    ch_psd_ipsi = np.vstack([le_avg_psd_ipsi, ri_avg_psd_ipsi])

                    ## Fit FOOOFGroup to all channels, average & and collect results
                    fg.fit(trial_freqs, ch_psd_contra, FREQ_RANGE)
                    fm = avg_fg(fg)
                    fg_dict[load_label]['Contra'][seg_label].append(fm.copy())
                    fg.fit(trial_freqs, ch_psd_ipsi, FREQ_RANGE)
                    fm = avg_fg(fg)
                    fg_dict[load_label]['Ipsi'][seg_label].append(fm.copy())

                ## COLLAPSE ACROSS CHANNELS VERSION
                else:

                    ## Average spectra across trials and channels within a given load & side
                    le_avg_psd_contra = avg_func(avg_func(le_trial_psds[:, ri_inds, :], 0), 0)
                    le_avg_psd_ipsi = avg_func(avg_func(le_trial_psds[:, le_inds, :], 0), 0)
                    ri_avg_psd_contra = avg_func(avg_func(ri_trial_psds[:, le_inds, :], 0), 0)
                    ri_avg_psd_ipsi = avg_func(avg_func(ri_trial_psds[:, ri_inds, :], 0), 0)

                    ## Collapse spectra across left & right trials for given load
                    avg_psd_contra = avg_func(np.vstack([le_avg_psd_contra, ri_avg_psd_contra]), 0)
                    avg_psd_ipsi = avg_func(np.vstack([le_avg_psd_ipsi, ri_avg_psd_ipsi]), 0)

                    ## Fit FOOOF, and collect results
                    fm.fit(trial_freqs, avg_psd_contra, FREQ_RANGE)
                    fg_dict[load_label]['Contra'][seg_label].append(fm.copy())
                    fm.fit(trial_freqs, avg_psd_ipsi, FREQ_RANGE)
                    fg_dict[load_label]['Ipsi'][seg_label].append(fm.copy())

    #################################################
    ## SAVE OUT RESULTS

    # Save out group data
    np.save(pjoin(RES_PATH, 'Group', 'alpha_freqs_group'), group_fooofed_alpha_freqs)
    np.save(pjoin(RES_PATH, 'Group', 'canonical_group'), canonical_group_avg_dat)
    np.save(pjoin(RES_PATH, 'Group', 'fooofed_group'), fooofed_group_avg_dat)
    np.save(pjoin(RES_PATH, 'Group', 'dropped_trials'), dropped_trials)
    np.save(pjoin(RES_PATH, 'Group', 'dropped_components'), dropped_components)

    # Save out second round of FOOOFing
    for load_label in LOAD_LABELS:
        for side_label in SIDE_LABELS:
            for seg_label in SEG_LABELS:
                fg = combine_fooofs(fg_dict[load_label][side_label][seg_label])
                fg.save('Group_' + load_label + '_' + side_label + '_' + seg_label,
                        pjoin(RES_PATH, 'FOOOF'), save_results=True)
Example #12
def test_time_mask():
    """Test safe time masking."""
    N = 10
    x = np.arange(N).astype(float)
    assert_equal(_time_mask(x, 0, N - 1).sum(), N)
    assert_equal(_time_mask(x - 1e-10, 0, N - 1, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, None, N - 1, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, None, None, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, -np.inf, None, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, None, np.inf, sfreq=1000.).sum(), N)
    # non-uniformly spaced inputs
    x = np.array([4, 10])
    assert_equal(_time_mask(x[:1], tmin=10, sfreq=1,
                            raise_error=False).sum(), 0)
    assert_equal(_time_mask(x[:1], tmin=11, tmax=12, sfreq=1,
                            raise_error=False).sum(), 0)
    assert_equal(_time_mask(x, tmin=10, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=6, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=5, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=4.5001, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=4.4999, sfreq=1).sum(), 2)
    assert_equal(_time_mask(x, tmin=4, sfreq=1).sum(), 2)
    # degenerate cases
    assert_raises(ValueError, _time_mask, x[:1], tmin=11, tmax=12)
    assert_raises(ValueError, _time_mask, x[:1], tmin=10, sfreq=1)
Example #13
def freq_mask(freqs, fmin, fmax):
    """convenience function to select frequencies"""
    return _time_mask(freqs, fmin, fmax)
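Usage sketch, reusing the wrapper just defined (and assuming _time_mask is in scope as in the surrounding examples; band edges are illustrative):

import numpy as np

freqs = np.linspace(0., 50., 101)
alpha = freq_mask(freqs, 8., 12.)   # boolean mask over freqs
print(freqs[alpha])                 # 8.0 ... 12.0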
Example #14
    evoked.pick_channels(ch_names=right)
if evoked.comment == 'hand' and hemi == 'left':
    evoked.pick_channels(ch_names=left)
elif evoked.comment == 'hand' and hemi == 'right':
    evoked.pick_channels(ch_names=right)
else:
    print("No condition named in file")
ev = evoked.copy()
ev.crop(tmin, tmax)
peak = ev.get_peak(return_amplitude=True, mode='abs')  # early lip window value

tmin = peak[1] - 0.015  # take 15 ms around peak
tmax = peak[1] + 0.015
time_mask = _time_mask(times=evoked.times,
                       tmin=tmin,
                       tmax=tmax,
                       sfreq=evoked.info['sfreq'],
                       raise_error=True)
pick = mne.pick_types(evoked.info, meg=True)
data = evoked.data[pick[0], time_mask]
auc = np.sum(np.abs(data)) * len(data) * (1. / evoked.info['sfreq'])

print('AUC value: %s' % auc)
print('peak latency [seconds]: %s' % peak[1])

evoked.pick_channels(ch_names=[peak[0]])  # keep only the peak channel for plotting

# plot channel with peak and AUC window
plt.figure()
plt.plot(evoked.times, evoked.data[0])
plt.axvline(peak[1], linestyle='-', color='r')
Example #15
def test_encodingmodel():
    """Test the encodingmodel fitting."""
    # Define data
    n_time = 3
    t_start = -.5
    sfreq = 1000
    n_channels = 5
    n_epochs = 10
    times = np.arange(n_time * sfreq) / float(sfreq) + t_start
    delays = np.arange(0, .4, .02)

    # Fitting parameters
    est = Ridge()
    n_iter = 4
    tmin_fit = 0
    tmax_fit = 1.5
    kws_fit = dict(times=times, tmin=tmin_fit, tmax=tmax_fit)
    msk_time = _time_mask(times, tmin_fit, tmax_fit)

    weights = 10 * rng.randn(n_channels * len(delays))
    X = rng.randn(n_epochs, n_channels, n_time * sfreq)
    y = np.stack([np.dot(weights, delay_timeseries(xep, sfreq, delays))
                  for xep in X])

    # --- Epochs data ---
    enc = EncodingModel(delays, est)
    enc.fit(X, y, sfreq, **kws_fit)

    # Make sure CV object and model is correct
    assert_true(isinstance(enc.cv, LabelShuffleSplit))
    assert_equal(enc.cv.labels.shape[-1],
                 np.hstack(y[..., msk_time]).shape[-1])
    assert_true(isinstance(enc.est.steps[-1][-1], type(est)))

    # Epochs w/ custom CV
    cv = LabelShuffleSplit
    cv_params = dict(n_iter=n_iter, test_size=.1)
    enc = EncodingModel(delays, est)
    enc.fit(X, y, sfreq, cv=cv, cv_params=cv_params, **kws_fit)
    assert_true(isinstance(enc.cv, LabelShuffleSplit))
    assert_equal(enc.cv.n_iter, n_iter)
    assert_equal(enc.cv.test_size, .1)

    # Make sure coefficients are correct
    assert_array_almost_equal(weights, enc.coefs_, decimal=2)
    assert_equal(enc.coefs_all_.shape[0], len(enc.cv))

    # Test incorrect inputs
    assert_raises(ValueError, enc.fit, X, y[:2], sfreq)
    assert_raises(ValueError, enc.fit, X, y[..., :5], sfreq)
    assert_raises(ValueError, enc.fit, X, y, sfreq, times=np.array([2, 3]))
    assert_raises(ValueError, enc.fit, X, y, sfreq,
                  tmin=0, tmax=np.array([1, 2]))

    # Test custom tstart / tstop for epochs
    tstarts = .2 * np.random.rand(n_epochs) - tmin_fit
    tstops = .2 * np.random.rand(n_epochs) + tmax_fit
    time_masks = np.array([_time_mask(times, itmin, itmax)
                          for itmin, itmax in zip(tstarts, tstops)])

    enc.fit(X, y, sfreq, times=times, tmin=tstarts, tmax=tstops)
    assert_equal(len(enc.cv.labels), time_masks.sum())

    # Giving time values outside of proper bounds
    assert_raises(ValueError, enc.fit, X, y, sfreq,
                  times=times, tmin=-2, tmax=0)
    assert_raises(ValueError, enc.fit, X, y, sfreq,
                  times=times, tmin=0, tmax=4)

    tstops[5] = 5
    assert_raises(ValueError, enc.fit, X, y, sfreq,
                  times=times, tmin=tstarts, tmax=tstops)

    # --- Single trial data ---
    enc.fit(X[0], y[0], sfreq, **kws_fit)

    # Make sure the CV was chosen correctly + has right time points
    assert_true(isinstance(enc.cv, KFold))
    assert_equal(enc.cv.n, times[msk_time].shape[-1])

    # Loosening the weight requirement because less data
    assert_array_almost_equal(weights, enc.coefs_, decimal=1)
Example #16
def epochs_compute_wsmi(epochs,
                        kernel,
                        tau,
                        tmin=None,
                        tmax=None,
                        backend='python',
                        method_params=None,
                        n_jobs='auto'):
    """Compute weighted mutual symbolic information (wSMI)

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs on which to compute the wSMI.
    kernel : int
        The number of samples to use to transform to a symbol
    tau : int
        The number of samples left between the ones that defines a symbol.
    method_params : dictionary.
        Overrides default parameters.
        OpenMP specific {'nthreads'}
    backend : {'python', 'openmp'}
        The backend to be used. Defaults to 'pytho'.
    """
    if method_params is None:
        method_params = {}

    if n_jobs == 'auto':
        try:
            import multiprocessing as mp
            import mkl
            n_jobs = int(mp.cpu_count() / mkl.get_max_threads())
            logger.info('Autodetected number of jobs {}'.format(n_jobs))
        except Exception:
            logger.info('Cannot autodetect number of jobs')
            n_jobs = 1

    if 'bypass_csd' in method_params and method_params['bypass_csd'] is True:
        logger.info('Bypassing CSD')
        csd_epochs = epochs
    else:
        logger.info('Computing CSD')
        try:
            from pycsd import epochs_compute_csd
        except ImportError:
            raise ValueError('PyCSD not available. '
                             'Please install this dependency.')
        csd_epochs = epochs_compute_csd(epochs, n_jobs=n_jobs)

    freq = csd_epochs.info['sfreq']

    picks = mne.io.pick.pick_types(csd_epochs.info, meg=True, eeg=True)

    data = csd_epochs.get_data()[:, picks, ...]
    n_epochs = len(data)

    if 'filter_freq' in method_params:
        filter_freq = method_params['filter_freq']
    else:
        filter_freq = np.double(freq) / kernel / tau
    logger.info('Filtering at %.2f Hz' % filter_freq)
    b, a = butter(6, 2.0 * filter_freq / np.double(freq), 'lowpass')
    data = np.hstack(data)

    fdata = np.transpose(
        np.array(np.split(filtfilt(b, a, data), n_epochs, axis=1)), [1, 2, 0])

    time_mask = _time_mask(epochs.times, tmin, tmax)
    fdata = fdata[:, time_mask, :]
    if backend == 'python':
        from .information_theory.permutation_entropy import _symb_python
        logger.info("Performing symbolic transformation")
        sym, count = _symb_python(fdata, kernel, tau)
        nsym = count.shape[1]
        wts = _get_weights_matrix(nsym)
        logger.info("Running wsmi with python...")
        wsmi, smi = _wsmi_python(sym, count, wts)
    elif backend == 'openmp':
        from .optimizations.jivaro import wsmi as jwsmi
        nsym = math.factorial(kernel)
        wts = _get_weights_matrix(nsym)
        nthreads = (method_params['nthreads']
                    if 'nthreads' in method_params else 1)
        if nthreads == 'auto':
            try:
                import mkl
                nthreads = mkl.get_max_threads()
                logger.info(
                    'Autodetected number of threads {}'.format(nthreads))
            except Exception:
                logger.info('Cannot autodetect number of threads')
                nthreads = 1
        wsmi, smi, sym, count = jwsmi(fdata, kernel, tau, wts, nthreads)
    else:
        raise ValueError('backend %s not supported for wSMI' % backend)

    return wsmi, smi, sym, count
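Both this function and the PE example low-pass filter at sfreq / kernel / tau before the symbolic transform. That anti-aliasing step in isolation, with toy numbers:

import numpy as np
from scipy.signal import butter, filtfilt

sfreq, kernel, tau = 250., 3, 8
filter_freq = np.double(sfreq) / kernel / tau    # ~10.4 Hz
b, a = butter(6, 2.0 * filter_freq / np.double(sfreq), 'lowpass')
data = np.random.randn(4, 2500)                  # (n_channels, n_times)
fdata = filtfilt(b, a, data)
print(fdata.shape)                               # unchanged: (4, 2500)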
Example #17
def _phase_amplitude_coupling(data,
                              sfreq,
                              f_phase,
                              f_amp,
                              ixs,
                              pac_func='plv',
                              ev=None,
                              ev_grouping=None,
                              tmin=None,
                              tmax=None,
                              baseline=None,
                              baseline_kind='mean',
                              scale_amp_func=None,
                              use_times=None,
                              npad='auto',
                              return_data=False,
                              concat_epochs=True,
                              n_jobs=1,
                              verbose=None):
    """ Compute phase-amplitude coupling using pacpy.

    Parameters
    ----------
    data : array, shape ([n_epochs], n_channels, n_times)
        The data used to calculate PAC
    sfreq : float
        The sampling frequency of the data
    f_phase : array, dtype float, shape (2,)
        The frequency range to use for low-frequency phase carrier.
    f_amp : array, dtype float, shape (2,)
        The frequency range to use for high-frequency amplitude modulation.
    ixs : array-like, shape (n_pairs x 2)
        The indices for low/high frequency channels. PAC will be estimated
        between n_pairs of channels. Indices correspond to rows of `data`.
    pac_func : string, ['plv', 'glm', 'mi_canolty', 'mi_tort', 'ozkurt']
        The function for estimating PAC. Corresponds to functions in pacpy.pac
    ev : array-like, shape (n_events,) | None
        Indices for events. To be supplied if data is 2D and output should be
        split by events. In this case, tmin and tmax must be provided
    ev_grouping : array-like, shape (n_events,) | None
        Calculate PAC in each group separately, the output will then be of
        length unique(ev)
    tmin : float | None
        If ev is not provided, it is the start time to use in inst. If ev
        is provided, it is the time (in seconds) to include before each
        event index.
    tmax : float | None
        If ev is not provided, it is the stop time to use in inst. If ev
        is provided, it is the time (in seconds) to include after each
        event index.
    baseline : array, shape (2,) | None
        If ev is provided, it is the min/max time (in seconds) to include in
        the amplitude baseline. If None, no baseline is applied.
    baseline_kind : str
        What kind of baseline to use. See mne.baseline.rescale for options.
    scale_amp_func : None | function
        If not None, will be called on each amplitude signal in order to scale
        the values. Function must accept an N-D input and will operate on the
        last dimension. E.g., skl.preprocessing.scale
    use_times : array, shape (2,) | None
        If ev is provided, it is the min/max time (in seconds) to include in
        the PAC analysis. If None, the whole window (tmin to tmax) is used.
    npad : int | 'auto'
        The amount to pad each signal by before calculating phase/amplitude if
        the input signal is type Raw. If 'auto' the signal will be padded to
        the next power of 2 in length.
    return_data : bool
        If True, return the phase and amplitude data along with the PAC values.
    concat_epochs : bool
        If True, epochs will be concatenated before calculating PAC values. If
        epochs are relatively short, this is a good idea in order to improve
        stability of the PAC metric.
    n_jobs : int
        Number of CPUs to use in the computation.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    pac_out : array, dtype float, shape (n_pairs, [n_events])
        The computed phase-amplitude coupling between each pair of data sources
        given in ixs.
    """
    from pacpy import pac as ppac
    if pac_func not in _pac_funcs:
        raise ValueError("PAC function {0} is not supported".format(pac_func))
    func = getattr(ppac, pac_func)
    ixs = np.array(ixs, ndmin=2)
    f_phase = np.atleast_2d(f_phase)
    f_amp = np.atleast_2d(f_amp)

    if data.ndim != 2:
        raise ValueError('Data must be shape (n_channels, n_times)')
    if ixs.shape[1] != 2:
        raise ValueError('Indices must have a 2nd dimension of length 2')
    for ifreqs in [f_phase, f_amp]:
        if ifreqs.ndim > 2:
            raise ValueError('frequencies must be of shape (n_freq, 2)')
        if ifreqs.shape[1] != 2:
            raise ValueError('Phase frequencies must be of length 2')

    print('Pre-filtering data and extracting phase/amplitude...')
    hi_phase = pac_func in _hi_phase_funcs
    data_ph, data_am, ix_map_ph, ix_map_am = _pre_filter_ph_am(
        data, sfreq, ixs, f_phase, f_amp, npad=npad, hi_phase=hi_phase)
    ixs_new = [(ix_map_ph[i], ix_map_am[j]) for i, j in ixs]

    if ev is not None:
        use_times = [tmin, tmax] if use_times is None else use_times
        ev_grouping = np.ones_like(ev) if ev_grouping is None else ev_grouping
        data_ph, times, msk_ev = _array_raw_to_epochs(data_ph, sfreq, ev, tmin,
                                                      tmax)
        data_am, times, msk_ev = _array_raw_to_epochs(data_am, sfreq, ev, tmin,
                                                      tmax)

        # In case we cut off any events
        ev, ev_grouping = [i[msk_ev] for i in [ev, ev_grouping]]

        # Baselining before returning
        rescale(data_am, times, baseline, baseline_kind, copy=False)
        msk_time = _time_mask(times, *use_times)
        data_am, data_ph = [i[..., msk_time] for i in [data_am, data_ph]]

        # Stack epochs to a single trace if specified
        if concat_epochs is True:
            ev_unique = np.unique(ev_grouping)
            concat_data = []
            for i_ev in ev_unique:
                msk_events = ev_grouping == i_ev
                concat_data.append(
                    [np.hstack(i[msk_events]) for i in [data_am, data_ph]])
            data_am, data_ph = zip(*concat_data)
    else:
        data_ph = np.array([data_ph])
        data_am = np.array([data_am])
    data_ph = list(data_ph)
    data_am = list(data_am)

    if scale_amp_func is not None:
        for i in range(len(data_am)):
            data_am[i] = scale_amp_func(data_am[i], axis=-1)

    n_ep = len(data_ph)
    pac = np.zeros([n_ep, len(ixs_new)])
    pbar = ProgressBar(n_ep)
    for iep, (ep_ph, ep_am) in enumerate(zip(data_ph, data_am)):
        for iix, (i_ix_ph, i_ix_am) in enumerate(ixs_new):
            # f_phase and f_amp won't be used in this case
            pac[iep, iix] = func(ep_ph[i_ix_ph],
                                 ep_am[i_ix_am],
                                 f_phase,
                                 f_amp,
                                 filterfn=False)
        pbar.update_with_increment_value(1)
    if return_data:
        return pac, data_ph, data_am
    else:
        return pac
Example #18
def phase_amplitude_coupling(inst,
                             f_phase,
                             f_amp,
                             ixs,
                             pac_func='ozkurt',
                             ev=None,
                             ev_grouping=None,
                             tmin=None,
                             tmax=None,
                             baseline=None,
                             baseline_kind='mean',
                             scale_amp_func=None,
                             use_times=None,
                             npad='auto',
                             return_data=False,
                             concat_epochs=True,
                             n_jobs=1,
                             verbose=None):
    """ Compute phase-amplitude coupling between pairs of signals using pacpy.

    Parameters
    ----------
    inst : an instance of Raw or Epochs
        The data used to calculate PAC
    f_phase : array, dtype float, shape (2,)
        The frequency range to use for low-frequency phase carrier.
    f_amp : array, dtype float, shape (n_amp_freqs,)
        The frequency range to use for high-frequency amplitude modulation.
        The signal will be bandpass filtered in pairs, so the minimum size
        must be 2 (for a single bandpass filter).
    ixs : array-like, shape (n_pairs x 2)
        The indices for low/high frequency channels. PAC will be estimated
        between n_pairs of channels. Indices correspond to rows of `data`.
    pac_func : string, ['plv', 'glm', 'mi_canolty', 'mi_tort', 'ozkurt']
        The function for estimating PAC. Corresponds to functions in pacpy.pac
    ev : array-like, shape (n_events,) | None
        Indices for events. To be supplied if data is 2D and output should be
        split by events. In this case, tmin and tmax must be provided
    ev_grouping : array-like, shape (n_events,) | None
        Calculate PAC in each group separately, the output will then be of
        length unique(ev)
    tmin : float | None
        If ev is not provided, it is the start time to use in inst. If ev
        is provided, it is the time (in seconds) to include before each
        event index.
    tmax : float | None
        If ev is not provided, it is the stop time to use in inst. If ev
        is provided, it is the time (in seconds) to include after each
        event index.
    baseline : array, shape (2,) | None
        If ev is provided, it is the min/max time (in seconds) to include in
        the amplitude baseline. If None, no baseline is applied.
    baseline_kind : str
        What kind of baseline to use. See mne.baseline.rescale for options.
    scale_amp_func : None | function
        If not None, will be called on each amplitude signal in order to scale
        the values. Function must accept an N-D input and will operate on the
        last dimension. E.g., skl.preprocessing.scale
    use_times : array, shape (2,) | None
        If ev is provided, it is the min/max time (in seconds) to include in
        the PAC analysis. If None, the whole window (tmin to tmax) is used.
    npad : int | 'auto'
        The amount to pad each signal by before calculating phase/amplitude if
        the input signal is type Raw. If 'auto' the signal will be padded to
        the next power of 2 in length.
    return_data : bool
        If True, return the phase and amplitude data along with the PAC values.
    concat_epochs : bool
        If True, epochs will be concatenated before calculating PAC values. If
        epochs are relatively short, this is a good idea in order to improve
        stability of the PAC metric.
    n_jobs : int
        Number of CPUs to use in the computation.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    pac_out : array, dtype float, shape (n_pairs, [n_events])
        The computed phase-amplitude coupling between each pair of data sources
        given in ixs.

    References
    ----------
    [1] This function uses the PacPy module developed by the Voytek lab.
        https://github.com/voytekresearch/pacpy
    """
    from mne.io.base import _BaseRaw
    from mne.epochs import _BaseEpochs
    if not isinstance(inst, (_BaseEpochs, _BaseRaw)):
        raise ValueError('Must supply either Epochs or Raw')

    sfreq = inst.info['sfreq']
    time_mask = _time_mask(inst.times, tmin, tmax)
    if isinstance(inst, _BaseRaw):
        if ev is None:
            start, stop = np.where(time_mask)[0][[0, -1]]
            data = inst[:, start:(stop + 1)][0]
        else:
            # In this case tmin/tmax are for creating epochs later
            data = inst[:, :][0]
    else:
        raise ValueError('Input must be of type Raw')
    pac = _phase_amplitude_coupling(data,
                                    sfreq,
                                    f_phase,
                                    f_amp,
                                    ixs,
                                    pac_func=pac_func,
                                    ev=ev,
                                    ev_grouping=ev_grouping,
                                    tmin=tmin,
                                    tmax=tmax,
                                    baseline=baseline,
                                    baseline_kind=baseline_kind,
                                    scale_amp_func=scale_amp_func,
                                    use_times=use_times,
                                    npad=npad,
                                    return_data=return_data,
                                    concat_epochs=concat_epochs,
                                    n_jobs=n_jobs,
                                    verbose=verbose)
    # Collect the data properly
    if return_data is True:
        pac, data_ph, data_am = pac
        return pac, data_ph, data_am
    else:
        return pac
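A hypothetical call sketch, kept as comments since it needs a real Raw object plus the pacpy dependency; the channel indices and frequency bands are illustrative, not values from the source:

# ixs = [(0, 1)]                 # phase from channel 0, amplitude from channel 1
# pac = phase_amplitude_coupling(raw, f_phase=(4, 8), f_amp=(80, 150),
#                                ixs=ixs, pac_func='ozkurt',
#                                tmin=0., tmax=60.)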
Example #19
                        condition='deviant',
                        baseline=(None, 0))
 sfreq = deviant.info['sfreq']
 deviants.append(deviant)
 standard = read_evokeds(evoked_file,
                         condition='standard',
                         baseline=(None, 0))
 standards.append(standard)
 assert sfreq == standard.info['sfreq']
 if group == groups[0] and subj == subjects[0]:
     amp_data = np.zeros((len(groups), N))
     lat_data = np.zeros((len(groups), N))
     standard_data = np.zeros((len(groups), N))
 # area under curve
 tmin, tmax = (timing[ii] - .05, timing[ii])
 mask = _time_mask(deviant.times, tmin=tmin, tmax=tmax, sfreq=sfreq)
 dummy = deviant.copy().pick_channels(pairs[ii])
 amp_data[ii, si] = np.sum(np.abs(dummy.data[:, mask])) * \
                    len(dummy.data) * (1. / sfreq)
 lat_data[ii, si] = times[_get_peak(dummy.data,
                                    times,
                                    tmin=tmin,
                                    tmax=tmax)[1]]
 print("     Peak latency for %s at %.3f sec \n \n" %
       (subj, lat_data[ii, si]))
 del dummy
 # for the standard stimulus
 dummy = standard.copy().pick_channels(pairs[ii])
 standard_data[ii, si] = np.sum(np.abs(dummy.data[:, mask])) * \
                         len(dummy.data) * (1. / sfreq)
 del dummy
Example #20
def test_time_mask():
    """Test safe time masking."""
    N = 10
    x = np.arange(N).astype(float)
    assert_equal(_time_mask(x, 0, N - 1).sum(), N)
    assert_equal(_time_mask(x - 1e-10, 0, N - 1, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, None, N - 1, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, None, None, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, -np.inf, None, sfreq=1000.).sum(), N)
    assert_equal(_time_mask(x - 1e-10, None, np.inf, sfreq=1000.).sum(), N)
    # non-uniformly spaced inputs
    x = np.array([4, 10])
    assert_equal(_time_mask(x[:1], tmin=10, sfreq=1).sum(), 0)
    assert_equal(_time_mask(x, tmin=10, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=6, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=5, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=4.5001, sfreq=1).sum(), 1)
    assert_equal(_time_mask(x, tmin=4.4999, sfreq=1).sum(), 2)
    assert_equal(_time_mask(x, tmin=4, sfreq=1).sum(), 2)
Example #21
def _phase_amplitude_coupling(data, sfreq, f_phase, f_amp, ixs,
                              pac_func='plv', ev=None, ev_grouping=None,
                              tmin=None, tmax=None,
                              baseline=None, baseline_kind='mean',
                              scale_amp_func=None, use_times=None, npad='auto',
                              return_data=False, concat_epochs=True, n_jobs=1,
                              verbose=None):
    """ Compute phase-amplitude coupling using pacpy.

    Parameters
    ----------
    data : array, shape ([n_epochs], n_channels, n_times)
        The data used to calculate PAC
    sfreq : float
        The sampling frequency of the data
    f_phase : array, dtype float, shape (2,)
        The frequency range to use for low-frequency phase carrier.
    f_amp : array, dtype float, shape (2,)
        The frequency range to use for high-frequency amplitude modulation.
    ixs : array-like, shape (n_pairs x 2)
        The indices for low/high frequency channels. PAC will be estimated
        between n_pairs of channels. Indices correspond to rows of `data`.
    pac_func : string, ['plv', 'glm', 'mi_canolty', 'mi_tort', 'ozkurt']
        The function for estimating PAC. Corresponds to functions in pacpy.pac
    ev : array-like, shape (n_events,) | None
        Indices for events. To be supplied if data is 2D and output should be
        split by events. In this case, tmin and tmax must be provided
    ev_grouping : array-like, shape (n_events,) | None
        Calculate PAC in each group separately, the output will then be of
        length unique(ev)
    tmin : float | None
        If ev is not provided, it is the start time to use in inst. If ev
        is provided, it is the time (in seconds) to include before each
        event index.
    tmax : float | None
        If ev is not provided, it is the stop time to use in inst. If ev
        is provided, it is the time (in seconds) to include after each
        event index.
    baseline : array, shape (2,) | None
        If ev is provided, it is the min/max time (in seconds) to include in
        the amplitude baseline. If None, no baseline is applied.
    baseline_kind : str
        What kind of baseline to use. See mne.baseline.rescale for options.
    scale_amp_func : None | function
        If not None, will be called on each amplitude signal in order to scale
        the values. Function must accept an N-D input and will operate on the
        last dimension. E.g., skl.preprocessing.scale
    use_times : array, shape (2,) | None
        If ev is provided, it is the min/max time (in seconds) to include in
        the PAC analysis. If None, the whole window (tmin to tmax) is used.
    npad : int | 'auto'
        The amount to pad each signal by before calculating phase/amplitude if
        the input signal is type Raw. If 'auto' the signal will be padded to
        the next power of 2 in length.
    return_data : bool
        If True, return the phase and amplitude data along with the PAC values.
    concat_epochs : bool
        If True, epochs will be concatenated before calculating PAC values. If
        epochs are relatively short, this is a good idea in order to improve
        stability of the PAC metric.
    n_jobs : int
        Number of CPUs to use in the computation.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    pac_out : array, dtype float, shape ([n_event_groups], n_pairs)
        The computed phase-amplitude coupling between each pair of data
        sources given in ixs.
    """
    from pacpy import pac as ppac
    if pac_func not in _pac_funcs:
        raise ValueError("PAC function {0} is not supported".format(pac_func))
    func = getattr(ppac, pac_func)
    ixs = np.array(ixs, ndmin=2)
    f_phase = np.atleast_2d(f_phase)
    f_amp = np.atleast_2d(f_amp)

    if data.ndim != 2:
        raise ValueError('Data must be shape (n_channels, n_times)')
    if ixs.shape[1] != 2:
        raise ValueError('Indices must have a 2nd dimension of length 2')
    for ifreqs in [f_phase, f_amp]:
        if ifreqs.ndim > 2:
            raise ValueError('frequencies must be of shape (n_freq, 2)')
        if ifreqs.shape[1] != 2:
            raise ValueError('Each frequency range must be of length 2')

    print('Pre-filtering data and extracting phase/amplitude...')
    hi_phase = pac_func in _hi_phase_funcs
    data_ph, data_am, ix_map_ph, ix_map_am = _pre_filter_ph_am(
        data, sfreq, ixs, f_phase, f_amp, npad=npad, hi_phase=hi_phase)
    ixs_new = [(ix_map_ph[i], ix_map_am[j]) for i, j in ixs]

    if ev is not None:
        use_times = [tmin, tmax] if use_times is None else use_times
        ev_grouping = np.ones_like(ev) if ev_grouping is None else ev_grouping
        data_ph, times, msk_ev = _array_raw_to_epochs(
            data_ph, sfreq, ev, tmin, tmax)
        data_am, times, msk_ev = _array_raw_to_epochs(
            data_am, sfreq, ev, tmin, tmax)

        # In case we cut off any events
        ev, ev_grouping = [i[msk_ev] for i in [ev, ev_grouping]]

        # Baselining before returning
        rescale(data_am, times, baseline, baseline_kind, copy=False)
        msk_time = _time_mask(times, *use_times)
        data_am, data_ph = [i[..., msk_time] for i in [data_am, data_ph]]

        # Stack epochs to a single trace if specified
        if concat_epochs is True:
            ev_unique = np.unique(ev_grouping)
            concat_data = []
            for i_ev in ev_unique:
                msk_events = ev_grouping == i_ev
                concat_data.append([np.hstack(i[msk_events])
                                    for i in [data_am, data_ph]])
            data_am, data_ph = zip(*concat_data)
    else:
        data_ph = np.array([data_ph])
        data_am = np.array([data_am])
    data_ph = list(data_ph)
    data_am = list(data_am)

    if scale_amp_func is not None:
        for i in range(len(data_am)):
            data_am[i] = scale_amp_func(data_am[i], axis=-1)

    n_ep = len(data_ph)
    pac = np.zeros([n_ep, len(ixs_new)])
    pbar = ProgressBar(n_ep)
    for iep, (ep_ph, ep_am) in enumerate(zip(data_ph, data_am)):
        for iix, (i_ix_ph, i_ix_am) in enumerate(ixs_new):
            # f_phase and f_amp won't be used in this case
            pac[iep, iix] = func(ep_ph[i_ix_ph], ep_am[i_ix_am],
                                 f_phase, f_amp, filterfn=False)
        pbar.update_with_increment_value(1)
    if return_data:
        return pac, data_ph, data_am
    else:
        return pac
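A minimal call sketch for the private helper above, assuming pacpy is
installed and the function is importable from this module. Channel 0 supplies
the low-frequency phase and channel 1 the high-frequency amplitude; the
frequency bands are chosen purely for illustration.

import numpy as np

sfreq = 250.
t = np.arange(0, 20, 1. / sfreq)
# Synthetic pair: a 6 Hz phase carrier and a 60 Hz oscillation whose
# envelope follows the carrier, so nonzero coupling is expected.
lo = np.sin(2 * np.pi * 6 * t)
hi = (1. + lo) * np.sin(2 * np.pi * 60 * t)
data = np.vstack([lo, hi])

pac = _phase_amplitude_coupling(data, sfreq, f_phase=[4, 8],
                                f_amp=[40, 80], ixs=[[0, 1]],
                                pac_func='ozkurt')
print(pac.shape)  # (1, 1): one epoch group, one channel pair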
Exemplo n.º 24
0
def test_encodingmodel():
    """Test the encodingmodel fitting."""
    # Define data
    n_time = 3
    t_start = -.5
    sfreq = 1000
    n_channels = 5
    n_epochs = 10
    times = np.arange(n_time * sfreq) / float(sfreq) + t_start
    delays = np.arange(0, .4, .02)

    # Fitting parameters
    est = Ridge()
    n_iter = 4
    tmin_fit = 0
    tmax_fit = 1.5
    kws_fit = dict(times=times, tmin=tmin_fit, tmax=tmax_fit)
    msk_time = _time_mask(times, tmin_fit, tmax_fit)

    weights = 10 * rng.randn(n_channels * len(delays))
    X = rng.randn(n_epochs, n_channels, n_time * sfreq)
    y = np.stack(
        [np.dot(weights, delay_timeseries(xep, sfreq, delays)) for xep in X])

    # --- Epochs data ---
    enc = EncodingModel(delays, est)
    enc.fit(X, y, sfreq, **kws_fit)

    # Make sure CV object and model is correct
    assert_true(isinstance(enc.cv, LabelShuffleSplit))
    assert_equal(enc.cv.labels.shape[-1],
                 np.hstack(y[..., msk_time]).shape[-1])
    assert_true(isinstance(enc.est.steps[-1][-1], type(est)))

    # Epochs w/ custom CV
    cv = LabelShuffleSplit
    cv_params = dict(n_iter=n_iter, test_size=.1)
    enc = EncodingModel(delays, est)
    enc.fit(X, y, sfreq, cv=cv, cv_params=cv_params, **kws_fit)
    assert_true(isinstance(enc.cv, LabelShuffleSplit))
    assert_equal(enc.cv.n_iter, n_iter)
    assert_equal(enc.cv.test_size, .1)

    # Make sure coefficients are correct
    assert_array_almost_equal(weights, enc.coefs_, decimal=2)
    assert_equal(enc.coefs_all_.shape[0], len(enc.cv))

    # Test incorrect inputs
    assert_raises(ValueError, enc.fit, X, y[:2], sfreq)
    assert_raises(ValueError, enc.fit, X, y[..., :5], sfreq)
    assert_raises(ValueError, enc.fit, X, y, sfreq, times=np.array([2, 3]))
    assert_raises(ValueError,
                  enc.fit,
                  X,
                  y,
                  sfreq,
                  tmin=0,
                  tmax=np.array([1, 2]))

    # Test custom tstart / tstop for epochs
    tstarts = .2 * np.random.rand(n_epochs) - tmin_fit
    tstops = .2 * np.random.rand(n_epochs) + tmax_fit
    time_masks = np.array([
        _time_mask(times, itmin, itmax)
        for itmin, itmax in zip(tstarts, tstops)
    ])

    enc.fit(X, y, sfreq, times=times, tmin=tstarts, tmax=tstops)
    assert_equal(len(enc.cv.labels), time_masks.sum())

    # Giving time values outside of proper bounds
    assert_raises(ValueError,
                  enc.fit,
                  X,
                  y,
                  sfreq,
                  times=times,
                  tmin=-2,
                  tmax=0)
    assert_raises(ValueError,
                  enc.fit,
                  X,
                  y,
                  sfreq,
                  times=times,
                  tmin=0,
                  tmax=4)

    tstops[5] = 5
    assert_raises(ValueError,
                  enc.fit,
                  X,
                  y,
                  sfreq,
                  times=times,
                  tmin=tstarts,
                  tmax=tstops)

    # --- Single trial data ---
    enc.fit(X[0], y[0], sfreq, **kws_fit)

    # Make sure the CV was chosen correctly + has right time points
    assert_true(isinstance(enc.cv, KFold))
    assert_equal(enc.cv.n, times[msk_time].shape[-1])

    # Loosening the weight requirement because less data
    assert_array_almost_equal(weights, enc.coefs_, decimal=1)
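The test depends on a delay_timeseries helper that stacks time-lagged copies
of the input, one per delay, so the ridge model can recover one weight per
channel-delay pair. That helper is not shown in this listing; the sketch below
is a plausible reconstruction of its shape contract, (n_channels, n_times) to
(n_channels * n_delays, n_times), and the real edge handling may differ.

import numpy as np

def delay_timeseries_sketch(X, sfreq, delays):
    """Stack time-lagged copies of X along the channel axis (sketch)."""
    delayed = []
    for delay in delays:
        shift = int(round(delay * sfreq))
        rolled = np.roll(X, shift, axis=-1)
        # Zero the samples that np.roll wrapped around the edge.
        if shift > 0:
            rolled[..., :shift] = 0.
        elif shift < 0:
            rolled[..., shift:] = 0.
        delayed.append(rolled)
    return np.vstack(delayed)

X = np.random.randn(5, 3000)
Xd = delay_timeseries_sketch(X, sfreq=1000., delays=np.arange(0, .4, .02))
print(Xd.shape)  # (100, 3000): 5 channels * 20 delays, as in the test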
Exemplo n.º 25
0
def phase_amplitude_coupling(inst, f_phase, f_amp, ixs, pac_func='ozkurt',
                             ev=None, ev_grouping=None, tmin=None, tmax=None,
                             baseline=None, baseline_kind='mean',
                             scale_amp_func=None, use_times=None, npad='auto',
                             return_data=False, concat_epochs=True, n_jobs=1,
                             verbose=None):
    """ Compute phase-amplitude coupling between pairs of signals using pacpy.

    Parameters
    ----------
    inst : an instance of Raw
        The data used to calculate PAC. Epochs input passes the type check
        below but is not currently handled.
    f_phase : array, dtype float, shape (2,)
        The frequency range to use for low-frequency phase carrier.
    f_amp : array, dtype float, shape (n_amp_freqs,)
        The frequency range to use for high-frequency amplitude modulation.
        The signal will be bandpass filtered in pairs, so the minimum size
        must be 2 (for a single bandpass filter).
    ixs : array-like, shape (n_pairs, 2)
        The indices for low/high frequency channels. PAC will be estimated
        between n_pairs of channels. Indices correspond to rows of `data`.
    pac_func : string, ['plv', 'glm', 'mi_canolty', 'mi_tort', 'ozkurt']
        The function for estimating PAC. Corresponds to functions in pacpy.pac
    ev : array-like, shape (n_events,) | None
        Indices for events. To be supplied if data is 2D and output should be
        split by events. In this case, tmin and tmax must be provided.
    ev_grouping : array-like, shape (n_events,) | None
        Calculate PAC in each group separately; the output will then have
        one entry per unique value in ev_grouping.
    tmin : float | None
        If ev is not provided, it is the start time to use in inst. If ev
        is provided, it is the time (in seconds) to include before each
        event index.
    tmax : float | None
        If ev is not provided, it is the stop time to use in inst. If ev
        is provided, it is the time (in seconds) to include after each
        event index.
    baseline : array, shape (2,) | None
        If ev is provided, it is the min/max time (in seconds) to include in
        the amplitude baseline. If None, no baseline is applied.
    baseline_kind : str
        What kind of baseline to use. See mne.baseline.rescale for options.
    scale_amp_func : None | function
        If not None, will be called on each amplitude signal in order to scale
        the values. Function must accept an N-D input and will operate on the
        last dimension. E.g., skl.preprocessing.scale
    use_times : array, shape (2,) | None
        If ev is provided, it is the min/max time (in seconds) to include in
        the PAC analysis. If None, the whole window (tmin to tmax) is used.
    npad : int | 'auto'
        The amount to pad each signal by before calculating phase/amplitude if
        the input signal is type Raw. If 'auto' the signal will be padded to
        the next power of 2 in length.
    return_data : bool
        If True, return the phase and amplitude data along with the PAC values.
    concat_epochs : bool
        If True, epochs will be concatenated before calculating PAC values. If
        epochs are relatively short, this is a good idea in order to improve
        stability of the PAC metric.
    n_jobs : int
        Number of CPUs to use in the computation.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    pac_out : array, dtype float, shape ([n_event_groups], n_pairs)
        The computed phase-amplitude coupling between each pair of data
        sources given in ixs.

    References
    ----------
    [1] This function uses the PacPy module developed by the Voytek lab.
        https://github.com/voytekresearch/pacpy
    """
    from mne.io.base import _BaseRaw
    from mne.epochs import _BaseEpochs
    if not isinstance(inst, (_BaseEpochs, _BaseRaw)):
        raise ValueError('Must supply either Epochs or Raw')

    sfreq = inst.info['sfreq']
    time_mask = _time_mask(inst.times, tmin, tmax)
    if isinstance(inst, _BaseRaw):
        if ev is None:
            start, stop = np.where(time_mask)[0][[0, -1]]
            data = inst[:, start:(stop + 1)][0]
        else:
            # In this case tmin/tmax are for creating epochs later
            data = inst[:, :][0]
    else:
        raise ValueError('Epochs input is not currently handled; pass Raw')
    pac = _phase_amplitude_coupling(data, sfreq, f_phase, f_amp, ixs,
                                    pac_func=pac_func, ev=ev,
                                    ev_grouping=ev_grouping,
                                    tmin=tmin, tmax=tmax, baseline=baseline,
                                    baseline_kind=baseline_kind,
                                    scale_amp_func=scale_amp_func,
                                    use_times=use_times, npad=npad,
                                    return_data=return_data,
                                    concat_epochs=concat_epochs,
                                    n_jobs=n_jobs, verbose=verbose)
    # Collect the data properly
    if return_data is True:
        pac, data_ph, data_am = pac
        return pac, data_ph, data_am
    else:
        return pac
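A usage sketch for the public wrapper, assuming pacpy is installed and an mne
version that still exposes the private _BaseRaw/_BaseEpochs classes. The
channel names and frequency bands are illustrative.

import numpy as np
import mne

sfreq = 250.
t = np.arange(0, 20, 1. / sfreq)
lo = np.sin(2 * np.pi * 6 * t)               # 6 Hz phase carrier
hi = (1. + lo) * np.sin(2 * np.pi * 60 * t)  # 60 Hz, theta-modulated

info = mne.create_info(['lo', 'hi'], sfreq, ch_types='eeg')
raw = mne.io.RawArray(np.vstack([lo, hi]), info)

# Phase from channel 0 (theta band), amplitude from channel 1 (gamma band).
pac = phase_amplitude_coupling(raw, f_phase=[4, 8], f_amp=[40, 80],
                               ixs=[[0, 1]])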
Exemplo n.º 26
0
def test_time_mask():
    """Test safe time masking."""
    N = 10
    x = np.arange(N).astype(float)
    assert _time_mask(x, 0, N - 1).sum() == N
    assert _time_mask(x - 1e-10, 0, N - 1, sfreq=1000.).sum() == N
    assert _time_mask(x - 1e-10, None, N - 1, sfreq=1000.).sum() == N
    assert _time_mask(x - 1e-10, None, None, sfreq=1000.).sum() == N
    assert _time_mask(x - 1e-10, -np.inf, None, sfreq=1000.).sum() == N
    assert _time_mask(x - 1e-10, None, np.inf, sfreq=1000.).sum() == N
    # non-uniformly spaced inputs
    x = np.array([4, 10])
    assert _time_mask(x[:1], tmin=10, sfreq=1, raise_error=False).sum() == 0
    assert _time_mask(x[:1], tmin=11, tmax=12, sfreq=1,
                      raise_error=False).sum() == 0
    assert _time_mask(x, tmin=10, sfreq=1).sum() == 1
    assert _time_mask(x, tmin=6, sfreq=1).sum() == 1
    assert _time_mask(x, tmin=5, sfreq=1).sum() == 1
    assert _time_mask(x, tmin=4.5001, sfreq=1).sum() == 1
    assert _time_mask(x, tmin=4.4999, sfreq=1).sum() == 2
    assert _time_mask(x, tmin=4, sfreq=1).sum() == 2
    # degenerate cases
    with pytest.raises(ValueError, match='No samples remain'):
        _time_mask(x[:1], tmin=11, tmax=12)
    with pytest.raises(ValueError, match='must be less than or equal to tmax'):
        _time_mask(x[:1], tmin=10, sfreq=1)