Code Example #1
import numpy as np
from mne.baseline import rescale
from mne.filter import filter_data

# `label_list`, `times`, and `data_dict` are module-level names in the
# original project.
def extract(input_):
    ix, path_to_a_file = input_
    data = np.load(path_to_a_file, allow_pickle=True).item()
    data = np.array([data[key].reshape(-1) for key in label_list])
    data = filter_data(
        data,
        sfreq=250,
        l_freq=None,
        h_freq=30,
        method="fir",
        phase="minimum",
        n_jobs=1
    )
    # baseline-correct two windows in sequence
    data = rescale(data, times, (-0.1, 0.0), mode="mean")
    data = rescale(data, times, (1.6, 2.6), mode="mean")
    data_dict[ix] = data
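The helper above leans on module-level names from its project (label_list, times, data_dict). As a self-contained sketch of the same rescale pattern, assuming synthetic data and a hypothetical time axis:

import numpy as np
from mne.baseline import rescale

sfreq = 250.0
times = np.arange(-0.1, 2.6, 1.0 / sfreq)  # hypothetical epoch time axis
data = np.random.randn(32, times.size)     # (n_channels, n_times)
# subtract each channel's mean over the pre-stimulus window
data = rescale(data, times, (-0.1, 0.0), mode="mean")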
Code Example #2
import numpy as np
import mne
from mne.baseline import rescale
# `_merge_grad_data` is a private MNE helper that combines planar-gradiometer
# pairs into RMS signals; its module location varies across MNE versions.


def combine_grads(epochs, baseline=(-3.5, -3.2)):
    """Combine data from Epochs into an RMS data structure."""
    # `mne.fiff.pick_types` in the original; the fiff namespace was removed
    data_picks = mne.pick_types(epochs.info, meg='grad', exclude='bads')
    data = epochs.get_data()[:, data_picks, :]
    # integer division: one RMS trace per gradiometer pair
    data_rms = np.empty([data.shape[0], data.shape[1] // 2, data.shape[2]])

    for i in range(len(data)):
        data_rms[i, :, :] = _merge_grad_data(data[i, :, :])

    if baseline is not None:
        # use the `baseline` argument (the original hardcoded (-3.5, -3.2))
        data_rms = rescale(data_rms, times=epochs.times,
                           baseline=baseline, mode="mean")
    return data_rms
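A hypothetical call, assuming `epochs` holds planar-gradiometer MEG data:

# Hypothetical usage; yields one RMS trace per gradiometer pair.
rms = combine_grads(epochs, baseline=(-3.5, -3.2))
print(rms.shape)  # (n_epochs, n_channels // 2, n_times)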
Code Example #3
File: utils.py  Project: zezhangH/Dual-mVEPs-Speller
import numpy as np
from mne import baseline

def apply_baseline(t, data, mode='mean'):
    """
    Simple wrapper around the MNE rescale function.
    :param t: tuple (start, end, samplerate); samplerate is unpacked but not
        used here
    :param data: ndarray of any shape with axis=-1 the time axis
    :param mode: 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
        refer to mne.baseline.rescale
    :return: ndarray
    """
    start, end, samplerate = t
    base = (start, 0)
    times = np.linspace(start, end, data.shape[-1])
    data = baseline.rescale(data,
                            times,
                            baseline=base,
                            mode=mode,
                            verbose=False)
    return data
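A minimal usage sketch, assuming a synthetic (n_trials, n_channels, n_samples) array spanning -0.2 to 0.8 s at 250 Hz:

data = np.random.randn(10, 8, 250)  # hypothetical trials x channels x samples
corrected = apply_baseline((-0.2, 0.8, 250), data, mode='mean')
print(corrected.shape)  # (10, 8, 250); the baseline window is (-0.2, 0)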
Code Example #4
# `files` is a project-local helper; get_files(...)[2] yields matched file paths
beh_file = files.get_files(beh_path, "beh-{}".format(subject), ".gz")[2][0]

meg_path = op.join(path, subject, "new_v1")
meg_files = files.get_files(meg_path, "epochs-TD", "-epo.fif")[2]
meg_files.sort()

# read data

beh = pd.read_pickle(beh_file)

data = [mne.read_epochs(i) for i in meg_files]
data = np.vstack([i.pick_types(ref_meg=False).get_data() for i in data])

times = np.linspace(-0.6, 2.6, num=801)
# baseline the full epoch to the pre-stimulus window
data = rescale(data, times, (-0.6, -0.5), mode="mean")

# re-baseline the late segment (samples 525 onward) to the 1.5-1.6 s window
data[:, :, 525:] = rescale(data[:, :, 525:],
                           times[525:], (1.5, 1.6),
                           mode="mean")

# keep three time samples of interest
data = data[:, :, [200, 300, 400]]

all_labels = np.array(beh.obs_dir_mod)

odd_ix = np.where(all_labels == -1)[0]
reg_ix = np.where(all_labels == 1)[0]

# balance classes: sample as many regular trials as there are odd trials
sample_reg = np.random.choice(reg_ix, odd_ix.shape[0])

sample_ix = np.hstack([odd_ix, sample_reg])
Code Example #5
###############################################################################
# Operating on arrays
# -------------------
#
# MNE also has versions of the functions above which operate on numpy arrays
# instead of MNE objects. They expect inputs of the shape
# ``(n_epochs, n_channels, n_times)`` and, for per-epoch output, return a
# numpy array of shape ``(n_epochs, n_channels, n_freqs, n_times)``. With
# ``output='avg_power'`` (used below) the epoch axis is averaged away,
# giving ``(n_channels, n_freqs, n_times)``.

power = tfr_array_morlet(epochs.get_data(),
                         sfreq=epochs.info['sfreq'],
                         freqs=freqs,
                         n_cycles=n_cycles,
                         output='avg_power')
# Baseline the output
rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False)
fig, ax = plt.subplots()
mesh = ax.pcolormesh(epochs.times * 1000,
                     freqs,
                     power[0],
                     cmap='RdBu_r',
                     vmin=vmin,
                     vmax=vmax)
ax.set_title('TFR calculated on a numpy array')
ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)')
fig.colorbar(mesh)
plt.tight_layout()

plt.show()
Code Example #6
power = tfr_morlet(epochs, freqs=freqs,
                   n_cycles=n_cycles, return_itc=False, average=False)
print(type(power))
avgpower = power.average()
avgpower.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
              title='Using Morlet wavelets and EpochsTFR', show=False)

###############################################################################
# Operating on arrays
# -------------------
#
# MNE also has versions of the functions above which operate on numpy arrays
# instead of MNE objects. They expect inputs of the shape
# ``(n_epochs, n_channels, n_times)`` and, for per-epoch output, return a
# numpy array of shape ``(n_epochs, n_channels, n_freqs, n_times)``. With
# ``output='avg_power'`` (used below) the epoch axis is averaged away,
# giving ``(n_channels, n_freqs, n_times)``.

power = tfr_array_morlet(epochs.get_data(), sfreq=epochs.info['sfreq'],
                         freqs=freqs,  # named `frequencies` in older MNE
                         n_cycles=n_cycles, output='avg_power')
# Baseline the output
rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False)
fig, ax = plt.subplots()
mesh = ax.pcolormesh(epochs.times * 1000, freqs, power[0],
                     cmap='RdBu_r', vmin=vmin, vmax=vmax)
ax.set_title('TFR calculated on a numpy array')
ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)')
fig.colorbar(mesh)
plt.tight_layout()

plt.show()
Code Example #7
File: TF.py  Project: seapsy/DvM
    def TFanalysisMNE(self,
                      sj,
                      cnds,
                      cnd_header,
                      base_period,
                      time_period,
                      method='hilbert',
                      flip=None,
                      base_type='conspec',
                      downsample=1,
                      min_freq=5,
                      max_freq=40,
                      num_frex=25,
                      cycle_range=(3, 12),
                      freq_scaling='log'):
        '''
        Time-frequency analysis using either Morlet wavelets or the
        filter-Hilbert method for time-frequency decomposition.

        TODO: add option to subtract the ERP to get evoked power
        TODO: add option to match trial numbers across conditions

        Arguments
        - - - - -
        sj (int): subject number
        cnds (list): list of conditions as stored in the behavior file
        cnd_header (str): key in the behavior file that contains condition info
        base_period (tuple | list): time window used for baseline correction
        time_period (tuple | list): time window of interest
        method (str): whether Hilbert or wavelet convolution is used for
            time-frequency decomposition
        flip (dict): flips a subset of trials. The dictionary key specifies
            the header in beh that contains the flip info; the list in the
            dict contains variables that need to be flipped. Note: flipping
            is done from the right to the left hemifield
        base_type (str): whether dB conversion is condition specific
            ('conspec') or averaged across conditions ('conavg')
        downsample (int): factor used for downsampling (applied after
            filtering). Default is no downsampling
        min_freq (int): minimum frequency for TF analysis
        max_freq (int): maximum frequency for TF analysis
        num_frex (int): number of frequencies in TF analysis
        cycle_range (tuple): the number of cycles increases in the same
            number of steps used for frequency scaling
        freq_scaling (str): whether frequencies are linearly or
            logarithmically spaced. If the main results are expected in
            lower frequency bands a logarithmic scale is advised, whereas a
            linear scale is advised for expected results in higher
            frequency bands

        Returns
        - - - -
        None: the TF matrices and a plotting dict are pickled to disk
        '''

        # read in data
        eegs, beh, times, s_freq, ch_names = self.selectTFData(sj)

        # flip subset of trials (allows for lateralization indices)
        if flip is not None:
            key = list(flip.keys())[0]
            eegs = self.topoFlip(eegs,
                                 beh[key],
                                 ch_names,
                                 left=[flip.get(key)])

        # get parameters
        nr_time = eegs.shape[-1]
        nr_chan = eegs.shape[1]

        freqs = np.logspace(np.log10(min_freq), np.log10(max_freq), num_frex)
        nr_cycles = np.logspace(np.log10(cycle_range[0]),
                                np.log10(cycle_range[1]), num_frex)

        base_s, base_e = [np.argmin(abs(times - b)) for b in base_period]
        idx_time = np.where(
            (times >= time_period[0]) * (times <= time_period[1]))[0]
        idx_2_save = np.array(
            [idx for i, idx in enumerate(idx_time) if i % downsample == 0])

        # initiate dict
        tf = {}
        base = {}

        # loop over conditions
        for c, cnd in enumerate(cnds):
            tf.update({cnd: {}})
            base.update({cnd: np.zeros((num_frex, nr_chan))})

            # select trials of this condition (the original hardcoded
            # 'block_type' here, ignoring the cnd_header argument)
            cnd_idx = np.where(beh[cnd_header] == cnd)[0]

            power = tfr_array_morlet(eegs[cnd_idx],
                                     sfreq=s_freq,
                                     freqs=freqs,
                                     n_cycles=nr_cycles,
                                     output='avg_power')

            # update cnd dict with power values
            tf[cnd]['power'] = np.swapaxes(power, 0, 1)
            tf[cnd]['base_power'] = rescale(np.swapaxes(power, 0, 1),
                                            times,
                                            base_period,
                                            mode='logratio')
            tf[cnd]['phase'] = '?'  # placeholder in the original source

        # save TF matrices
        with open(
                self.FolderTracker(['tf', method],
                                   '{}-tf-mne.pickle'.format(sj)),
                'wb') as handle:
            pickle.dump(tf, handle)

        # store dictionary with variables for plotting
        plot_dict = {
            'ch_names': ch_names,
            'times': times[idx_2_save],
            'frex': freqs
        }

        with open(
                self.FolderTracker(['tf', method],
                                   filename='plot_dict.pickle'),
                'wb') as handle:
            pickle.dump(plot_dict, handle)
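A hypothetical invocation of the method above; the object name and argument values are illustrative only, not taken from the project:

# Hypothetical call on a DvM-style analysis object.
tf_session.TFanalysisMNE(sj=1,
                         cnds=['single_task', 'dual_task'],
                         cnd_header='block_type',
                         base_period=(-0.3, 0.0),
                         time_period=(-0.3, 0.8),
                         method='wavelet')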
Code Example #8
File: connectivity.py  Project: matarhaller/ECOGpy
def _phase_amplitude_coupling(data, sfreq, f_phase, f_amp, ixs,
                              pac_func='plv', ev=None, ev_grouping=None,
                              tmin=None, tmax=None,
                              baseline=None, baseline_kind='mean',
                              scale_amp_func=None, use_times=None, npad='auto',
                              return_data=False, concat_epochs=True, n_jobs=1,
                              verbose=None):
    """ Compute phase-amplitude coupling using pacpy.

    Parameters
    ----------
    data : array, shape (n_channels, n_times)
        The data used to calculate PAC; 2D input is required, and epochs are
        created internally when `ev` is provided
    sfreq : float
        The sampling frequency of the data
    f_phase : array, dtype float, shape (2,)
        The frequency range to use for low-frequency phase carrier.
    f_amp : array, dtype float, shape (2,)
        The frequency range to use for high-frequency amplitude modulation.
    ixs : array-like, shape (n_pairs, 2)
        The indices for low/high frequency channels. PAC will be estimated
        between n_pairs of channels. Indices correspond to rows of `data`.
    pac_func : string, ['plv', 'glm', 'mi_canolty', 'mi_tort', 'ozkurt']
        The function for estimating PAC. Corresponds to functions in pacpy.pac
    ev : array-like, shape (n_events,) | None
        Indices for events. To be supplied if data is 2D and output should be
        split by events. In this case, tmin and tmax must be provided
    ev_grouping : array-like, shape (n_events,) | None
        Calculate PAC in each group separately, the output will then be of
        length unique(ev)
    tmin : float | None
        If ev is not provided, it is the start time to use in inst. If ev
        is provided, it is the time (in seconds) to include before each
        event index.
    tmax : float | None
        If ev is not provided, it is the stop time to use in inst. If ev
        is provided, it is the time (in seconds) to include after each
        event index.
    baseline : array, shape (2,) | None
        If ev is provided, it is the min/max time (in seconds) to include in
        the amplitude baseline. If None, no baseline is applied.
    baseline_kind : str
        What kind of baseline to use. See mne.baseline.rescale for options.
    scale_amp_func : None | function
        If not None, will be called on each amplitude signal in order to scale
        the values. Function must accept an N-D input and will operate on the
        last dimension. E.g., skl.preprocessing.scale
    use_times : array, shape (2,) | None
        If ev is provided, it is the min/max time (in seconds) to include in
        the PAC analysis. If None, the whole window (tmin to tmax) is used.
    npad : int | 'auto'
        The amount to pad each signal by before calculating phase/amplitude if
        the input signal is type Raw. If 'auto' the signal will be padded to
        the next power of 2 in length.
    return_data : bool
        If True, return the phase and amplitude data along with the PAC values.
    concat_epochs : bool
        If True, epochs will be concatenated before calculating PAC values. If
        epochs are relatively short, this is a good idea in order to improve
        stability of the PAC metric.
    n_jobs : int
        Number of CPUs to use in the computation.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    pac_out : array, dtype float, shape ([n_events], n_pairs)
        The computed phase-amplitude coupling between each pair of data
        sources given in ixs, one row per event group.
    """
    from pacpy import pac as ppac
    if pac_func not in _pac_funcs:
        raise ValueError("PAC function {0} is not supported".format(pac_func))
    func = getattr(ppac, pac_func)
    ixs = np.array(ixs, ndmin=2)
    f_phase = np.atleast_2d(f_phase)
    f_amp = np.atleast_2d(f_amp)

    if data.ndim != 2:
        raise ValueError('Data must be shape (n_channels, n_times)')
    if ixs.shape[1] != 2:
        raise ValueError('Indices must have a 2nd dimension of length 2')
    for ifreqs in [f_phase, f_amp]:
        if ifreqs.ndim > 2:
            raise ValueError('frequencies must be of shape (n_freq, 2)')
        if ifreqs.shape[1] != 2:
            raise ValueError('Phase frequencies must be of length 2')

    print('Pre-filtering data and extracting phase/amplitude...')
    hi_phase = pac_func in _hi_phase_funcs
    data_ph, data_am, ix_map_ph, ix_map_am = _pre_filter_ph_am(
        data, sfreq, ixs, f_phase, f_amp, npad=npad, hi_phase=hi_phase)
    ixs_new = [(ix_map_ph[i], ix_map_am[j]) for i, j in ixs]

    if ev is not None:
        use_times = [tmin, tmax] if use_times is None else use_times
        ev_grouping = np.ones_like(ev) if ev_grouping is None else ev_grouping
        data_ph, times, msk_ev = _array_raw_to_epochs(
            data_ph, sfreq, ev, tmin, tmax)
        data_am, times, msk_ev = _array_raw_to_epochs(
            data_am, sfreq, ev, tmin, tmax)

        # In case we cut off any events
        ev, ev_grouping = [i[msk_ev] for i in [ev, ev_grouping]]

        # Baselining before returning
        rescale(data_am, times, baseline, baseline_kind, copy=False)
        msk_time = _time_mask(times, *use_times)
        data_am, data_ph = [i[..., msk_time] for i in [data_am, data_ph]]

        # Stack epochs to a single trace if specified
        if concat_epochs is True:
            ev_unique = np.unique(ev_grouping)
            concat_data = []
            for i_ev in ev_unique:
                msk_events = ev_grouping == i_ev
                concat_data.append([np.hstack(i[msk_events])
                                    for i in [data_am, data_ph]])
            data_am, data_ph = zip(*concat_data)
    else:
        data_ph = np.array([data_ph])
        data_am = np.array([data_am])
    data_ph = list(data_ph)
    data_am = list(data_am)

    if scale_amp_func is not None:
        for i in range(len(data_am)):
            data_am[i] = scale_amp_func(data_am[i], axis=-1)

    n_ep = len(data_ph)
    pac = np.zeros([n_ep, len(ixs_new)])
    pbar = ProgressBar(n_ep)
    for iep, (ep_ph, ep_am) in enumerate(zip(data_ph, data_am)):
        for iix, (i_ix_ph, i_ix_am) in enumerate(ixs_new):
            # f_phase and f_amp won't be used in this case
            pac[iep, iix] = func(ep_ph[i_ix_ph], ep_am[i_ix_am],
                                 f_phase, f_amp, filterfn=False)
        pbar.update_with_increment_value(1)
    if return_data:
        return pac, data_ph, data_am
    else:
        return pac
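A hypothetical call, assuming continuous 2D data, its sampling rate, and event sample indices (all names illustrative):

# Couple theta phase on channel 0 with high-gamma amplitude on channel 1;
# `data` is (n_channels, n_times), `events` holds event sample indices.
pac = _phase_amplitude_coupling(data, sfreq=1000., f_phase=(4, 8),
                                f_amp=(70, 150), ixs=[(0, 1)],
                                pac_func='plv', ev=events,
                                tmin=-0.5, tmax=1.0,
                                baseline=(-0.5, 0.), baseline_kind='mean')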
Code Example #9
File: viz.py  Project: starzynski/mne-python
def plot_topo_phase_lock(epochs,
                         phase,
                         freq,
                         layout,
                         baseline=None,
                         mode='mean',
                         decim=1,
                         colorbar=True,
                         vmin=None,
                         vmax=None,
                         cmap=None,
                         layout_scale=0.945):
    """Plot phase locking values on sensor layout

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs used to generate the phase locking value
    phase : 3D-array
        Phase locking value, second return value from
        mne.time_frequency.induced_power.
    freq : array-like
        Frequencies of interest as passed to induced_power
    layout: instance of Layout
        System specific sensor positions.
    baseline: tuple or list of length 2
        The time interval to apply rescaling / baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used.
    mode: 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
        Do baseline correction with ratio (phase is divided by mean
        phase during baseline) or z-score (phase is divided by standard
        deviation of phase during baseline after subtracting the mean,
        phase = [phase - mean(phase_baseline)] / std(phase_baseline)).
        If None, no baseline correction will be performed.
    decim : integer
        Increment for selecting each nth time slice
    colorbar : bool
        If true, colorbar will be added to the plot
    vmin : float
        minimum value mapped to lowermost color
    vmax : float
        maximum value mapped to uppermost color
    cmap : instance of matplotlib.pylab.colormap
        Colors to be mapped to the values
    layout_scale: float
        scaling factor for adjusting the relative size of the layout
        on the canvas.

    Returns
    -------
    fig : Instance of matplotlib.figure.Figure
        Phase lock images at sensor locations
    """
    if mode is not None:  # do baseline correction
        if baseline is None:
            baseline = epochs.baseline
        times = epochs.times[::decim] * 1e3
        phase = rescale(phase.copy(), times, baseline, mode)
    if vmin is None:
        vmin = phase.min()
    if vmax is None:
        vmax = phase.max()

    phase_imshow = partial(_imshow_tfr, tfr=phase.copy(), freq=freq)

    fig = _plot_topo_imshow(epochs,
                            phase_imshow,
                            layout,
                            decim=decim,
                            colorbar=colorbar,
                            vmin=vmin,
                            vmax=vmax,
                            cmap=cmap,
                            layout_scale=layout_scale)

    return fig
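A hypothetical usage, assuming phase-locking values computed with the legacy mne.time_frequency.induced_power and a matching sensor layout:

# `phase_lock` is induced_power's second return value,
# shaped (n_channels, n_freqs, n_times).
fig = plot_topo_phase_lock(epochs, phase_lock, freqs, layout,
                           baseline=(None, 0), mode='mean')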
Code Example #10
reg_colour = "#00A4CC"
odd_colour = "#F95700"
sign_colour = "#00ff00"
non_sign_colour = "#cccccc"

reg_data = np.array(regular[key]) * 1e14
odd_data = np.array(odd[key]) * 1e14

obs_range = (100, 500)
rot_range = (500, 776)
obs_times = np.linspace(-0.1, 1.5, num=np.diff(obs_range)[0])
rot_times = np.linspace(-0.1, 1.0, num=np.diff(rot_range)[0])

rot_reg = rescale(reg_data[:, rot_range[0]:rot_range[1]],
                  rot_times, (-0.1, 0.0),
                  mode="mean")
rot_reg_mean = np.average(rot_reg, axis=0)
rot_reg_sem = sem(rot_reg, axis=0)
obs_reg = rescale(reg_data[:, obs_range[0]:obs_range[1]],
                  obs_times, (-0.1, 0.0),
                  mode="mean")
obs_reg_mean = np.average(obs_reg, axis=0)
obs_reg_sem = sem(obs_reg, axis=0)
rot_odd = rescale(odd_data[:, rot_range[0]:rot_range[1]],
                  rot_times, (-0.1, 0.0),
                  mode="mean")
rot_odd_mean = np.average(rot_odd, axis=0)
rot_odd_sem = sem(rot_odd, axis=0)
obs_odd = rescale(odd_data[:, obs_range[0]:obs_range[1]],
                  obs_times, (-0.1, 0.0),
                  mode="mean")
Code Example #11
###############################################################################
# Now we can compute the Global Field Power
# We can track the emergence of spatial patterns compared to baseline
# for each frequency band, with a bootstrapped confidence interval.
#
# We see dominant responses in the Alpha and Beta bands.

fig, axes = plt.subplots(4, 1, figsize=(10, 7), sharex=True, sharey=True)
colors = plt.get_cmap('winter_r')(np.linspace(0, 1, 4))
for ((freq_name, fmin, fmax), average), color, ax in zip(
        frequency_map, colors, axes.ravel()[::-1]):
    times = average.times * 1e3
    gfp = np.sum(average.data ** 2, axis=0)
    gfp = mne.baseline.rescale(gfp, times, baseline=(None, 0))
    ax.plot(times, gfp, label=freq_name, color=color, linewidth=2.5)
    ax.axhline(0, linestyle='--', color='grey', linewidth=2)
    ci_low, ci_up = _bootstrap_ci(average.data, random_state=0,
                                  statfun=lambda x: np.sum(x ** 2, axis=0))
    ci_low = rescale(ci_low, average.times, baseline=(None, 0))
    ci_up = rescale(ci_up, average.times, baseline=(None, 0))
    ax.fill_between(times, gfp + ci_up, gfp - ci_low, color=color, alpha=0.3)
    ax.grid(True)
    ax.set_ylabel('GFP')
    ax.annotate('%s (%d-%dHz)' % (freq_name, fmin, fmax),
                xy=(0.95, 0.8),
                horizontalalignment='right',
                xycoords='axes fraction')
    ax.set_xlim(-1000, 3000)

axes.ravel()[-1].set_xlabel('Time [ms]')
Code Example #12
            label="freq{}{}".format(str(row),str(column))
        )

        # read the data
        data_files = files.get_files(
            output_dir,
            "{}".format(freq_key), # respo specific
            ".npy"
        )[2]
        data_files.sort()

        odd, regular = [np.load(i, allow_pickle=True).item() for i in data_files]
        # # data processing
        reg_data = np.array(regular[group_key]) * 1e14
        reg_data = reg_data[:, x_range[0]:x_range[1]]
        reg_data = rescale(reg_data, times, baseline, mode="mean")
        reg_mean = np.average(reg_data, axis=0)
        reg_sem = sem(reg_data, axis=0)
        odd_data = np.array(odd[group_key]) * 1e14
        odd_data = odd_data[:, x_range[0]:x_range[1]]
        odd_data = rescale(odd_data, times, baseline, mode="mean")
        odd_mean = np.average(odd_data, axis=0)
        odd_sem = sem(odd_data, axis=0)

        threshold = 2.0

        T_obs, clusters, cluster_p_values, H0 = permutation_cluster_test(
            [reg_data, odd_data], 
            n_permutations=5000, 
            threshold=threshold, 
            tail=0, 
Code Example #13
                                 show=False)
        row += 1
        ax = figure.add_subplot(gs[row, column],
                                label="freq{}{}".format(str(row), str(column)))

        # read the data
        data_files = files.get_files(
            output_dir,
            "resp-{}".format(freq_key),  # respo specific
            ".npy")[2]
        data_files.sort()

        odd, regular = [np.load(i, allow_pickle=True).item() for i in data_files]
        # # data processing
        reg_data = np.array(regular[group_key]) * 1e14
        reg_data = rescale(reg_data, times, (-0.1, 0.0), mode="mean")
        reg_mean = np.average(reg_data, axis=0)
        reg_sem = sem(reg_data, axis=0)
        odd_data = np.array(odd[group_key]) * 1e14
        odd_data = rescale(odd_data, times, (-0.1, 0.0), mode="mean")
        odd_mean = np.average(odd_data, axis=0)
        odd_sem = sem(odd_data, axis=0)

        # plot results

        ax.plot(times, reg_mean, linewidth=1, color=reg_colour)
        ax.fill_between(times,
                        reg_mean + reg_sem,
                        reg_mean - reg_sem,
                        color=reg_colour,
                        alpha=0.2,
Code Example #14
sensor_groupings = {
    "A": [i for i in info["ch_names"] if "MLO" in i],
    "B": [i for i in info["ch_names"] if "MRO" in i],
    "C": [i for i in info["ch_names"] if "MLC" in i],
    "D": [i for i in info["ch_names"] if "MRC" in i],
    "E": [i for i in info["ch_names"] if "0" in i],
    "F": [i for i in info["ch_names"] if "C" in i]
}

# read data
beh = pd.read_pickle(beh_file)

onsets = [mne.read_epochs(i) for i in onset_files]
onsets = np.vstack([i.pick_types(ref_meg=False).get_data() for i in onsets])

onsets = rescale(onsets, onset_times, (-0.5, 0.4), mode="mean")

obs = [mne.read_epochs(i) for i in obs_files]
obs = np.vstack([i.pick_types(ref_meg=False).get_data() for i in obs])

obs = rescale(obs, obs_times, (1.6, 2.6), mode="mean")
obs = rescale(obs, obs_times, (1.5, 1.6), mode="mean")

data = np.concatenate([onsets, obs[:, :, 500:]], axis=-1)

labels = np.array(beh.movement_dir_sign)

# keys loop
for key in sensor_groupings.keys():
    # pick this grouping's channels (the original indexed "D" here,
    # ignoring the loop variable)
    ch_selection = mne.pick_channels(info["ch_names"], sensor_groupings[key])
Code Example #15
    # Plot
    fig, axes = plt.subplots(4, 1, figsize=(10, 7), sharex=True, sharey=True)
    colors = plt.get_cmap('winter_r')(np.linspace(0, 1, 4))
    for ((freq_name, fmin, fmax),
         average), color, ax in zip(frequency_map, colors,
                                    axes.ravel()[::-1]):
        times = average.times * 1e3
        gfp = np.sum(average.data**2, axis=0)
        gfp = mne.baseline.rescale(gfp, times, baseline=(None, 0))
        ax.plot(times, gfp, label=freq_name, color=color, linewidth=2.5)
        ax.axhline(0, linestyle='--', color='grey', linewidth=2)
        ci_low, ci_up = bootstrap_confidence_interval(average.data,
                                                      random_state=0,
                                                      stat_fun=stat_fun)
        ci_low = rescale(ci_low, average.times, baseline=(None, 0))
        ci_up = rescale(ci_up, average.times, baseline=(None, 0))
        ax.fill_between(times,
                        gfp + ci_up,
                        gfp - ci_low,
                        color=color,
                        alpha=0.3)
        ax.grid(True)
        ax.set_ylabel('GFP')
        ax.annotate('%s (%d-%dHz)' % (freq_name, fmin, fmax),
                    xy=(0.95, 0.8),
                    horizontalalignment='right',
                    xycoords='axes fraction')
        ax.set_xlim(tmin * 1000, tmax * 1000)

    axes.ravel()[-1].set_xlabel('Time [ms]')
Code Example #16
# Get labels from FreeSurfer cortical parcellation
labels = mne.read_labels_from_annot("subject_1", parc="PALS_B12_Brodmann", regexp="Brodmann", subjects_dir=subjects_dir)

# Average the source estimates within each label using sign-flips to reduce
# signal cancellations; here we return the time courses directly rather than
# a generator
src_nrm = inverse_nrm["src"]
label_ts_nrm = mne.extract_label_time_course(stcs_nrm, labels, src_nrm, mode="mean_flip", return_generator=False)

src_hyp = inverse_hyp["src"]
label_ts_hyp = mne.extract_label_time_course(stcs_hyp, labels, src_hyp, mode="mean_flip", return_generator=False)

# standardize the label time series (z-score against the baseline window)
label_ts_nrm_rescaled = []
for j in range(len(label_ts_nrm)):
    label_ts_nrm_rescaled += [rescale(label_ts_nrm[j], epochs_nrm.times, baseline=(None, -0.5), mode="zscore")]

label_ts_hyp_rescaled = []
for j in range(len(label_ts_hyp)):
    label_ts_hyp_rescaled += [rescale(label_ts_hyp[j], epochs_hyp.times, baseline=(None, -0.5), mode="zscore")]


# sample indices of 0 s and 0.2 s on the source-estimate time axis
from_time = np.abs(stcs_nrm[0].times + 0).argmin()
to_time = np.abs(stcs_nrm[0].times - 0.2).argmin()

label_ts_nrm_rescaled_crop = []
for j in range(len(label_ts_nrm)):
    label_ts_nrm_rescaled_crop += [label_ts_nrm_rescaled[j][:, from_time:to_time]]

label_ts_hyp_rescaled_crop = []
for j in range(len(label_ts_hyp)):
    label_ts_hyp_rescaled_crop += [label_ts_hyp_rescaled[j][:, from_time:to_time]]