Example #1
import os.path as op

import mne
import numpy as np
from scipy import stats


def ttest_pair(data_path, hands, subjects, tmin,
               tmax):  # n - number of time samples
    contr = np.zeros((len(subjects), 2, 102))  # average data in the time interval

    for ind, subj in enumerate(subjects):
        temp1 = mne.Evoked(
            op.join(
                data_path,
                'beta_ave_comb_planar_self_paced_{0}_hand_15_25/{1}_comb_planar_{0}_hand_self_paced.fif'
                .format(hands[0], subj))).crop(tmin=tmin, tmax=tmax)
        data1 = temp1.data.mean(axis=1)

        temp2 = mne.Evoked(
            op.join(
                data_path,
                'beta_ave_comb_planar_self_paced_{0}_hand_15_25/{1}_comb_planar_{0}_hand_self_paced.fif'
                .format(hands[1], subj))).crop(tmin=tmin, tmax=tmax)
        data2 = temp2.data.mean(axis=1)

        contr[ind, 0, :] = data1
        contr[ind, 1, :] = data2

    comp1 = contr[:, 0, :]
    comp2 = contr[:, 1, :]
    t_stat, p_val = stats.ttest_rel(comp2, comp1, axis=0)

    #averaging by subjects
    comp1_mean = comp1.mean(axis=0)
    comp2_mean = comp2.mean(axis=0)

    return t_stat, p_val, comp1_mean, comp2_mean
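
A minimal usage sketch for ttest_pair; the data path, hand labels, subject IDs and the 1.0-1.4 s window below are placeholders, not values from the original project:

# Hypothetical call; adjust the path, hand labels, subjects and window to your data.
subjects = ['L001', 'L002', 'L003']
t_stat, p_val, mean_hand0, mean_hand1 = ttest_pair(
    '/path/to/data', ['left', 'right'], subjects, tmin=1.0, tmax=1.4)
print(p_val.shape)  # (102,) - one p-value per combined-planar sensor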
Example #2
def container_process(conf):
    print('\trun tfr container...')
    path_home = conf.path_home
    kind = conf.kind
    train = conf.train
    frequency = conf.frequency
    spec = conf.spec
    data_path = conf.data_path
    verbose = conf.verbose

    donor = mne.Evoked(f'{path_home}donor-ave.fif', verbose='ERROR')
    fpath_events = conf.path_mio + '/mio_out_{}/{}_run{}_mio_corrected_{}{}.txt'
    plot_spectrogram = conf.plot_spectrogram
    single_trial = conf.single_trial

    #get rid of runs, leave frequency data for pos and neg feedback for time course plotting 
    for i in range(len(kind)):
        for subject in conf.subjects:
            data = []
            processing_done = False
            if not plot_spectrogram:
                out_file = conf.path_container + "{}_{}{}_{}{}-ave.fif".format(subject, spec, kind[i], frequency, train)
            else:
                out_file = conf.path_container + "{0}_{1}{2}_{3}{4}-50ms-tfr.h5".format(subject, spec, kind[i], frequency, train)
            for run in conf.runs:
                print('\t\t', kind[i], run, subject)
                path_events = fpath_events.format(kind[i], subject, run, kind[i], train)
                if pathlib.Path(path_events).exists():
                    if verbose:
                        print('This file is being processed: ', path_events)

                    freq_file = conf.path_tfr + data_path.format(subject, run, spec, frequency, kind[i], train)
                    old_level = mne.set_log_level(verbose='ERROR', return_old_level=True)
                    freq_data = mne.time_frequency.read_tfrs(freq_file)[0]
                    mne.set_log_level(verbose=old_level)
                    data.append(freq_data.data)

                    processing_done = True

                if run == conf.runs[-1] and processing_done:
                    container_results(plot_spectrogram, single_trial, freq_data, data, donor, out_file, verbose)

    if plot_spectrogram:
        sdata = []
        for subject in conf.subjects:
            out_file = conf.path_container + "{0}_{1}{2}_{3}{4}-50ms-tfr.h5".format(subject, spec, kind[0], frequency, train)
            if pathlib.Path(out_file).exists():
                freq_subject_data = mne.time_frequency.read_tfrs(out_file)[0]
                sdata.append(freq_subject_data)

        freq_spec_data = mne.grand_average(sdata)
        title = f'Spectrogram ~{conf.L_freq}-{conf.H_freq} Hz TFR in {kind[0]}'
        PM = freq_spec_data.plot(picks='meg', fmin=conf.L_freq, fmax=conf.H_freq, vmin=-0.15, vmax=0.15, title=title)
        os.chdir('/home/asmyasnikova83/')
        PM.savefig('output.png')
        print('\tSpectrogram completed')

    print('\ttfr container completed')
Example #3
def min_beta_and_time_for_min(subjects, data_path, planar, tmin,
                              tmax):  # for ANOVA table
    #Make list of evoked
    all_evoked = []
    for subj in subjects:
        #data = op.join(data_path, planar.format(subj, h)) # for extremums_search
        data = op.join(data_path, planar.format(subj))  # for ANOVA table
        evk = mne.Evoked(data)
        all_evoked.append(evk)

    # shift the time scale if needed (currently disabled)
    #for i in all_evoked:
    #   i.shift_time(-2.0, relative=False)

    # calculate average data for the interval. You have to choose the interval - downward, upward or sustained (see the beginning of the script)
    interval = []

    for i in all_evoked:
        x = i.crop(tmin=tmin, tmax=tmax)  # crop is an mne.Evoked method
        interval.append(x)

    # interval is now a list of cropped Evoked objects

    min_beta = []
    index_min_beta = []  # for finding the time point of the beta minimum on each sensor
    for i in interval:
        m = np.min(i.data, axis=1)
        t = np.argmin(i.data, axis=1)  # indices of the minima
        a = m.tolist()
        b = t.tolist()

        min_beta.append(a)
        index_min_beta.append(b)

    # search for the time of the beta minimum
    time = []
    for i in range(len(index_min_beta)):
        time_per_subj = []  # 102 time points, one per sensor
        for j in index_min_beta[i]:
            # interval[0].data.shape[1] is the number of samples in the interval
            # j is the index of the extremum sample
            time_between_points = (tmax - tmin) / (interval[0].data.shape[1] -
                                                   1)
            t = tmin + time_between_points * j
            time_per_subj.append(t)
        time.append(time_per_subj)

    #make np.array from list
    min_interval_array = np.array(min_beta)
    min_time_array = np.array(time)

    return (min_interval_array, min_time_array)
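
A usage sketch for min_beta_and_time_for_min in the same spirit; the subject IDs, the planar file-name template and the interval bounds are illustrative placeholders:

subjects = ['L001', 'L002']
planar = '{0}_comb_planar_self_paced.fif'  # hypothetical template with one slot for the subject ID
mins, min_times = min_beta_and_time_for_min(subjects, '/path/to/data', planar,
                                            tmin=0.4, tmax=1.0)
print(mins.shape, min_times.shape)  # (n_subjects, n_sensors) each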
Example #4
def ttest_vs_zero(data_path, h, subjects, tmin, tmax):
    contr = np.zeros((len(subjects), 1, 102))

    for ind, subj in enumerate(subjects):
        temp1 = mne.Evoked(op.join(data_path, 'beta_ave_comb_planar_self_paced_{0}_hand_15_25/{1}_comb_planar_{0}_hand_self_paced.fif'.format(h, subj))).crop(tmin=tmin, tmax=tmax)
        data1 = temp1.data.mean(axis=1)
        contr[ind, 0, :] = data1

    comp1 = contr[:, 0, :]
    t_stat, p_val = stats.ttest_1samp(comp1, 0, axis=0)

    comp1_mean = comp1.mean(axis=0)

    return t_stat, p_val, comp1_mean
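
A sketch of calling ttest_vs_zero and FDR-correcting the resulting p-values over sensors; fdrcorrection from statsmodels is assumed to be available, as it is in the later examples, and all paths and labels are placeholders:

import statsmodels.stats.multitest as mul

t_stat, p_val, mean_resp = ttest_vs_zero('/path/to/data', 'left',
                                         ['L001', 'L002', 'L003'], tmin=1.0, tmax=1.4)
reject, p_val_fdr = mul.fdrcorrection(p_val)  # Benjamini-Hochberg over the 102 sensors
print(int(reject.sum()), 'sensors significant after FDR')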
Example #5
def calculateEvoked(subjectID):
    subject = 'dh{:#02d}a'.format(subjectID)
    outDir = os.environ['DATDIR'] + 'MEG_mc_hp004_ica_l50/' + subject
    docDir = os.environ['DOCDIR'] + 'misc'
    if not os.path.isdir(docDir):
        os.makedirs(docDir)
    pl.clf()
    ax = pl.gca()
    docFilename = '{}/{}.png'.format(docDir, subject)
    for cond in conds:
        evokedFilename = '{}/{}_average-ave.fif'.format(outDir, cond)
        print(evokedFilename)
        evoked = mne.Evoked(fname=evokedFilename,
                            condition=0,
                            baseline=None,
                            kind='average',
                            verbose=False)
        misc = evoked.data[309, :]
        pl.plot(evoked.times, misc, label=cond)
    ax.set_xlim(xlims)
    pl.legend(loc=1)
    pl.savefig(docFilename, dpi=150)

    pl.clf()
    ax = pl.gca()
    ax.set_ylim([0, 0.25])
    ax.set_xlim(xlims)
    docFilename = '{}/{}-{}.png'.format(docDir, subject, 'smooth')
    for cond in conds:
        condColor = colors[cond]
        evokedFilename = '{}/{}-epo.fif'.format(outDir, cond)
        print(evokedFilename)
        epochs = mne.read_epochs(fname=evokedFilename)
        for i in range(len(epochs)):
            misc = np.squeeze(epochs[i].get_data()[0, 309, :])
            pl.plot(epochs.times,
                    misc,
                    label=cond,
                    color=condColor,
                    linewidth=0.008,
                    alpha=0)
            ax.fill_between(epochs.times,
                            0,
                            misc,
                            color=condColor,
                            alpha=0.005,
                            linewidth=0)
    pl.savefig(docFilename, dpi=200)
Example #6
def get_mne_evoked(ndvar=False):
    """MNE-Python Evoked

    Parameters
    ----------
    ndvar : bool
        Convert to NDVar (default False).
    """
    data_path = mne.datasets.sample.data_path()
    evoked_path = os.path.join(data_path, 'MEG', 'sample',
                               'sample_audvis-ave.fif')
    evoked = mne.Evoked(evoked_path, "Left Auditory")
    if ndvar:
        return load.fiff.evoked_ndvar(evoked)
    else:
        return evoked
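
A quick check of get_mne_evoked; this just exercises both return types and assumes the Eelbrain load.fiff helpers imported by the surrounding module are available:

evoked = get_mne_evoked()            # plain mne.Evoked
ndvar = get_mne_evoked(ndvar=True)   # Eelbrain NDVar wrapping the same data
print(evoked.nave, evoked.ch_names[:3])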
Example #7
"""
# Author: Jussi Nurminen ([email protected])
#
# License: BSD (3-clause)

import mne
import os
from mne.datasets import hf_sef

fname_evoked = os.path.join(hf_sef.data_path(),
                            'MEG/subject_b/hf_sef_15min-ave.fif')

print(__doc__)

###############################################################################
# Read evoked data
evoked = mne.Evoked(fname_evoked)

###############################################################################
# Create a highpass filtered version
evoked_hp = evoked.copy()
evoked_hp.filter(l_freq=300, h_freq=None, fir_design='firwin')

###############################################################################
# Compare high-pass filtered and unfiltered data on a single channel
ch = 'MEG0443'
pick = evoked.ch_names.index(ch)
edi = {'HF': evoked_hp, 'Regular': evoked}
mne.viz.plot_compare_evokeds(edi, picks=pick)
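
With recent MNE releases the channel can usually be passed to plot_compare_evokeds by name instead of by index; a hedged variant of the last call (check the picks semantics of your MNE version):

mne.viz.plot_compare_evokeds({'HF': evoked_hp, 'Regular': evoked}, picks=ch)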
Example #8
def topo_stat(conf):
    print('\trun fdr for topomaps...')
    grand_average = conf.grand_average
    time = conf.time
    times_to_plot = conf.times_to_plot
    p_mul_topo = conf.p_mul_topo
    p_mul_topo_contrast = conf.p_mul_topo_contrast
    p_mul_topo_fdr_contrast = conf.p_mul_topo_fdr_contrast
    baseline = conf.baseline
    legend = conf.legend

    cur_dir = conf.path_fdr
    verbose = conf.verbose

    os.chdir(cur_dir)

    topomaps = [
        f'{legend[0]}', f'{legend[1]}', 'difference_fdr', 'difference_deep_fdr'
    ]
    options = {
        'page-size': 'A3',
        'orientation': 'Landscape',
        'zoom': 1.0,
        'no-outline': None,
        'quiet': ''
    }

    os.makedirs(os.path.join(conf.path_fdr, f'{legend[0]}_vs_{legend[1]}'),
                exist_ok=True)
    if grand_average:
        frequency = None
    else:
        frequency = conf.frequency

    #donor data file
    temp = mne.Evoked(f'{conf.path_home}donor-ave.fif', verbose='ERROR')
    temp.times = conf.time
    subjects = conf.subjects
    comp1_mean, comp2_mean, contr, temp1, temp2, p_val, binary, subjects1 = compute_p_val(
        conf, subjects, conf.kind, conf.train, frequency, conf.check_num_sens)
    df1 = contr[:, 0, 204:, :]  #per channel
    df2 = contr[:, 1, 204:, :]
    if verbose:
        print('df1 shape', df1.shape)
        print('df2 shape', df2.shape)

    #res_tfce = np.zeros((876, 102))
    pos = io.loadmat(f'{conf.path_home}pos_store.mat')['pos']
    chan_labels = to_str_ar(
        io.loadmat(f'{conf.path_home}channel_labels.mat')['chanlabels'])
    dict_col = {
        'risk': 'salmon',
        'norisk': 'olivedrab',
        'prerisk': 'mediumpurple',
        'postrisk': 'darkturquoise'
    }
    p_val_fdr = space_fdr(p_val)

    ##### CONDITION1 and Stat  ######
    # average = 0.1 means averaging of the power data over 100 ms
    if verbose:
        print('comp1_mean', comp1_mean.shape)
        print('df1', df1.shape)
        print('time', len(time))
        print('times to plot', len(times_to_plot))
    t_stat_con1, p_val_con1 = stats.ttest_1samp(df1, 0, axis=0)
    if verbose:
        print('p val con1 shape', p_val_con1.shape)
    width, height = p_val_con1.shape
    p_val_con1 = p_val_con1.reshape(width * height)
    _, p_val_con1 = mul.fdrcorrection(p_val_con1)
    p_val_con1 = p_val_con1.reshape((width, height))
    binary = p_val_binary(p_val_con1, treshold=0.05)

    temp.data = comp1_mean[204:, :]
    fig = temp.plot_topomap(times=times_to_plot,
                            average=0.1,
                            units="dB",
                            scalings=dict(eeg=1e6, grad=1, mag=1e15),
                            ch_type='planar1',
                            time_unit='s',
                            show=False,
                            title=legend[0] + ' stat fdr against zero',
                            colorbar=True,
                            vmax=p_mul_topo,
                            vmin=-p_mul_topo,
                            extrapolate="local",
                            mask=np.bool_(binary),
                            mask_params=dict(marker='o',
                                             markerfacecolor='w',
                                             markeredgecolor='k',
                                             linewidth=0,
                                             markersize=7,
                                             markeredgewidth=2))
    fig.savefig(os.path.join(conf.path_fdr, legend[0] + '_vs_' + legend[1],
                             legend[0] + '.png'),
                dpi=300)
    plt.close()

    ##### CONDITION2 and Stat ######

    #temp.data = comp2_mean[204:,:]
    t_stat_con2, p_val_con2 = stats.ttest_1samp(df2, 0, axis=0)
    width, height = p_val_con2.shape
    p_val_con2 = p_val_con2.reshape(width * height)
    _, p_val_con2 = mul.fdrcorrection(p_val_con2)
    p_val_con2 = p_val_con2.reshape((width, height))
    binary = p_val_binary(p_val_con2, treshold=0.05)

    temp.data = comp2_mean[204:, :]
    fig = temp.plot_topomap(times=times_to_plot,
                            average=0.1,
                            units='dB',
                            scalings=dict(eeg=1e6, grad=1, mag=1e15),
                            ch_type='planar1',
                            time_unit='s',
                            show=False,
                            title=legend[1] + ' stat fdr  against zero',
                            colorbar=True,
                            vmax=p_mul_topo,
                            vmin=-p_mul_topo,
                            extrapolate="local",
                            mask=np.bool_(binary),
                            mask_params=dict(marker='o',
                                             markerfacecolor='w',
                                             markeredgecolor='k',
                                             linewidth=0,
                                             markersize=7,
                                             markeredgewidth=2))
    fig.savefig(os.path.join(conf.path_fdr, legend[0] + '_vs_' + legend[1],
                             legend[1] + '.png'),
                dpi=300)
    plt.close()

    ##### CONDITION2 - CONDITION1 with marks no time  (space FDR) ######
    p_val_fdr = space_fdr(p_val)
    binary_fdr = p_val_binary(p_val_fdr, treshold=0.05)
    temp.data = comp2_mean[204:, :] - comp1_mean[204:, :]
    fig = temp.plot_topomap(
        times=times_to_plot,
        average=0.1,
        units='dB',
        scalings=dict(eeg=1e6, grad=1, mag=1e15),
        ch_type='planar1',
        time_unit='s',
        show=False,
        title='%s - %s  space fdr: marks for each head separately' %
        (legend[1], legend[0]),
        colorbar=True,
        vmax=p_mul_topo_contrast,
        vmin=-p_mul_topo_contrast,
        extrapolate="local",
        mask=np.bool_(binary_fdr[204:, :]),
        mask_params=dict(marker='o',
                         markerfacecolor='w',
                         markeredgecolor='k',
                         linewidth=0,
                         markersize=7,
                         markeredgewidth=2))
    fig.savefig(os.path.join(conf.path_fdr, legend[0] + '_vs_' + legend[1],
                             'difference_fdr.png'),
                dpi=300)
    plt.close()

    #### CONDITION2 - CONDITION1 with marks (WITH FDR) deep FDR with time ####

    t_stat, p_val_deep = stats.ttest_rel(df1, df2, axis=0)
    width, height = p_val_deep.shape
    p_val_resh = p_val_deep.reshape(width * height)
    _, p_val_deep_fdr = mul.fdrcorrection(p_val_resh)
    p_val_deep_fdr = p_val_deep_fdr.reshape((width, height))
    binary_deep_fdr = p_val_binary(p_val_deep_fdr, treshold=0.05)
    temp.data = comp2_mean[204:, :] - comp1_mean[204:, :]

    fig = temp.plot_topomap(times=times_to_plot,
                            average=0.1,
                            units='dB',
                            scalings=dict(eeg=1e6, grad=1, mag=1e15),
                            ch_type='planar1',
                            time_unit='s',
                            show=False,
                            title='%s - %s deep fdr over all heads' %
                            (legend[1], legend[0]),
                            colorbar=True,
                            vmax=p_mul_topo_fdr_contrast,
                            vmin=-p_mul_topo_fdr_contrast,
                            extrapolate="local",
                            mask=np.bool_(binary_deep_fdr),
                            mask_params=dict(marker='o',
                                             markerfacecolor='w',
                                             markeredgecolor='k',
                                             linewidth=0,
                                             markersize=7,
                                             markeredgewidth=2))
    fig.savefig(os.path.join(conf.path_fdr, legend[0] + '_vs_' + legend[1],
                             'difference_deep_fdr.png'),
                dpi=300)
    plt.close()
    html_name = os.path.join(conf.path_fdr,
                             legend[0] + '_vs_' + legend[1] + '.html')
    clear_html(html_name)
    add_str_html(html_name, '<!DOCTYPE html>')
    add_str_html(html_name, '<html>')
    add_str_html(html_name, '<body>')
    if grand_average:
        add_str_html(
            html_name,
            '<p style="font-size:20px;"><b> %s, %s, trained, %s, %s, %d subjects </b></p>'
            % (legend[0] + '_vs_' + legend[1], conf.ERF, baseline,
               conf.zero_point, len(subjects1)))
    else:
        add_str_html(
            html_name,
            '<p style="font-size:20px;"><b> %s, %s, trained, %s, %s, %d subjects </b></p>'
            % (legend[0] + '_vs_' + legend[1], frequency, baseline,
               conf.zero_point, len(subjects1)))

    add_str_html(
        html_name,
        '<p style="font-size:20px;"><b> boolean fdr  = 1  marked </b></p>')
    add_str_html(html_name, '<table>')
    for topo in topomaps:
        add_str_html(html_name, "<tr>")
        add_pic_topo_html(
            html_name,
            os.path.join(legend[0] + '_vs_' + legend[1], topo + '.png'))
    add_str_html(html_name, "</tr>")
    add_str_html(html_name, '</body>')
    add_str_html(html_name, '</html>')
    pdf_file = html_name.replace("html", "pdf")
    if verbose:
        print('All done!')
    print('\tfdr for topomaps completed')
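
The helpers space_fdr and p_val_binary used above are defined elsewhere in this toolbox. A minimal sketch of plausible implementations, assuming space_fdr runs an FDR correction across sensors at each time point and p_val_binary turns (corrected) p-values into a 0/1 mask:

import numpy as np
import statsmodels.stats.multitest as mul

def space_fdr(p_val):
    # hypothetical re-implementation: correct over sensors separately at each time point
    p_corr = np.empty_like(p_val)
    for t in range(p_val.shape[1]):
        _, p_corr[:, t] = mul.fdrcorrection(p_val[:, t])
    return p_corr

def p_val_binary(p_val, treshold=0.05):
    # hypothetical re-implementation: 1 where the p-value passes the threshold, else 0
    return (p_val < treshold).astype(int)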
Example #9
def container_process(conf):
    print('\trun tfr container...')
    path_home = conf.path_home
    kind = conf.kind
    train = conf.train
    frequency = conf.frequency
    spec = conf.spec
    data_path = conf.data_path
    verbose = conf.verbose

    donor = mne.Evoked(f'{path_home}donor-ave.fif', verbose='ERROR')
    fpath_events = conf.path_mio + '/mio_out_{}/{}_run{}_mio_corrected_{}{}.txt'

    #get rid of runs, leave frequency data for pos and neg feedback for time course plotting 
    for i in range(len(kind)):
        for subject in conf.subjects:
            stc_list = []
            processing_done = False
            out_file = f'{subject}_morphed'
            for run in conf.runs:
                print('\t\t', kind[i], run, subject)
                path_events = fpath_events.format(kind[i], subject, run, kind[i], train)
                if pathlib.Path(path_events).exists():
                    if verbose:
                        print('This file is being processed: ', path_events)

                    stc_file = f'{subject}_run{run}morphed'
                    old_level = mne.set_log_level(verbose='ERROR', return_old_level=True)
                    stc_morph_data = mne.read_source_estimate(stc_file)
                    mne.set_log_level(verbose=old_level)
                    stc_list.append(stc_morph_data)

                    processing_done = True

                if run == conf.runs[-1] and processing_done:
                    container_results(stc_morph_data, stc_list, donor, out_file, verbose)

                    #inverse operator
                    inverse_operator = mne.minimum_norm.make_inverse_operator(epochs_of_interest.info, fwd, noise_cov=noise_covv,
                               loose=0.2, depth=0.8, verbose=True)
                    print('Inverse operator ready')
                    #bands = dict(theta=[4,8])
                    #f_step = 1
                    #stc = mne.minimum_norm.source_band_induced_power(epochs_of_interest.pick('grad'), inverse_operator,\
                    #                          bands, use_fft=False, df = f_step, n_cycles = 2)["theta"]
                    #sourse estimates on each epoch
                    stc = mne.minimum_norm.apply_inverse_epochs(epochs_of_interest, inverse_operator, method=method,\
                           lambda2=lambda2, pick_ori='normal')
                    stc=np.mean(stc)
                    morph = mne.compute_source_morph(stc, subject_from=subject, subject_to='fsaverage')
                    stc_fsaverage = morph.apply(stc)
                    print('Morphing done!')
                    print(stc_fsaverage)
                    stc_morph_name = f'/MORPH/{subject}_run{run}morphed'
                    stc_fsaverage.save(stc_morph_name)
                    print('Morphing saved')
                    '''
                    stc_morph0 = mne.read_source_estimate(stc_morph_name)
                    stc_list.append(stc_morph0)
                    V_all = np.mean([stc_list[0].data], axis=0)
                    stc_av = (stc_list[0])/2
                    #V_all = np.mean([stc_list[0].data, stc_list[1].data], axis=0)
                    #stc_av = (stc_list[0] + stc_list[1])/2
                    stc_new = mne.SourceEstimate(V_all, [stc_list[0].lh_vertno, stc_list[0].rh_vertno], tmin=-1.4, tstep=stc_av.tstep, subject='fsaverage')
                    print(stc_new)
                    V_all = [stc_new.data]
                    num0 = np.argmax(stc_new.data.mean(axis=0))
                    init_time = stc_new.times[num0]
Example #10
fmin, fmax = [0, 15]
fmid = int((fmin + fmax) / 2.)

times = np.arange(-0.1, 1.5, 0.1) # Timing of the source time course (shouldn't be too big)

for nip in nips:    
    for bloc in bloc_names:
        for stimulus in stimulus_names:

            ###################################################################
            ########################### Importation ###########################
            ###################################################################

            # Evoked responses            
            fname = data_evoked_directory + bloc + '/' + nip + '/' + stimulus
            evoked = mne.Evoked(fname + '-ave.fif')

            # Corresponding inverse operator
            inverse_operator = read_inverse_operator(fname + '-inv.fif')
            
            ###################################################################
            ######################### Source estimation #######################
            ###################################################################
            
            stc = apply_inverse(evoked, inverse_operator, lambda2, 
                                method=method, pick_ori=None)

            ###################################################################
            ########################### Visualization #########################
            ###################################################################
            
Example #11
 def process(self):
     data = mne.Evoked(self.parameters["filename"])
     return {"Evoked Data": data}
Example #12
def evoked_ndvar(evoked, name='meg', data=None, exclude='bads', vmax=None):
    """
    Convert one or more mne :class:`Evoked` objects to an :class:`NDVar`.

    Parameters
    ----------
    evoked : str | Evoked | list of Evoked
        The Evoked to convert to NDVar. Can be a string designating a file
        path to an evoked fiff file containing only one evoked.
    name : str
        Name of the NDVar.
    data : 'eeg' | 'mag' | 'grad' | None
        The kind of data to include. If None (default), inferred from ``evoked.info``.
    exclude : list of string | string
        Channels to exclude (:func:`mne.pick_types` kwarg).
        If 'bads' (default), exclude channels in info['bads'].
        If empty do not exclude any.
    vmax : None | scalar
        Set a default range for plotting.

    Notes
    -----
    If evoked objects have different channels, the intersection is used (i.e.,
    only the channels present in all objects are retained).
    """
    if isinstance(evoked, str):
        evoked = mne.Evoked(evoked)

    if data is None:
        if isinstance(evoked, (tuple, list)):
            data_set = {_guess_ndvar_data_type(e.info) for e in evoked}
            if len(data_set) > 1:
                raise ValueError("Different Evoked objects contain different "
                                 "data types: %s" % ', '.join(data_set))
            data = data_set.pop()
        else:
            data = _guess_ndvar_data_type(evoked.info)

    if data == 'mag':
        info = _cs.meg_info(vmax)
    elif data == 'eeg':
        info = _cs.eeg_info(vmax)
    elif data == 'grad':
        info = _cs.meg_info(vmax, unit='T/cm')
    else:
        raise ValueError("data=%s" % repr(data))

    if isinstance(evoked, mne.Evoked):
        picks = _picks(evoked.info, data, exclude)

        x = evoked.data[picks]
        sensor = sensor_dim(evoked, picks=picks)
        time = UTS.from_int(evoked.first, evoked.last, evoked.info['sfreq'])
        dims = (sensor, time)
    else:
        e0 = evoked[0]

        # find common channels
        all_chs = set(e0.info['ch_names'])
        exclude = set(e0.info['bads'])
        times = e0.times
        for e in evoked[1:]:
            chs = set(e.info['ch_names'])
            all_chs.update(chs)
            exclude.update(e.info['bads'])
            missing = all_chs.difference(chs)
            exclude.update(missing)
            if not np.all(e.times == times):
                raise ValueError("Not all evoked have the same time points.")

        # get data
        x = []
        sensor = None
        exclude = list(exclude)
        for e in evoked:
            picks = _picks(e.info, data, exclude)
            x.append(e.data[picks])
            if sensor is None:
                sensor = sensor_dim(e, picks=picks)

        time = UTS.from_int(e0.first, e0.last, e0.info['sfreq'])
        dims = ('case', sensor, time)

    return NDVar(x, dims, info=info, name=name)
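
A hedged usage sketch for evoked_ndvar, reusing the MNE sample dataset path from the earlier examples; it assumes the module-level helpers (_picks, sensor_dim, UTS, NDVar) that this function relies on are importable:

import os
import mne

fname = os.path.join(mne.datasets.sample.data_path(), 'MEG', 'sample',
                     'sample_audvis-ave.fif')
ndvar = evoked_ndvar(mne.Evoked(fname, 'Left Auditory'), name='aud_left', data='mag')
print(ndvar.name)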
Example #13
from scipy import io
io.savemat('epochs_data.mat', dict(epochs_data=epochs_data), oned_as='row')

epochs.save('sample-epo.fif')

evoked = epochs['aud_l'].average()
print(evoked)

max_in_each_epoch = [e.max() for e in epochs['aud_l']]
print(max_in_each_epoch[:4])

c_path = '/Users/wang/mne/tmp_data/'
evoked_fname = data_path + 'sample_audvis-ave.fif'
# evoked1 = mne.fiff.Evoked(evoked_fname, setno=1, baseline=(None, 0), proj=True)
evoked1 = mne.Evoked(evoked_fname, condition=0, proj=True)  #baseline=(None, 0) was not applicable
evoked2 = mne.Evoked(evoked_fname, condition=1, proj=True)  
evoked3 = mne.Evoked(evoked_fname, condition='Left visual', proj=True)  

# contrast = evoked1 - evoked2    #unsupported operand type(s) for -: 'Evoked' and 'Evoked'

# # # # # # # # # # # # # # # # # # 
# # # Time-Frequency: Induced power and phase-locking values¶
# # # # # # # # # # # # # # # # # # 

import numpy as np
n_cycles = 2  # number of cycles in Morlet wavelet
frequencies = np.arange(7, 30, 3)  # frequencies of interest
Fs = raw.info['sfreq']  # sampling in Hz
times = epochs.times
Example #14
kinds = ['pseudos', 'reals']
targets = {'pseudos': ['slukke', 'slitte', 'spykke', 'svaette']}
targets['reals'] = ['slutte', 'slikke', 'spytte', 'svaekke']

in_files = [f for f in os.listdir(gm_folder) if "{0:s}".format(group) in f]
print(in_files)

evokeds = {}

norm = mpl.colors.Normalize(vmin=0, vmax=len(in_files) - 1)
cmap = mpl.cm.ScalarMappable(norm=norm, cmap=mpl.cm.Blues)
cmap.set_array([])
colors = {}

for fileno, fil in enumerate(in_files):
    evoked = mne.Evoked(join(gm_folder, fil))
    colors[str(fileno)] = cmap.to_rgba(fileno)
    evoked.pick_types(meg=True, eeg=False)
    #evokeds.append(evoked)
    evokeds[str(fileno)] = evoked

# GFP PLOTS
picks = mne.pick_types(evokeds["0"].info,
                       meg=True,
                       eeg=False,
                       eog=False,
                       ecg=False,
                       stim=False)
gfp_plot = mne.viz.plot_compare_evokeds(evokeds,
                                        picks=picks,
                                        colors=colors,
Example #15
import mne, os, sys, numpy as np
from config import *
import pathlib

if mode == 'server':
    fpath_ev = '/home/asmyasnikova83/DATA/'
    fpath_fr = '/home/asmyasnikova83/DATA/TFR/'
    temp1 = mne.Evoked('/home/asmyasnikova83/DATA/P006_run6_evoked-ave.fif')
    out_path = '/home/asmyasnikova83/DATA/evoked_ave/'
else:
    fpath_ev = '/home/sasha/MEG/MIO_cleaning/'
    fpath_fr = '/home/sasha/MEG/Time_frequency_analysis/'
    temp1 = mne.Evoked('/home/sasha/MEG/MIO_cleaning/P006_run6_evoked-ave.fif')
    out_path = '/home/sasha/MEG/Evoked/'

if kind == 'negative':
    #explore negative feedback
    if mode == 'server':
        fpath_events = fpath_ev + 'mio_out_negative/{}_run{}_mio_corrected_negative_no_train.txt'
        data_path = fpath_fr + 'negative/{0}_run{1}_theta_negative_no_train_int_50ms-tfr.h5'
    if mode != 'server':
        fpath_events = fpath_ev + '{}_run{}_mio_corrected_negative.txt'
        data_path = fpath_fr + '{0}_run{1}_theta_negative_int_50ms-tfr.h5'
if kind == 'positive':
    if mode == 'server':
        fpath_events = fpath_ev + 'mio_out_positive/{}_run{}_mio_corrected_positive_no_train.txt'
        data_path = fpath_fr + 'positive/{0}_run{1}_theta_positive_no_train_int_50ms-tfr.h5'
    if mode != 'server':
        fpath_events = fpath_ev + '{}_run{}_mio_corrected_positive.txt'
        data_path = fpath_fr + '{0}_run{1}_theta_positive_int_50ms-tfr.h5'
#get rid of runs, leave frequency data for pos and neg feedback for time course plotting
Example #16
 def load_trigger_wise_evokeds(self, folder_path, event_ids):
     trig_wise_evoked = dict()
     for key in event_ids:
         erp_path = folder_path + '/' + key + '_ave.fif'
         trig_wise_evoked[key] = mne.Evoked(erp_path)
     return trig_wise_evoked
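
A sketch of calling load_trigger_wise_evokeds; the folder, the event IDs and the instance (loader) are placeholders, since the surrounding class is not shown:

event_ids = {'standard': 1, 'deviant': 2}   # hypothetical trigger names
evokeds = loader.load_trigger_wise_evokeds('/path/to/erp_folder', event_ids)
for key, evk in evokeds.items():
    print(key, evk.nave)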
Example #17
def compute_p_val(subjects, kind, train, frequency, check_num_sens):
    #coordinates and channel names from matlab files - the files are here https://github.com/niherus/MNE_TFR_ToolBox/tree/master/VISUALISATION

    i = 0
    subjects1 = []
    for ind, subj in enumerate(subjects):
        rf1 = out_path + "{0}_{1}{2}{3}_{4}{5}-ave.fif".format(subj, spec, stimulus, kind[0], frequency, train)
        file1 = pathlib.Path(rf1)
        rf2 = out_path + "{0}_{1}{2}{3}_{4}{5}-ave.fif".format(subj, spec, stimulus, kind[1], frequency, train)
        file2 = pathlib.Path(rf2)
        if file1.exists() and file2.exists():
            print('This subject is being processed: ', subj, ' (', i, ') ( ', ind, ' ) ')
            subjects1.append(subj)
            i = i + 1
    print('i: ', i)
    # a container for tapers in neg and pos reinforcement; i is the number of subjects found above
    contr = np.zeros((i, 2, 306, 876))
    if random_comp:
       length = len(subjects1)
       random_class_one = np.zeros(length)
       random_class_two = np.zeros(length)
       for l in range(length):
           random_class_one[l] = np.around(random.uniform(0, 1))
           random_class_two[l] = np.around(random.uniform(0, 1))
       print('random_class', random_class_one)
       print('random_class', random_class_two)

    i = 0
    rsubjects1 = random.sample(subjects1, k = len(subjects1))
    print('rsubjects1', rsubjects1)
    for ind, subj in enumerate(rsubjects1):
        rf = out_path + "{0}_{1}{2}{3}_{4}{5}-ave.fif".format(subj, spec, stimulus, kind[0], frequency, train)
        print(rf)
        file = pathlib.Path(rf)
        if file.exists():
            print('exists:', rf)
            print('This subject is being processed: ', subj, ' (', i, ') ( ', ind, ' ) ')
            #positive FB
            print('kind[0]', kind[0])
            temp1 = mne.Evoked(out_path + "{0}_{1}{2}{3}_{4}{5}-ave.fif".format(subj, spec, stimulus, kind[0], frequency, train))
            temp1 = temp1.pick_types("grad")
            print('data shape', temp1.data.shape)
            #planars
            if random_comp:
                contr[i, int(random_class_one[i]), :204, :] = temp1.data
                contr[i, int(random_class_one[i]), 204:, :] = temp1.data[::2] + temp1.data[1::2]
            else:
                contr[i, 0, :204, :] = temp1.data
                #combined planars
                contr[i, 0, 204:, :] = temp1.data[::2] + temp1.data[1::2]
            #negative FB
            print('kind[1]', kind[1])
            temp2 = mne.Evoked( out_path + "{0}_{1}{2}{3}_{4}{5}-ave.fif".format(subj, spec, stimulus, kind[1], frequency, train))
            temp2 = temp2.pick_types("grad")
            if random_comp:
                contr[i, int(random_class_two[i]), :204, :] = temp2.data
                contr[i, int(random_class_two[i]), 204:, :] = temp2.data[::2] + temp2.data[1::2]
            else:
                contr[i, 1, :204, :] = temp2.data
                contr[i, 1, 204:, :] = temp2.data[::2] + temp2.data[1::2]
            i = i + 1
    print('CONTR shape', contr.shape)
    comp1 = contr[:, 0, :, :]
    comp2 = contr[:, 1, :, :]

    #check the number of stat significant sensors in a predefined time interval
    t_stat, p_val = stats.ttest_rel(comp1, comp2, axis=0)
    #save_t_stat = True
    if save_t_stat:
        t_stat_str = np.array2string(t_stat)
        print(type(t_stat_str))
        t_stat_file = f'{prefix}t_stat_{kind[0]}_vs_{kind[1]}.txt'
        t_stat_file_name = open(t_stat_file, "w")
        t_stat_file_name.write(t_stat_str)
        t_stat_file_name.close()

    binary = p_val_binary(p_val, treshold = 0.05)

    if check_num_sens:
        issue = binary[204:, 600]
        counter = 0
        for i in range(102):
            if issue[i] == 1:
                print('ch idx', i)
                counter = counter + 1
                print('counter', counter)
    #average the freq data over subjects
    comp1_mean = comp1.mean(axis=0)
    comp2_mean = comp2.mean(axis=0)
    print('COMP1.mean.shape', comp1_mean.shape)
    print('COMP2.mean.shape', comp2_mean.shape)
    return comp1_mean, comp2_mean, contr, temp1, temp2, p_val, binary, subjects1
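
A hypothetical call to compute_p_val; the subject and condition labels are placeholders, and out_path, spec, stimulus, random_comp, save_t_stat and prefix are assumed to come from the module-level config this snippet relies on:

comp1_mean, comp2_mean, contr, temp1, temp2, p_val, binary, used_subjects = compute_p_val(
    subjects=['P001', 'P002'], kind=['norisk', 'risk'], train='',
    frequency='theta', check_num_sens=False)
print(p_val.shape)  # (306 sensors, 876 time samples)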
Example #18
###############################################################################
###################### EXPORT IN R-READABLE FORMAT (TXT) ######################
###############################################################################

if 'df' in locals(): del df

stimulus_names = ['DD', 'DC', 'CC', 'AA', 'AV', 'VV']
bloc_names = ['Auditory', 'Visual', 'Causality', 'Temporal']

for nip in nips:
    for bloc in bloc_names:
        for stimulus in stimulus_names:

            # Read the evoked file
            fname = data_evoked_directory + bloc + '/' + nip + '/' + stimulus + '-ave.fif'
            evoked = mne.Evoked(fname)

            # Convert it to a table
            tab = evoked.to_data_frame()

            # Add context information columns
            tab['subject'] = nip  # Name
            tab['bloc'] = bloc  # Bloc
            tab['stimulus'] = stimulus  # Stimulus

            # Add model activity columns
            signals = stim_dict[returnTriggerCode()[bloc + '/' + stimulus]]
            tab['stimulus_signal_aud'] = np.concatenate(
                [signals['signals']['audio'], [0]])  # Audio
            tab['stimulus_signal_vis'] = np.concatenate(
                [signals['signals']['visual'], [0]])  # Visual
Example #19
output = 'output_topo/'

topomaps = [
    f'{legend[0]}', f'{legend[1]}', 'difference', 'difference_with_fdr'
]
options = {
    'page-size': 'A3',
    'orientation': 'Landscape',
    'zoom': 1.0,
    'no-outline': None,
    'quiet': ''
}
os.makedirs(os.path.join(output, f'{legend[0]}_vs_{legend[1]}'), exist_ok=True)

#donor data file
temp = mne.Evoked(f'{prefix}donor-ave.fif')
temp.times = np.arange(-2.000, 1.502, 0.004)
times_to_plot = np.arange(-2.0, 1.5, 0.2)

comp1_mean, comp2_mean, contr, temp1, temp2, p_val, binary, subjects1 = compute_p_val(
    subjects, kind, train, frequency, check_num_sens)
p_val_fdr = space_fdr(p_val)
#legend = ["Norisk", "Risk"]
rewrite = True

##### CONDITION1 ######
# average = 0.1 means averaging of the power data over 100 ms

temp.data = comp1_mean[204:, :]
fig = temp.plot_topomap(times=times_to_plot,
                        average=0.1,
Example #20
######################################
#  Do source imaging and handle data.#
######################################
if reload_data:
    naves = np.zeros(len(p.subjects), int)
    for si, (subj, struc) in enumerate(zip(p.subjects, p.structurals)):
        print('Loading data for subject %s...' % subj)
        inv_dir = op.join(work_dir, subj, 'inverse')

        # load the inverse
        inv = op.join(inv_dir, '%s-%d-sss-%s-inv.fif' % (subj, p.lp_cut, inv_type))
        inv = read_inverse_operator(inv)
        fname = op.join(inv_dir, '%s_%d-sss_eq_%s-ave.fif'
                        % (p.analyses[0], p.lp_cut, subj))
        aves = [mne.Evoked(fname, cond, baseline=(None, 0), proj=True,
                                kind='average') for cond in conditions]
        nave = np.unique([a.nave for a in aves])
        assert len(nave) == 1
        for ave, cond in zip(aves, conditions):
                assert ave.comment == cond
        naves[si] = nave[0]

        # apply inverse, bin, morph
        stcs = [apply_inverse(ave, inv, lambda2, 'dSPM') for ave in aves]
        stcs = [stc.bin(0.005) for stc in stcs]
        m = mne.compute_morph_matrix(struc, 'fsaverage', stcs[0].vertices,
                                     fs_verts, n_smooth)
        stcs = [stc.morph_precomputed('fsaverage', fs_verts, m)
                for stc in stcs]

        # put in big matrix
Example #21
def find_contrasts(names, stim_files, contrasts, suffix="_noblink", **params):
    contrast_labels = {}
    for i, key in enumerate(contrasts.keys()):
        contrast_labels[key] = 2**i

    evoked = collections.defaultdict(list)
    coverage = collections.defaultdict(list)
    counts = collections.defaultdict(list)

    for name, stim in zip(names, stim_files):
        events = pd.read_table(op.join(data_dir, "events", stim))
        events['samples'] = np.floor(events.Tmu * 1e-6 * 512)
        events['dummy'] = 0
        events['contrasts'] = 0
        for i, vals in enumerate(contrasts.values()):
            events['contrasts'] += (2**i) * events['TriNo'].isin(vals)

        event_mat = events[['samples', 'dummy',
                            'contrasts']].to_numpy().astype('int_')
        raw = mne.io.read_raw_fif(op.join(temp_dir, name + suffix + ".fif"))
        picks = mne.pick_types(raw.info, eeg=True)

        epochs = mne.Epochs(raw,
                            event_mat,
                            event_id=contrast_labels,
                            picks=picks,
                            **params)

        # import pdb; pdb.set_trace()

        means = {}
        for key, val in contrast_labels.items():
            evoked_dir = op.join(temp_dir, "evokeds")
            if not op.isdir(evoked_dir): os.mkdir(evoked_dir)

            evoked_file = op.join(evoked_dir, name + "_" + key + ".fif")
            if op.isfile(evoked_file):
                print(evoked_file, "already generated, skipping...")
                means[key] = mne.Evoked(evoked_file, key)
            else:
                means[key] = epochs[key].average()
                means[key].comment = key
                means[key].save(evoked_file)

            N = (event_mat[:, 2] == contrast_labels[key]).sum()
            percent = (float(means[key].nave) / N)
            print "%2.1f%% of %s kept" % (100 * percent, key)
            coverage[key].append(percent)
            counts[key].append(N)

        for key, val in means.items():
            evoked[key].append(val)

        keys = list(means.keys())
        for i in range(len(keys) - 1):
            for j in range(i + 1, len(keys)):
                a = keys[i]
                b = keys[j]
                ab = mne.combine_evoked([evoked[a][-1], evoked[b][-1]],
                                        [1, -1])
                print "Working on: ", a + ' - ' + b
                evoked[a + ' - ' + b].append(ab)

    grand_average = {}
    for key, val in evoked.items():
        grand_average[key] = mne.grand_average(val)
        grand_average[key].comment = key

    return dict(ind=evoked,
                mean=grand_average,
                coverage=coverage,
                counts=counts)
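
A sketch of a find_contrasts call; subject names, stimulus-file names and trigger codes are placeholders, and data_dir/temp_dir are assumed to be the module-level paths the function body references. The extra keyword arguments are forwarded to mne.Epochs:

contrasts = {'faces': [10, 11], 'houses': [20, 21]}   # hypothetical trigger codes
results = find_contrasts(names=['s01', 's02'],
                         stim_files=['s01_events.txt', 's02_events.txt'],
                         contrasts=contrasts,
                         tmin=-0.2, tmax=0.8, baseline=(None, 0))
print(sorted(results['mean'].keys()))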
Example #22
            n=n,
            vertices=head.vertices,
            faces=head.faces,
        )
        evoked.save(SAVE_DIR + "left_auditory-ave.fif")


else:

    with np.load(SAVE_DIR + "mne_data.npz", allow_pickle=True) as data:
        mesh = data["mesh"]
        p = data["p"]
        n = data["n"]
        mesh = trimesh.Trimesh(vertices=data["vertices"], faces=data["faces"])

    evoked = mne.Evoked(SAVE_DIR + "left_auditory-ave.fif")


#%%
# Fit the surface current for the auditory evoked response


c = MeshConductor(mesh_obj=mesh, basis_name="suh", N_suh=150)
M = c.mass

sensor_coupling = np.einsum("ijk,ij->ik", c.B_coupling(p), n)
# a = np.linalg.pinv(sensor_coupling, rcond=1e-15) @ field
ss = np.linalg.svd(sensor_coupling @ sensor_coupling.T, False, False)

# reg_exps = [0.5, 1, 2, 3, 4, 5, 6, 7, 8]
reg_exps = [1]
Example #23
# Change the lines in functions.py (the `def` and `data` lines in the extremum-search function)

import mne
import os.path as op
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
import copy
import pandas as pd
import statsmodels.stats.multitest as mul
import os
from functions import min_beta_and_time_for_min, max_beta_and_time_for_max

#load donor
temp = mne.Evoked(
    "/home/vtretyakova/Рабочий стол/speach_learn/Extremes/donor-ave.fif")

temp.times = np.arange(0, 2.01, 1)

# for self-paced
'''
subjects = []
for i in range(1,29):
    if i < 10:
        subjects += ['L00' + str(i)]
    else:
        subjects += ['L0' + str(i)]
        
subjects.remove('L015')  # no suitable events were found for this subject
'''
Example #24
def grand_average_process(conf):
    print('\trun ERF...')
    kind = conf.kind
    path_home = conf.path_home
    train = conf.train
    spec = conf.spec
    verbose = conf.verbose
    fpath_raw = conf.fpath_raw
    fpath_events = conf.path_mio + '/mio_out_{}/{}_run{}_mio_corrected_{}{}.txt'
    donor = mne.Evoked(f'{path_home}donor-ave.fif', verbose='ERROR')

    for i in range(len(kind)):
        for subject in conf.subjects:
            evoked_ave = []
            processing_done = False
            for run in conf.runs:
                print('\t\t', kind[i], subject, run)

                path_events = fpath_events.format(kind[i], subject, run,
                                                  kind[i], train)
                if verbose:
                    print('path_events', path_events)
                if conf.baseline == 'fixation_cross_norisks':
                    path_events_with_cross = f'{conf.path_mio}/mio_out_norisk/{subject}_run{run}_mio_corrected_norisk.txt'
                else:
                    path_events_with_cross = path_events
                if pathlib.Path(path_events).exists() and os.stat(path_events).st_size != 0 and \
                    pathlib.Path(path_events_with_cross).exists() and os.stat(path_events_with_cross).st_size != 0:

                    out_file = conf.path_GA + "/{}_{}{}_{}_grand_ave.fif".format(
                        subject, spec, kind[i], train)

                    raw_file = fpath_raw.format(subject, run, subject)
                    if verbose:
                        print('raw file path')
                        print(raw_file)

                    raw_data = mne.io.Raw(raw_file,
                                          preload=True,
                                          verbose='ERROR')
                    # low-pass filter the data at 50 Hz, below the power-line noise peaks
                    raw_data = raw_data.filter(None, 50, fir_design='firwin')
                    #remove slow drifts
                    raw_data = raw_data.filter(1., None, fir_design='firwin')
                    picks = mne.pick_types(raw_data.info, meg='grad')

                    events_of_interest = retrieve_events(
                        conf, raw_data, path_events, i, False)
                    events_with_cross = retrieve_events(
                        conf, raw_data, path_events_with_cross, i, True)
                    if verbose:
                        print('Done with the events!')
                        print(events_with_cross)
                        print(events_of_interest)

                    BASELINE = compute_baseline_substraction(
                        conf, raw_data, events_with_cross, events_of_interest,
                        picks)
                    if not BASELINE.any():  # BASELINE is a dummy array of zeros
                        if verbose:
                            print('Yes, BASELINE is dummy')
                        if run == conf.runs[-1] and processing_done:
                            container_results(evoked, evoked_ave, donor,
                                              out_file, verbose)
                        continue
                    if verbose:
                        print('\n\nDone with the BASELINE I!')

                    CORRECTED_DATA = correct_baseline_substraction(
                        conf, BASELINE, events_of_interest, raw_data, picks)
                    if verbose:
                        print('\n\nDone with the CORRECTED!')

                    plot_created_epochs_evoked = False
                    epochs_of_interest, evoked = create_mne_epochs_evoked(
                        conf, CORRECTED_DATA, events_of_interest,
                        plot_created_epochs_evoked, raw_data, picks)
                    evoked_ave.append(evoked.data)

                    processing_done = True

                if run == conf.runs[-1] and processing_done:
                    container_results(evoked, evoked_ave, donor, out_file,
                                      verbose)

    print('\tERF completed')
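
Several of the examples above (container_process, topo_stat, grand_average_process) take a project-specific conf object that is never shown. A purely illustrative stand-in listing the attributes grand_average_process reads, with every value a placeholder:

from types import SimpleNamespace

conf = SimpleNamespace(
    kind=['norisk', 'risk'], train='', spec='', verbose=True,
    subjects=['P001', 'P002'], runs=['1', '2', '3'],
    path_home='/path/to/home/', path_mio='/path/to/mio',
    path_GA='/path/to/grand_average',
    fpath_raw='/path/to/raw/{}_run{}_{}_raw.fif',
    baseline='fixation_cross_norisks')
grand_average_process(conf)  # hypothetical invocation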