Example 1
import mne
from mne.datasets import fetch_fsaverage


def _prepare_forward_fsaverage(cfg):
    assert cfg.fs_subject == 'fsaverage'
    trans = 'fsaverage'  # MNE has a built-in fsaverage transformation
    bem_sol = cfg.fs_subjects_dir / 'fsaverage' / \
        'bem' / 'fsaverage-5120-5120-5120-bem-sol.fif'
    if not bem_sol.exists():
        fetch_fsaverage(cfg.fs_subjects_dir)
    src = mne.setup_source_space(subject='fsaverage',
                                 subjects_dir=cfg.fs_subjects_dir,
                                 spacing=cfg.spacing,
                                 add_dist=False,
                                 n_jobs=cfg.n_jobs)
    return src, trans, str(bem_sol)
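
# --- Usage sketch (an assumption, not part of the original source) ---
# ``cfg`` is a hypothetical config object carrying the attributes the helper
# reads: fs_subject, fs_subjects_dir, spacing and n_jobs.
from pathlib import Path
from types import SimpleNamespace

cfg = SimpleNamespace(fs_subject='fsaverage',
                      fs_subjects_dir=Path(fetch_fsaverage()).parent,
                      spacing='oct6',
                      n_jobs=1)
src, trans, bem_sol = _prepare_forward_fsaverage(cfg)
# The returned triple plugs into mne.make_forward_solution(info, trans=trans,
# src=src, bem=bem_sol).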
Example 2

import os.path as op

from mne.datasets import fetch_fsaverage


def get_FS_data():
    """Obtain the paths of the trans, src, and bem files, plus subjects_dir.

    :return: trans, src, bem, subjects_dir (all strings)
    """
    # Use the fetch_fsaverage function from MNE to get fs data
    fs_dir = fetch_fsaverage(verbose=True)

    # Obtain the subject directory name
    subjects_dir = op.dirname(fs_dir)

    # Set the directory of the trans, src and bem
    trans = op.join(fs_dir, 'bem', 'fsaverage-trans.fif')
    src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
    bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')
    return trans, src, bem, subjects_dir
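
# Usage sketch (not part of the original source): fetch once, then reuse the
# returned paths, e.g. for forward modeling.
trans, src, bem, subjects_dir = get_FS_data()
print(trans)  # .../fsaverage/bem/fsaverage-trans.fif
print(bem)    # .../fsaverage/bem/fsaverage-5120-5120-5120-bem-sol.fif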
Example 3
"""

# Authors: Alexandre Gramfort <*****@*****.**>
#          Joan Massich <*****@*****.**>
#
# License: BSD Style.

import os.path as op

import mne
from mne.datasets import eegbci
from mne.datasets import fetch_fsaverage

# Download fsaverage files
fs_dir = fetch_fsaverage(verbose=True)
subjects_dir = op.dirname(fs_dir)

# The files live in:
subject = 'fsaverage'
trans = op.join(fs_dir, 'bem', 'fsaverage-trans.fif')
src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')

##############################################################################
# Load the data
# -------------
#
# Here we use EEG data from the EEGBCI dataset.

raw_fname, = eegbci.load_data(subject=1, runs=[6])
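
# The scraped example stops here; a plausible continuation (an assumption,
# following MNE's standard fsaverage EEG pipeline) reads the run and builds
# a forward solution from the paths defined above:
raw = mne.io.read_raw_edf(raw_fname, preload=True)
eegbci.standardize(raw)  # standardize channel names
raw.set_montage(mne.channels.make_standard_montage('standard_1005'))
raw.set_eeg_reference(projection=True)  # average reference, as a projector

fwd = mne.make_forward_solution(raw.info, trans=trans, src=src, bem=bem,
                                eeg=True, mindist=5.0, n_jobs=1)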
Example 4
       ylabel='Amplitude (a. u.)')
mne.viz.utils.plt_show()
del stc_vec

###############################################################################
# Morph the output to fsaverage
# -----------------------------
#
# We can also use volumetric morphing to get the data to fsaverage space. This
# is for example necessary when comparing activity across subjects. Here, we
# will use the scalar beamformer example.
# We pass a :class:`mne.SourceMorph` as the ``src`` argument to
# `mne.VolSourceEstimate.plot`. To save some computational load when applying
# the morph, we will crop the ``stc``:

fetch_fsaverage(subjects_dir)  # ensure fsaverage src exists
fname_fs_src = subjects_dir + '/fsaverage/bem/fsaverage-vol-5-src.fif'

src_fs = mne.read_source_spaces(fname_fs_src)
morph = mne.compute_source_morph(
    src,
    subject_from='sample',
    src_to=src_fs,
    subjects_dir=subjects_dir,
    niter_sdr=[10, 10, 5],
    niter_affine=[10, 10, 5],  # just for speed
    verbose=True)
stc_fs = morph.apply(stc)
del stc

# The original snippet is truncated here; a minimal closing of the call:
stc_fs.plot(src=src_fs, subjects_dir=subjects_dir)
Example 5
import os.path as op

import numpy as np
import matplotlib.pyplot as plt

import mne
from mne.datasets import fetch_fsaverage

# paths to mne datasets - sample sEEG and FreeSurfer's fsaverage subject
# which is in MNI space
misc_path = mne.datasets.misc.data_path()
sample_path = mne.datasets.sample.data_path()
subjects_dir = op.join(sample_path, 'subjects')

# use mne-python's fsaverage data
fetch_fsaverage(subjects_dir=subjects_dir, verbose=True)  # downloads if needed

# %%
# Let's load some sEEG data with channel locations and make epochs.

raw = mne.io.read_raw(op.join(misc_path, 'seeg', 'sample_seeg_ieeg.fif'))

events, event_id = mne.events_from_annotations(raw)
epochs = mne.Epochs(raw, events, event_id, detrend=1, baseline=None)
epochs = epochs['Response'][0]  # just process one epoch of data for speed

# %%
# Let's use the Talairach transform computed in the FreeSurfer recon-all
# to apply the FreeSurfer surface RAS ('mri') to MNI ('mni_tal') transform.

montage = epochs.get_montage()
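
# A sketch of the likely next steps (an assumption, following MNE's sEEG
# tutorial conventions): move the montage from 'head' to 'mri' coordinates,
# then apply the Talairach transform read from the recon-all output.
subj_dir = op.join(misc_path, 'seeg')  # FreeSurfer dir of the sEEG subject
head_mri_t = mne.coreg.estimate_head_mri_t('sample_seeg', subj_dir)
montage.apply_trans(head_mri_t)  # head -> mri (FreeSurfer surface RAS)
mri_mni_t = mne.read_talxfm('sample_seeg', subj_dir)
montage.apply_trans(mri_mni_t)  # mri -> mni_tal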
Example 6
def construct_subject():
    MNE_Repo_Mat.subjects_dir = os.path.dirname(fetch_fsaverage())
    MNE_Repo_Mat.subject = 'fsaverage'
    return MNE_Repo_Mat.subject
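
# Usage sketch (assumption): ``construct_subject`` belongs to the enclosing
# ``MNE_Repo_Mat`` class and caches the fsaverage paths as class attributes:
#
#     subject = MNE_Repo_Mat.construct_subject()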
Example 7
        # Create dummy info
        info=mne.create_info(
            ch_names=montage.ch_names,
            sfreq=1,
            ch_types='eeg',
            montage=montage,
        ),
    )
    set_3d_view(figure=fig, azimuth=135, elevation=80)
    set_3d_title(figure=fig, title=current_montage)

###############################################################################
# Check all montages against fsaverage
#

subjects_dir = op.dirname(fetch_fsaverage())

for current_montage in get_builtin_montages():
    montage = mne.channels.make_standard_montage(current_montage)
    fig = mne.viz.plot_alignment(
        # Plot options
        show_axes=True,
        dig=True,
        surfaces='head',
        trans=None,
        subject='fsaverage',
        subjects_dir=subjects_dir,

        # Create dummy info
        info=mne.create_info(
            ch_names=montage.ch_names,
            sfreq=1,
            ch_types='eeg',
            montage=montage,
        ),
    )
    # (the original fragment is truncated; the call is closed here following
    # the identical block at the top of this example)
    set_3d_view(figure=fig, azimuth=135, elevation=80)
    set_3d_title(figure=fig, title=current_montage)
Example 8
shipped in MNE-python, and display it on fsaverage template.
"""  # noqa: D205, D400
# Authors: Alexandre Gramfort <*****@*****.**>
#          Joan Massich <*****@*****.**>
#
# License: BSD Style.

from mayavi import mlab
import os.path as op

import mne
from mne.channels.montage import get_builtin_montages
from mne.datasets import fetch_fsaverage
from mne.viz import plot_alignment

subjects_dir = op.dirname(fetch_fsaverage())

###############################################################################
# check all montages
#

for current_montage in get_builtin_montages():

    montage = mne.channels.read_montage(current_montage,
                                        unit='auto',
                                        transform=False)

    info = mne.create_info(ch_names=montage.ch_names,
                           sfreq=1,
                           ch_types='eeg',
                           montage=montage)
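
    # Note (not in the original source): ``read_montage`` is the legacy
    # montage API; modern MNE replaces it with ``make_standard_montage``, as
    # Example 7 above already does. A sketch of the equivalent modern calls:
    #
    #     montage = mne.channels.make_standard_montage(current_montage)
    #     info = mne.create_info(ch_names=montage.ch_names, sfreq=1,
    #                            ch_types='eeg')
    #     info.set_montage(montage)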
Example 9
import os.path as op

import numpy as np
import pandas as pd

import mne
from mne.coreg import coregister_fiducials
from mne.datasets import fetch_fsaverage
from mne.io import read_fiducials
from mne.transforms import apply_trans


def boxy2mne(*, boxy_file=None, mtg_file=None, coord_file=None):

    # =============================================================================
    #     read raw data from boxy file
    # =============================================================================
    ###this keeps track of the line we're on###
    ###mostly to know the start and stop of data (probably an easier way)###
    line_num = 0

    ###load and read data to get some meta information###
    ###there is a lot of information at the beginning of a file###
    ###but this only grabs some of it###
    with open(boxy_file, 'r') as data:
        for i_line in data:
            line_num += 1
            if '#DATA ENDS' in i_line:
                end_line = line_num - 1
                break
            if 'Detector Channels' in i_line:
                detect_num = int(i_line.rsplit(' ')[0])
            elif 'External MUX Channels' in i_line:
                source_num = int(i_line.rsplit(' ')[0])
            elif 'Auxiliary Channels' in i_line:
                aux_num = int(i_line.rsplit(' ')[0])
            elif 'Waveform (CCF) Frequency (Hz)' in i_line:
                ccf_ha = float(i_line.rsplit(' ')[0])
            elif 'Update Rate (Hz)' in i_line:
                srate = float(i_line.rsplit(' ')[0])
            elif 'Updata Rate (Hz)' in i_line:  # some files misspell 'Update'
                srate = float(i_line.rsplit(' ')[0])
            elif '#DATA BEGINS' in i_line:
                start_line = line_num

    ###now let's go through and parse our raw data###
    raw_data = pd.read_csv(boxy_file, skiprows=start_line, sep='\t')

    ###detectors, sources, and data types###
    detectors = [
        'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
    ]
    data_types = ['AC', 'DC', 'Ph']
    sources = np.arange(1, source_num + 1, 1)

    ###since we can save boxy files in two different styles###
    ###this will check to see which style the data is saved###
    ###seems to also work with older boxy files###
    if 'exmux' in raw_data.columns:
        filetype = 'non-parsed'

        ###drop the last line as this is just '#DATA ENDS'###
        raw_data = raw_data.drop([len(raw_data) - 1])

        ###store some extra info###
        record = raw_data['record'].to_numpy()
        exmux = raw_data['exmux'].to_numpy()

        ###make some empty variables to store our data###
        raw_ac = np.zeros(
            (detect_num * source_num, int(len(raw_data) / source_num)))
        raw_dc = np.zeros(
            (detect_num * source_num, int(len(raw_data) / source_num)))
        raw_ph = np.zeros(
            (detect_num * source_num, int(len(raw_data) / source_num)))
    else:
        filetype = 'parsed'

        ###drop the last line as this is just '#DATA ENDS'###
        ###also drop the first line since this is empty###
        raw_data = raw_data.drop([0, len(raw_data) - 1])

        ###make some empty variables to store our data###
        raw_ac = np.zeros(((detect_num * source_num), len(raw_data)))
        raw_dc = np.zeros(((detect_num * source_num), len(raw_data)))
        raw_ph = np.zeros(((detect_num * source_num), len(raw_data)))

    ###store some extra data, might not need these though###
    time = raw_data['time'].to_numpy() if 'time' in raw_data.columns else []
    group = raw_data['group'].to_numpy() if 'group' in raw_data.columns else []
    step = raw_data['step'].to_numpy() if 'step' in raw_data.columns else []
    mark = raw_data['mark'].to_numpy() if 'mark' in raw_data.columns else []
    flag = raw_data['flag'].to_numpy() if 'flag' in raw_data.columns else []
    aux1 = raw_data['aux-1'].to_numpy() if 'aux-1' in raw_data.columns else []
    digaux = (raw_data['digaux'].to_numpy()
              if 'digaux' in raw_data.columns else [])
    bias = np.zeros((detect_num, len(raw_data)))

    ###loop through detectors###
    for i_detect in detectors[0:detect_num]:

        ###older boxy files don't seem to keep track of detector bias###
        ###probably due to specific boxy settings actually###
        if 'bias-A' in raw_data.columns:
            bias[detectors.index(i_detect), :] = raw_data['bias-' +
                                                          i_detect].to_numpy()

        ###loop through data types###
        for i_data in data_types:

            ###loop through sources###
            for i_source in sources:

                ###where to store our data###
                index_loc = detectors.index(i_detect) * source_num + (
                    i_source - 1)

                ###need to treat our filetypes differently###
                if filetype == 'non-parsed':

                    ###filetype saves timepoints in groups###
                    ###this should account for that###
                    time_points = np.arange(i_source - 1,
                                            int(record[-1]) * source_num,
                                            source_num)

                    ###determine which channel to look for###
                    channel = i_detect + '-' + i_data

                    ###save our data based on data type###
                    if data_types.index(i_data) == 0:
                        raw_ac[index_loc, :] = raw_data[channel][
                            time_points].to_numpy()
                    elif data_types.index(i_data) == 1:
                        raw_dc[index_loc, :] = raw_data[channel][
                            time_points].to_numpy()
                    elif data_types.index(i_data) == 2:
                        raw_ph[index_loc, :] = raw_data[channel][
                            time_points].to_numpy()
                elif filetype == 'parsed':

                    ###determine which channel to look for###
                    channel = i_detect + '-' + i_data + str(i_source)

                    ###save our data based on data type###
                    if data_types.index(i_data) == 0:
                        raw_ac[index_loc, :] = raw_data[channel].to_numpy()
                    elif data_types.index(i_data) == 1:
                        raw_dc[index_loc, :] = raw_data[channel].to_numpy()
                    elif data_types.index(i_data) == 2:
                        raw_ph[index_loc, :] = raw_data[channel].to_numpy()

    # =========================================================================
    #     read source and electrode locations from .mtg and .tol/.elp files
    # =========================================================================

    ###set up some variables###
    chan_num = []
    source_label = []
    detect_label = []
    chan_wavelength = []
    chan_modulation = []

    ###load and read each line of the .mtg file###
    with open(mtg_file, 'r') as data:
        for i_ignore in range(2):
            next(data)
        for i_line in data:
            chan1, chan2, source, detector, wavelength, modulation = \
                i_line.split()
            chan_num.append(chan1)
            source_label.append(source)
            detect_label.append(detector)
            chan_wavelength.append(wavelength)
            chan_modulation.append(modulation)

    ###check if we are given a .tol or .elp file###
    all_labels = []
    all_coords = []
    fiducial_coords = []
    if coord_file[-3:].lower() == 'elp':
        get_label = 0
        get_coords = 0
        ###load and read .elp file###
        with open(coord_file, 'r') as data:
            for i_line in data:
                ###first let's get our fiducial coordinates###
                if '%F' in i_line:
                    fiducial_coords.append(i_line.split()[1:])
                ###check where sensor info starts###
                if '//Sensor name' in i_line:
                    get_label = 1
                elif get_label == 1:
                    ###grab the part after '%N' for the label###
                    label = i_line.split()[1]
                    all_labels.append(label)
                    get_label = 0
                    get_coords = 1
                elif get_coords == 1:
                    X, Y, Z = i_line.split()
                    all_coords.append([float(X), float(Y), float(Z)])
                    get_coords = 0
        for i_index in range(3):
            fiducial_coords[i_index] = np.asarray(
                [float(x) for x in fiducial_coords[i_index]])
    elif coord_file[-3:].lower() == 'tol':
        ###load and read .tol file###
        with open(coord_file, 'r') as data:
            for i_line in data:
                label, X, Y, Z = i_line.split()
                all_labels.append(label)
                ###convert coordinates from mm to m###
                all_coords.append([(float(X) * 0.001), (float(Y) * 0.001),
                                   (float(Z) * 0.001)])

    ###get coordinates for sources###
    source_coords = []
    for i_chan in source_label:
        if i_chan in all_labels:
            chan_index = all_labels.index(i_chan)
            source_coords.append(all_coords[chan_index])

    ###get coordinates for detectors###
    detect_coords = []
    for i_chan in detect_label:
        if i_chan in all_labels:
            chan_index = all_labels.index(i_chan)
            detect_coords.append(all_coords[chan_index])

    ###need to rename labels to make other functions happy###
    ###get our unique labels for sources and detectors###
    unique_source_labels = []
    unique_detect_labels = []
    for label in source_label:
        if label not in unique_source_labels:
            unique_source_labels.append(label)
    for label in detect_label:
        if label not in unique_detect_labels:
            unique_detect_labels.append(label)

    ###now let's label each channel in our data###
    ###data is channels X timepoint where the first source_num rows correspond to###
    ###the first detector, and each row within that group is a different source###
    ###should note that current .mtg files contain channels for multiple data files###
    ###going to move to have a single .mtg file per participant, condition, and montage###
    ###combine coordinates and label our channels###
    ###will label them based on ac, dc, and ph data###
    boxy_coords = []
    boxy_labels = []
    data_types = ['AC', 'DC', 'Ph']
    total_chans = detect_num * source_num
    for i_type in data_types:
        for i_coord in range(len(source_coords[0:total_chans])):
            boxy_coords.append(
                np.mean(np.vstack((source_coords[i_coord],
                                   detect_coords[i_coord])),
                        axis=0).tolist() + source_coords[i_coord] +
                detect_coords[i_coord] + [chan_wavelength[i_coord]] + [0] +
                [0])
            boxy_labels.append(
                'S' +
                str(unique_source_labels.index(source_label[i_coord]) + 1) +
                '_D' +
                str(unique_detect_labels.index(detect_label[i_coord]) + 1) +
                ' ' + chan_wavelength[i_coord] + ' ' + i_type)

    ###montage only wants channel coords, so need to grab those, convert to###
    ###array, then make a dict with labels###
    for i_chan in range(len(boxy_coords)):
        boxy_coords[i_chan] = np.asarray(boxy_coords[i_chan], dtype=np.float64)

    for i_chan in range(len(all_coords)):
        all_coords[i_chan] = np.asarray(all_coords[i_chan], dtype=np.float64)

    all_chan_dict = dict(zip(all_labels, all_coords))

    ###make our montage###
    montage_orig = mne.channels.make_dig_montage(ch_pos=all_chan_dict,
                                                 coord_frame='head',
                                                 nasion=fiducial_coords[0],
                                                 lpa=fiducial_coords[1],
                                                 rpa=fiducial_coords[2])

    ###for some reason make_dig_montage put our channels in a different order than what we input###
    ###let's fix that. should be fine to just change coords and ch_names###
    for i_chan in range(len(all_coords)):
        montage_orig.dig[i_chan + 3]['r'] = all_coords[i_chan]
        montage_orig.ch_names[i_chan] = all_labels[i_chan]

    ###add an extra channel for our triggers###
    boxy_labels.append('Markers')

    ###create info structure###
    info = mne.create_info(boxy_labels, srate, ch_types='fnirs_raw')
    info.update(dig=montage_orig.dig)

    ###get our fiducials and transform matrix from fsaverage###
    subjects_dir = op.dirname(fetch_fsaverage())
    fid_path = op.join(subjects_dir, 'fsaverage', 'bem',
                       'fsaverage-fiducials.fif')
    fiducials = read_fiducials(fid_path)
    trans = coregister_fiducials(info, fiducials[0], tol=0.02)

    ###remake montage using the transformed coordinates###
    all_coords_trans = apply_trans(trans, all_coords)
    all_chan_dict_trans = dict(zip(all_labels, all_coords_trans))
    fiducial_coords_trans = apply_trans(trans, fiducial_coords)

    ###make our montage###
    montage_trans = mne.channels.make_dig_montage(
        ch_pos=all_chan_dict_trans,
        coord_frame='head',
        nasion=fiducial_coords_trans[0],
        lpa=fiducial_coords_trans[1],
        rpa=fiducial_coords_trans[2])

    ###let's fix montage order again###
    for i_chan in range(len(all_coords_trans)):
        montage_trans.dig[i_chan + 3]['r'] = all_coords_trans[i_chan]
        montage_trans.ch_names[i_chan] = all_labels[i_chan]

    ###add data type and channel wavelength to info###
    info.update(dig=montage_trans.dig, trans=trans)

    ###place our coordinates and wavelengths for each channel###
    for i_chan in range(len(boxy_labels) - 1):
        temp_chn = apply_trans(trans, boxy_coords[i_chan][0:3])
        temp_src = apply_trans(trans, boxy_coords[i_chan][3:6])
        temp_det = apply_trans(trans, boxy_coords[i_chan][6:9])
        temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64)
        info['chs'][i_chan]['loc'] = np.concatenate(
            (temp_chn, temp_src, temp_det, temp_other), axis=0)
    info['chs'][-1]['loc'] = np.zeros((12, ))

    ###now combine our data types into a single array###
    all_data = np.append(raw_ac, np.append(raw_dc, raw_ph, axis=0), axis=0)

    ###add our markers to the data array based on filetype###
    if filetype == 'non-parsed':
        if len(digaux):  # digaux is [] when the column is absent
            markers = digaux[np.arange(0, len(digaux), source_num)]
        else:
            markers = np.zeros(np.size(all_data, axis=1))
    elif filetype == 'parsed':
        markers = digaux
    all_data = np.vstack((all_data, markers))

    ###create our raw data object###
    raw_data_obj = mne.io.RawArray(all_data, info)

    return raw_data_obj
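
# Usage sketch (assumption; the three file paths are hypothetical
# placeholders for a BOXY recording, its montage and its coordinate file):
#
#     raw = boxy2mne(boxy_file='subj1_boxy.txt', mtg_file='subj1.mtg',
#                    coord_file='subj1.elp')
#     raw.plot()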
Example 10

import os.path as op
import pickle
import timeit
from os import listdir, mkdir

import mne
from mne import read_labels_from_annot
from mne.datasets import fetch_fsaverage
from mne.minimum_norm import make_inverse_operator, apply_inverse_raw
from tqdm import tqdm


def extract_ts(dir_prepro_dat, dir_save, lower, upper, atlas):
    """
    Parameters
    ----------
    dir_prepro_dat : string
        Path to the saved preprocessed data.
    dir_save : string
        Path where the extracted time series should be saved.
    lower : list of floats
        Lower limits of the desired frequency ranges. Must have the same
        length as upper.
    upper : list of floats
        Upper limits of the desired frequency ranges. Must have the same
        length as lower.
    atlas : string
        Atlas defining the regions: 'DK', 'BAita' or 'DKLobes'.

    Notes
    -----
    Saves the extracted time series and the run time as a dictionary in the
    chosen path (dir_save).
    """

    ##################################################################
    # Initialize parameters
    ##################################################################

    fs_dir = fetch_fsaverage(verbose=True)
    subjects_dir = op.dirname(fs_dir)
    #fs_dir = '/home/kmsa/mne_data/MNE-fsaverage-data/fsaverage'
    #subjects_dir = '/home/kmsa/mne_data/MNE-fsaverage-data'

    # The files live in:
    subject = 'fsaverage'
    trans = 'fsaverage'  # MNE has a built-in fsaverage transformation
    src = mne.setup_source_space(subject,
                                 spacing='oct6',
                                 subjects_dir=subjects_dir,
                                 add_dist=False)
    # Boundary element method
    bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')

    # Inverse parameters
    method = "eLORETA"  #other options are minimum norm, dSPM, and sLORETA
    snr = 3.
    lambda2 = 1. / snr**2
    buff_sz = 250

    # Create folder if it does not exist
    if not op.exists(dir_save):
        mkdir(dir_save)
        print('\nCreated new path : ' + dir_save)

    # Check what atlas to use and read labels
    if atlas == 'DK':
        # Desikan-Killiany Atlas = aparc
        labels = read_labels_from_annot(subject,
                                        parc='aparc',
                                        hemi='both',
                                        surf_name='white',
                                        annot_fname=None,
                                        regexp=None,
                                        subjects_dir=subjects_dir,
                                        verbose=None)

        labels = labels[:-1]
    # elif atlas == 'BA':
    #     # Broadmann areas
    #     labels = read_labels_from_annot(subject, parc = 'PALS_B12_Brodmann', hemi='both',
    #                                  surf_name= 'white', annot_fname = None, regexp = None,
    #                                  subjects_dir = subjects_dir, verbose = None)
    #     labels = labels[5:87]

    elif atlas == 'BAita':
        # Brodmann areas collected as in Di Lorenzo et al.
        labels = read_labels_from_annot(subject,
                                        parc='PALS_B12_Brodmann',
                                        hemi='both',
                                        surf_name='white',
                                        annot_fname=None,
                                        regexp=None,
                                        subjects_dir=subjects_dir,
                                        verbose=None)
        labels = labels[5:87]
        lab_dict = {}
        for lab in labels:
            lab_dict[lab.name] = lab

        ita_ba = [
            [1, 2, 3, 4],
            [5, 7],
            [6, 8],
            [9, 10],
            [11, 47],
            [44, 45, 46],  #[13],
            [20, 21, 22, 38, 41, 42],
            [24, 25, 32],  #[24,25,32,33], 
            [23, 29, 30, 31],
            [27, 28, 35, 36],  #[27,28,34,35,36], 
            [39, 40, 43],
            [19, 37],
            [17, 18]
        ]
        # ita_label = ['SMA', 'SPL', 'SFC', 'AFC', 'OFC', 'LFC', #'INS',
        #              'LTL', 'ACC_new', 'PCC', 'PHG_new', 'IPL', 'FLC', 'PVC']

        # Sort labels according to connectivity features
        new_label = []
        for idx, i in enumerate(ita_ba):
            for j in i:
                ba_lh = 'Brodmann.' + str(j) + '-lh'
                ba_rh = 'Brodmann.' + str(j) + '-rh'

                if j == i[0]:
                    sum_lh = lab_dict[ba_lh]
                    sum_rh = lab_dict[ba_rh]
                else:
                    sum_lh += lab_dict[ba_lh]
                    sum_rh += lab_dict[ba_rh]
            new_label.append(sum_lh)
            new_label.append(sum_rh)

        labels = new_label

    elif atlas == 'DKLobes':
        # Desikan-Killiany regions grouped into lobes
        labels = read_labels_from_annot(subject,
                                        parc='aparc',
                                        hemi='both',
                                        surf_name='white',
                                        annot_fname=None,
                                        regexp=None,
                                        subjects_dir=subjects_dir,
                                        verbose=None)
        # Divide into lobes based on
        # https://surfer.nmr.mgh.harvard.edu/fswiki/CorticalParcellation
        frontal = [
            'superiorfrontal', 'rostralmiddlefrontal', 'caudalmiddlefrontal',
            'parsopercularis', 'parstriangularis', 'parsorbitalis',
            'lateralorbitofrontal', 'medialorbitofrontal', 'precentral',
            'paracentral', 'frontalpole', 'rostralanteriorcingulate',
            'caudalanteriorcingulate'
        ]
        parietal = [
            'superiorparietal', 'inferiorparietal', 'supramarginal',
            'postcentral', 'precuneus', 'posteriorcingulate',
            'isthmuscingulate'
        ]
        temporal = [
            'superiortemporal', 'middletemporal', 'inferiortemporal',
            'bankssts', 'fusiform', 'transversetemporal', 'entorhinal',
            'temporalpole', 'parahippocampal'
        ]
        occipital = ['lateraloccipital', 'lingual', 'cuneus', 'pericalcarine']

        all_lobes = {
            'frontal': frontal,
            'parietal': parietal,
            'occipital': occipital,
            'temporal': temporal
        }

        labels = labels[:-1]
        lab_dict = {}
        for lab in labels:
            lab_dict[lab.name] = lab

        # Sort labels according to connectivity features
        new_label = []
        for lobes in list(all_lobes.keys()):
            for idx, name in enumerate(all_lobes[lobes]):
                name_lh = name + '-lh'
                name_rh = name + '-rh'

                if idx == 0:
                    sum_lh = lab_dict[name_lh]
                    sum_rh = lab_dict[name_rh]
                else:
                    sum_lh += lab_dict[name_lh]
                    sum_rh += lab_dict[name_rh]
            sum_lh.name = lobes + '-lh'
            sum_rh.name = lobes + '-rh'
            new_label.append(sum_lh)
            new_label.append(sum_rh)

        labels = new_label
    # elif finished


    # List of time series that have already been saved
    already_saved = [i.split('_' + atlas)[0] for i in listdir(dir_save)]

    count = 0
    run_time = 0

    for filename in tqdm(listdir(dir_prepro_dat)):
        # Only loop over ".set" files
        if not filename.endswith(".set"):
            continue

        # Only choose the files that are not already in the save directory
        if filename.split('.')[0] in already_saved:
            count += 1
            continue

        start = timeit.default_timer()
        timeseries_dict = {}
        ###################################
        # Load preprocessed data
        ###################################
        ID_list = op.join(dir_prepro_dat, filename)
        raw = mne.io.read_raw_eeglab(ID_list, preload=True)

        # Set montage (number of used channels = 64)
        raw.set_montage('biosemi64')

        ##################################################################
        # Forward solution: from brain to electrodes
        ##################################################################
        fwd = mne.make_forward_solution(raw.info,
                                        trans=trans,
                                        src=src,
                                        bem=bem,
                                        eeg=True,
                                        mindist=5.0,
                                        n_jobs=-1)

        ##################################################################
        # Inverse modeling
        ##################################################################
        # Compute noise covariance
        noise_cov = mne.compute_raw_covariance(raw, n_jobs=-1)
        # make an EEG inverse operator
        inverse_operator = make_inverse_operator(raw.info,
                                                 fwd,
                                                 noise_cov,
                                                 loose=0.2,
                                                 depth=0.8)

        raw.set_eeg_reference('average', projection=True)

        # Hardcoded print to give an overview of how much is done and left
        print('\n####################################################' +
              '\n####################################################' +
              '\n####################################################' +
              '\nSubject number: ' + str(count) + ', ' +
              filename.split('.')[0] + '\nRun time/subject = ' +
              str(run_time) + '\nRun time left in hours ~' +
              str((85 - (count + 1)) * (run_time / 60)) +
              '\n####################################################' +
              '\n####################################################' +
              '\n####################################################')

        # Compute inverse solution
        stc = apply_inverse_raw(raw,
                                inverse_operator,
                                lambda2,
                                method=method,
                                nave=1,
                                pick_ori=None,
                                verbose=True,
                                buffer_size=buff_sz)
        del raw

        ##################################################################
        # Extract timeseries from DK regions
        ##################################################################
        # Extract one time series per atlas label (68 for Desikan-Killiany)
        label_ts = mne.extract_label_time_course(stc,
                                                 labels,
                                                 inverse_operator['src'],
                                                 mode='pca_flip',
                                                 return_generator=True)
        del stc
        ###################################################################
        # Construct and save dictionary
        ###################################################################
        subject = filename.split('_')[0]
        stop = timeit.default_timer()
        run_time = (stop - start) / 60
        timeseries_dict = {'timeseries': label_ts, 'time': run_time}
        del label_ts
        # Save to computer
        save_name = dir_save + '/' + subject + '_' + atlas + '_timeseries' + '.pkl'
        #save_name = '/share/FannyMaster/PythonNew/DK_timeseries/DK_source_timeseries_'+ date +'.pkl'

        with open(save_name, 'wb') as file:
            pickle.dump(timeseries_dict, file)

        del timeseries_dict

        count += 1
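
# Usage sketch (assumption; paths and band limits are hypothetical
# placeholders):
#
#     extract_ts(dir_prepro_dat='/data/prepro', dir_save='/data/timeseries',
#                lower=[8.], upper=[13.], atlas='DK')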