示例#1
0
def chop_raw_data(raw, start_time=60.0, stop_time=360.0):
    '''
    Extract a specified duration of raw data and write it to a fif file.

    Five minutes of data are extracted by default. The ``raw`` object is
    modified in place (its ``_data``/``_times`` are replaced by the chopped
    segment) and then saved.

    Parameters
    ----------
    raw : Raw object
        The raw data to chop. Modified in place.
    start_time : float
        Time to extract data from, in seconds. Default is 60.0 seconds.
    stop_time : float
        Time up to which data is to be extracted, in seconds.
        Default is 360.0 seconds.

    Raises
    ------
    ValueError
        If ``start_time`` is not strictly less than ``stop_time``.
    '''
    # Validate the window first; assert is stripped under ``python -O`` so a
    # real exception is used instead.
    if start_time >= stop_time:
        raise ValueError("Start time is greater than stop time.")
    # Check if data is longer than required chop duration (requires one
    # extra minute of slack beyond stop_time).
    if (raw.n_times / (raw.info['sfreq'])) < (stop_time + 60.0):
        logger.info("The data is not long enough.")
        return
    # time_as_index returns an array; take the scalar index explicitly so
    # the slice bounds are plain ints.
    start_idx = int(raw.time_as_index(start_time)[0])
    stop_idx = int(raw.time_as_index(stop_time)[0])
    data, times = raw[:, start_idx:stop_idx]
    raw._data, raw._times = data, times
    dur = int((stop_time - start_time) / 60)
    out_fname = (raw.info['filename'].split('/')[-1].split('.')[0]
                 + '_' + str(dur) + 'm.fif')
    raw.save(out_fname)
    # For the moment, simply warn.
    logger.warning('The file name is not saved in standard form.')
    return
示例#2
0
def _make_interpolator(inst, bad_channels):
    """Find indexes and interpolation matrix to interpolate bad channels

    Only EEG channels are interpolated; MEG channels listed in
    ``bad_channels`` are left untouched (a warning is emitted).

    Parameters
    ----------
    inst : mne.io.Raw, mne.Epochs or mne.Evoked
        The data to interpolate. Must be preloaded.
    bad_channels : list of str
        Names of the channels to interpolate.

    Returns
    -------
    goods_idx : ndarray of bool, shape (n_channels,)
        Mask of good EEG channels.
    bads_idx : ndarray of bool, shape (n_channels,)
        Mask of bad EEG channels.
    interpolation : ndarray
        Interpolation matrix mapping good-channel data to bad channels.
    """
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the correct dtype here.
    bads_idx = np.zeros(len(inst.ch_names), dtype=bool)
    goods_idx = np.zeros(len(inst.ch_names), dtype=bool)

    picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])
    bads_idx[picks] = [inst.ch_names[ch] in bad_channels for ch in picks]
    goods_idx[picks] = True
    goods_idx[bads_idx] = False

    if bads_idx.sum() != len(bad_channels):
        logger.warning('Channel interpolation is currently only implemented '
                       'for EEG. The MEG channels marked as bad will remain '
                       'untouched.')

    pos = get_channel_positions(inst, picks)

    # Make sure only EEG are used
    bads_idx_pos = bads_idx[picks]
    goods_idx_pos = goods_idx[picks]

    pos_good = pos[goods_idx_pos]
    pos_bad = pos[bads_idx_pos]

    # Test spherical fit: mean normalized distance of the good sensors from
    # the fitted sphere center should be close to 1.
    radius, center = _fit_sphere(pos_good)
    distance = np.sqrt(np.sum((pos_good - center) ** 2, 1))
    distance = np.mean(distance / radius)
    if np.abs(1. - distance) > 0.1:
        logger.warning('Your spherical fit is poor, interpolation results are '
                       'likely to be inaccurate.')

    logger.info('Computing interpolation matrix from {0} sensor '
                'positions'.format(len(pos_good)))

    interpolation = _make_interpolation_matrix(pos_good, pos_bad)

    return goods_idx, bads_idx, interpolation
示例#3
0
def read_evokeds_hcp(subject, data_type, onset='stim', sensor_mode='mag',
                     hcp_path=op.curdir):
    """Read HCP processed data

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
    onset : {'stim', 'resp'}
        The event onset. Only considered for epochs and evoked outputs
        The mapping is generous, everything that is not a response is a
        stimulus, in the sense of internal or external events.
    sensor_mode : {'mag', 'planar'}
        The sensor projection. Defaults to 'mag'. Only relevant for
        evoked output.
    hcp_path : str
        The HCP directory, defaults to op.curdir.

    Returns
    -------
    evoked_files : list of mne.Evoked
        The evoked datasets read from all matching files.
    """
    # The measurement info is optional: without the config file we can still
    # read channel positions, just without the transforms.
    try:
        info = read_info_hcp(subject=subject, data_type=data_type,
                             hcp_path=hcp_path, run_index=0)
    except (ValueError, IOError):
        logger.warning('could not find config to complete info. '
                       'reading only channel positions without transforms.')
        info = None

    evoked_files = list()
    for fname in get_file_paths(
            subject=subject, data_type=data_type, onset=onset,
            output='evoked', sensor_mode=sensor_mode, hcp_path=hcp_path):
        evoked_files.extend(_read_evoked(fname, sensor_mode, info))
    return evoked_files
示例#4
0
def combine_meeg(raw_fname, eeg_fname, flow=0.6, fhigh=200,
                 filter_order=2, njobs=-1):
    '''
    Combine MEG data with EEG data. This is done by:
        1. Adjusting MEG and EEG data length.
        2. Resampling EEG data channels to match the sampling
           frequency of the MEG signals.
        3. Writing EEG channels into the MEG fif file and saving to disk.

    Parameters
    ----------
    raw_fname: FIF file containing MEG data.
    eeg_fname: FIF file containing EEG data.
    flow, fhigh: Low and high frequency limits for filtering.
                 (default 0.6-200 Hz)
    filter_order: Order of the Butterworth filter used for filtering.
    njobs : Number of jobs.

    Warning: Please make sure that the filter settings provided
             are stable for both MEG and EEG data.
    Only channels ECG 001, EOG 001, EOG 002 and STI 014 are written.
    '''

    import numpy as np
    import mne
    from mne.utils import logger

    # Warn if EITHER file name deviates from the expected convention
    # (the original ``and`` only warned when both were non-standard).
    if not raw_fname.endswith('-meg.fif') or \
            not eeg_fname.endswith('-eeg.fif'):
        logger.warning('Files names are not standard. '
                       'Please use standard file name extensions.')

    raw = mne.io.Raw(raw_fname, preload=True)
    eeg = mne.io.Raw(eeg_fname, preload=True)

    # Filter both signals with the same IIR Butterworth settings.
    filter_type = 'butter'
    logger.info('The MEG and EEG signals will be filtered from %s to %s'
                % (flow, fhigh))
    picks_fil = mne.pick_types(raw.info, meg=True, eog=True,
                               ecg=True, exclude='bads')
    raw.filter(flow, fhigh, picks=picks_fil, n_jobs=njobs, method='iir',
               iir_params={'ftype': filter_type, 'order': filter_order})
    picks_fil = mne.pick_types(eeg.info, meg=False, eeg=True, exclude='bads')
    eeg.filter(flow, fhigh, picks=picks_fil, n_jobs=njobs, method='iir',
               iir_params={'ftype': filter_type, 'order': filter_order})

    # Find sync pulse S128 in stim channel of EEG signal.
    start_idx_eeg = mne.find_events(eeg, stim_channel='STI 014',
                                    output='onset')[0, 0]

    # Find sync pulse S128 in stim channel of MEG signal.
    start_idx_raw = mne.find_events(raw, stim_channel='STI 014',
                                    output='onset')[0, 0]

    # Start times for both eeg and meg channels
    start_time_eeg = eeg.times[start_idx_eeg]
    start_time_raw = raw.times[start_idx_raw]

    # Stop times for both eeg and meg channels
    stop_time_eeg = eeg.times[eeg.last_samp]
    stop_time_raw = raw.times[raw.last_samp]

    # Choose the signal with the shortest post-sync duration (usually MEG).
    # (Durations were previously computed from the swapped signals; since
    # only the min() is used the result was the same, but the labels were
    # misleading.)
    eeg_duration = stop_time_eeg - start_time_eeg
    meg_duration = stop_time_raw - start_time_raw
    diff_time = min(meg_duration, eeg_duration)

    # Reset both the channel times based on shortest duration
    end_time_eeg = diff_time + start_time_eeg
    end_time_raw = diff_time + start_time_raw

    # Calculate the index of the last time points
    stop_idx_eeg = eeg.time_as_index(round(end_time_eeg, 3))[0]
    stop_idx_raw = raw.time_as_index(round(end_time_raw, 3))[0]

    # Keep only events inside the common window, re-referenced to the sync
    # pulse so they line up with the chopped data.
    events = mne.find_events(eeg, stim_channel='STI 014', output='onset',
                             consecutive=True)
    events = events[np.where(events[:, 0] < stop_idx_eeg)[0], :]
    events = events[np.where(events[:, 0] > start_idx_eeg)[0], :]
    events[:, 0] -= start_idx_eeg

    eeg_data, eeg_times = eeg[:, start_idx_eeg:stop_idx_eeg]
    _, raw_times = raw[:, start_idx_raw:stop_idx_raw]

    # Resample eeg signal
    resamp_list = jumeg_resample(raw.info['sfreq'], eeg.info['sfreq'],
                                 raw_times.shape[0], events=events)

    # Update eeg signal
    eeg._data, eeg._times = eeg_data[:, resamp_list], eeg_times[resamp_list]

    # Update meg signal
    raw._data, raw._times = raw[:, start_idx_raw:stop_idx_raw]
    raw._first_samps[0] = 0
    raw._last_samps[0] = raw._data.shape[1] - 1

    # Identify raw channels for ECG, EOG and STI and replace with EEG data.
    logger.info('Only ECG 001, EOG 001, EOG002 and STI 014 will be updated.')
    raw._data[raw.ch_names.index('ECG 001')] = eeg._data[0]
    raw._data[raw.ch_names.index('EOG 001')] = eeg._data[1]
    raw._data[raw.ch_names.index('EOG 002')] = eeg._data[2]
    raw._data[raw.ch_names.index('STI 014')] = eeg._data[3]

    # Write the combined FIF file to disk.
    raw.save(raw_fname.split('-')[0] + '-raw.fif', overwrite=True)
示例#5
0
def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
                          subjects_dir=None, annot_fname=None,
                          colormap='hsv', hemi='both'):
    """Create a FreeSurfer annotation from a list of labels

    FIX: always write both hemispheres

    Parameters
    ----------
    labels : list with instances of mne.Label
        The labels to create a parcellation from.
    subject : str | None
        The subject for which to write the parcellation for.
    parc : str | None
        The parcellation name to use.
    overwrite : bool
        Overwrite files if they already exist.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    annot_fname : str | None
        Filename of the .annot file. If not None, only this file is written
        and 'parc' and 'subject' are ignored.
    colormap : str
        Colormap to use to generate label colors for labels that do not
        have a color specified.
    hemi : 'both' | 'lh' | 'rh'
        The hemisphere(s) for which to write *.annot files (only applies if
        annot_fname is not specified; default is 'both').

    Raises
    ------
    ValueError
        If a target file exists and ``overwrite`` is False, if label colors
        are duplicated/invalid or labels overlap, or if no free shade of
        gray remains for the "unknown" label.

    Notes
    -----
    Vertices that are not covered by any of the labels are assigned to a label
    named "unknown".
    """
    subjects_dir = get_subjects_dir(subjects_dir)

    # get the .annot filenames and hemispheres
    annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
                                          subjects_dir)

    if not overwrite:
        for fname in annot_fname:
            if op.exists(fname):
                raise ValueError('File %s exists. Use "overwrite=True" to '
                                 'overwrite it' % fname)

    # prepare container for data to save:
    to_save = []
    # keep track of issues found in the labels
    duplicate_colors = []
    invalid_colors = []
    overlap = []
    no_color = (-1, -1, -1, -1)
    no_color_rgb = (-1, -1, -1)
    for hemi, fname in zip(hemis, annot_fname):
        hemi_labels = [label for label in labels if label.hemi == hemi]
        n_hemi_labels = len(hemi_labels)

        if n_hemi_labels == 0:
            ctab = np.empty((0, 4), dtype=np.int32)
            ctab_rgb = ctab[:, :3]
        else:
            hemi_labels.sort(key=lambda label: label.name)

            # convert colors to 0-255 RGBA tuples
            hemi_colors = [no_color if label.color is None else
                           tuple(int(round(255 * i)) for i in label.color)
                           for label in hemi_labels]
            ctab = np.array(hemi_colors, dtype=np.int32)
            ctab_rgb = ctab[:, :3]

            # make color dict (for annot ID, only R, G and B count)
            labels_by_color = defaultdict(list)
            for label, color in zip(hemi_labels, ctab_rgb):
                labels_by_color[tuple(color)].append(label.name)

            # check label colors
            for color, names in labels_by_color.items():
                if color == no_color_rgb:
                    continue

                if color == (0, 0, 0):
                    # we cannot have an all-zero color, otherw. e.g. tksurfer
                    # refuses to read the parcellation
                    msg = ('At least one label contains a color with, "r=0, '
                           'g=0, b=0" value. Some FreeSurfer tools may fail '
                           'to read the parcellation')
                    logger.warning(msg)

                if any(i > 255 for i in color):
                    msg = ("%s: %s (%s)" % (color, ', '.join(names), hemi))
                    invalid_colors.append(msg)

                if len(names) > 1:
                    msg = "%s: %s (%s)" % (color, ', '.join(names), hemi)
                    duplicate_colors.append(msg)

            # replace None values (labels with unspecified color)
            if labels_by_color[no_color_rgb]:
                default_colors = _n_colors(n_hemi_labels, bytes_=True,
                                           cmap=colormap)
                safe_color_i = 0  # keep track of colors known to be in hemi_colors
                # xrange is Python 2 only; range is the Python 3 equivalent
                for i in range(n_hemi_labels):
                    if ctab[i, 0] == -1:
                        color = default_colors[i]
                        # make sure to add no duplicate color
                        while np.any(np.all(color[:3] == ctab_rgb, 1)):
                            color = default_colors[safe_color_i]
                            safe_color_i += 1
                        # assign the color
                        ctab[i] = color

        # find number of vertices in surface
        if subject is not None and subjects_dir is not None:
            fpath = os.path.join(subjects_dir, subject, 'surf',
                                 '%s.white' % hemi)
            points, _ = read_surface(fpath)
            n_vertices = len(points)
        else:
            if len(hemi_labels) > 0:
                max_vert = max(np.max(label.vertices) for label in hemi_labels)
                n_vertices = max_vert + 1
            else:
                n_vertices = 1
            msg = ('    Number of vertices in the surface could not be '
                   'verified because the surface file could not be found; '
                   'specify subject and subjects_dir parameters.')
            logger.warning(msg)

        # Create annot and color table array to write. np.int was removed in
        # NumPy 1.24; use an explicit 64-bit integer dtype instead.
        annot = np.full(n_vertices, -1, dtype=np.int64)
        # create the annotation ids from the colors
        annot_id_coding = np.array((1, 2 ** 8, 2 ** 16))
        annot_ids = list(np.sum(ctab_rgb * annot_id_coding, axis=1))
        for label, annot_id in zip(hemi_labels, annot_ids):
            # make sure the label is not overwriting another label
            if np.any(annot[label.vertices] != -1):
                other_ids = set(annot[label.vertices])
                other_ids.discard(-1)
                other_indices = (annot_ids.index(i) for i in other_ids)
                other_names = (hemi_labels[i].name for i in other_indices)
                other_repr = ', '.join(other_names)
                msg = "%s: %s overlaps %s" % (hemi, label.name, other_repr)
                overlap.append(msg)

            annot[label.vertices] = annot_id

        hemi_names = [label.name for label in hemi_labels]

        # Assign unlabeled vertices to an "unknown" label
        unlabeled = (annot == -1)
        if np.any(unlabeled):
            msg = ("Assigning %i unlabeled vertices to "
                   "'unknown-%s'" % (unlabeled.sum(), hemi))
            logger.info(msg)

            # find an unused color (try shades of gray first)
            for i in range(1, 257):
                if not np.any(np.all((i, i, i) == ctab_rgb, 1)):
                    break
            if i < 256:
                color = (i, i, i, 0)
            else:
                err = ("Need one free shade of gray for 'unknown' label. "
                       "Please modify your label colors, or assign the "
                       "unlabeled vertices to another label.")
                raise ValueError(err)

            # find the id
            annot_id = np.sum(annot_id_coding * color[:3])

            # update data to write
            annot[unlabeled] = annot_id
            ctab = np.vstack((ctab, color))
            hemi_names.append("unknown")

        # convert to FreeSurfer alpha values
        ctab[:, 3] = 255 - ctab[:, 3]

        # remove hemi ending in names
        hemi_names = [name[:-3] if name.endswith(hemi) else name
                      for name in hemi_names]

        to_save.append((fname, annot, ctab, hemi_names))

    issues = []
    if duplicate_colors:
        msg = ("Some labels have the same color values (all labels in one "
               "hemisphere must have a unique color):")
        duplicate_colors.insert(0, msg)
        issues.append(os.linesep.join(duplicate_colors))
    if invalid_colors:
        msg = ("Some labels have invalid color values (all colors should be "
               "RGBA tuples with values between 0 and 1)")
        invalid_colors.insert(0, msg)
        issues.append(os.linesep.join(invalid_colors))
    if overlap:
        msg = ("Some labels occupy vertices that are also occupied by one or "
               "more other labels. Each vertex can only be occupied by a "
               "single label in *.annot files.")
        overlap.insert(0, msg)
        issues.append(os.linesep.join(overlap))

    if issues:
        raise ValueError('\n\n'.join(issues))

    # write it
    for fname, annot, ctab, hemi_names in to_save:
        logger.info('   writing %d labels to %s' % (len(hemi_names), fname))
        _write_annot(fname, annot, ctab, hemi_names)

    logger.info('[done]')