Example #1
def compute_contrast(glm_est, contrast, contrast_type=None):
    """
    Compute contrasts on regression results.

    This is a wrapper function for nilearn.stats.contrasts.

    Parameters
    ----------
    glm_est : dict
        Dictionary of nilearn regression results as returned by `run_glm`.
    contrast : numpy.ndarray of shape (p,) or (q, p)
        Where q = number of contrast vectors and p = number of regressors.
    contrast_type : {None, 't', 'F'}, optional
        Type of the contrast. If None, then defaults to 't' for 1D con_val
        and 'F' for 2D con_val.

    Returns
    -------
    contrast : Contrast instance
        Yields the statistics of the contrast (effects, variance, p-values).
    """
    warn('"compute_contrast" has been deprecated in favor of the more '
         'comprehensive GLM class and will be removed in v1.0.0. '
         'Use the ResultsGLM class "compute_contrast()" method instead.',
         DeprecationWarning)
    return _compute_contrast(glm_est, contrast, contrast_type=contrast_type)
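
Several examples on this page (this one, plot_glm_contrast_topo, run_GLM, glm_region_of_interest) follow the same deprecation pattern: emit a DeprecationWarning, then delegate to the replacement API. A minimal, self-contained sketch of the pattern (names hypothetical):

from warnings import warn

def _new_impl(x):
    return 2 * x

def old_api(x):
    warn('"old_api" has been deprecated in favor of "_new_impl" and will '
         'be removed in a future release.', DeprecationWarning, stacklevel=2)
    return _new_impl(x)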
Example #2
def _tidy_long_to_wide(d, expand_output=True):

    indices = ['ch_name']
    if 'Condition' in d.columns:
        # Regression results have a column condition
        indices.append('Condition')
    if 'ContrastType' in d.columns:
        # Contrast results have a column ContrastType
        indices.append('ContrastType')

    d = d.set_index(indices)
    d = d.pivot_table(columns='variable', values='value', index=indices)
    d.reset_index(inplace=True)

    if expand_output:
        try:
            d["Source"] = [
                re.search(r'S(\d+)_D(\d+) (\w+)', ch).group(1)
                for ch in d["ch_name"]
            ]
            d["Detector"] = [
                re.search(r'S(\d+)_D(\d+) (\w+)', ch).group(2)
                for ch in d["ch_name"]
            ]
            d["Chroma"] = [
                re.search(r'S(\d+)_D(\d+) (\w+)', ch).group(3)
                for ch in d["ch_name"]
            ]
        except AttributeError:
            warn("Non standard source detector names used")
        d["Significant"] = d["p_value"] < 0.05

    return d
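
The expand_output branch assumes fNIRS-style channel names such as "S1_D4 hbo" (source, detector, chroma). A minimal, self-contained sketch of the parsing it performs:

import re

match = re.search(r'S(\d+)_D(\d+) (\w+)', 'S1_D4 hbo')
source, detector, chroma = match.groups()
print(source, detector, chroma)  # -> 1 4 hbo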
Example #3
def test_warn(capsys):
    """Test the smart warn() function."""
    with pytest.warns(RuntimeWarning, match='foo'):
        warn('foo')
    captured = capsys.readouterr()
    assert captured.out == ''  # gh-5592
    assert captured.err == ''  # this is because pytest.warns took it already
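
The test leans on pytest.warns both asserting that the warning fires and capturing it before it reaches stderr. A self-contained sketch of that pattern, using the standard-library warn rather than MNE's:

import warnings
import pytest

def test_warns_sketch():
    with pytest.warns(RuntimeWarning, match='foo'):
        warnings.warn('foo', RuntimeWarning)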
Example #4
def test_warn(capsys, tmp_path, monkeypatch):
    """Test the smart warn() function."""
    with pytest.warns(RuntimeWarning, match='foo'):
        warn('foo')
    captured = capsys.readouterr()
    assert captured.out == ''  # gh-5592
    assert captured.err == ''  # this is because pytest.warns took it already
    # test ignore_namespaces
    bad_name = tmp_path / 'bad.fif'
    raw = RawArray(np.zeros((1, 1)), create_info(1, 1000., 'eeg'))
    with pytest.warns(RuntimeWarning, match='filename') as ws:
        raw.save(bad_name)
    assert len(ws) == 1
    assert 'test_logging.py' in ws[0].filename  # this file (it's in tests/)

    def warn_wrap(msg):
        warn(msg, ignore_namespaces=())

    monkeypatch.setattr(check, 'warn', warn_wrap)
    with pytest.warns(RuntimeWarning, match='filename') as ws:
        raw.save(bad_name, overwrite=True)

    assert len(ws) == 1
    assert 'test_logging.py' not in ws[0].filename  # this file
    assert '_logging.py' in ws[0].filename  # where `mne.utils.warn` lives
Example #5
def plot_glm_contrast_topo(inst, contrast, figsize=(12, 7), sphere=None):
    """
    Plot topomap of NIRS GLM data.

    Parameters
    ----------
    inst : instance of Info or Raw
        Raw data or info structure used to generate the GLM results.
    contrast : dict
        As in nilearn.stats.compute_contrast.
    figsize : tuple
        Figure dimensions in inches. TODO: Remove this; how does MNE
        usually deal with this?
    sphere : numbers
        As specified in MNE.

    Returns
    -------
    fig : figure
        Figure of each design matrix component for hbo (top row)
        and hbr (bottom row).
    """
    warn(
        '"plot_glm_contrast_topo" has been deprecated in favor of the more '
        'comprehensive GLM class and will be removed in v1.0.0. '
        'Use the ContrastResults class "plot_topo()" method instead.',
        DeprecationWarning)
    return _plot_glm_contrast_topo(inst,
                                   contrast,
                                   figsize=figsize,
                                   sphere=sphere)
Example #6
def my_function_B(new_param=None, old_param='not_used'):
    if old_param != 'not_used':
        warn(('old_param is deprecated and will be replaced'
              ' by new_param in 0.XX.'), DeprecationWarning)
        new_param = old_param
    # Do what you have to do with new_param
    return 'foo'
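
A variation on the same idea that avoids the 'not_used' magic string by using a sentinel object (all names below are hypothetical):

import warnings

_UNSET = object()

def my_function_c(new_param=None, old_param=_UNSET):
    if old_param is not _UNSET:
        warnings.warn('old_param is deprecated; use new_param instead.',
                      DeprecationWarning, stacklevel=2)
        new_param = old_param
    return new_param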
Example #7
def _handle_participants_reading(participants_fname,
                                 raw,
                                 subject,
                                 verbose=None):
    participants_tsv = _from_tsv(participants_fname)
    subjects = participants_tsv['participant_id']
    row_ind = subjects.index(subject)

    # set data from participants tsv into subject_info
    for infokey, infovalue in participants_tsv.items():
        if infokey == 'sex' or infokey == 'hand':
            value = _map_options(what=infokey,
                                 key=infovalue[row_ind],
                                 fro='bids',
                                 to='mne')
            # We don't know how to translate to MNE, so skip.
            if value is None:
                info_str = ('subject sex' if infokey == 'sex'
                            else 'subject handedness')
                warn(f'Unable to map `{infokey}` value to MNE. '
                     f'Not setting {info_str}.')
                continue
        else:
            value = infovalue[row_ind]
        # add data into raw.Info
        if raw.info['subject_info'] is None:
            raw.info['subject_info'] = dict()
        raw.info['subject_info'][infokey] = value

    return raw
Example #8
def _handle_tkras_trans(
    elec_coords: np.ndarray,
    img: nb.Nifti2Image,
    revert_tkras: bool,
    verbose: bool = True,
):
    """Handle FreeSurfer MRI <-> TKRAS."""
    # get the voxel to tkRAS transform
    if "get_vox2ras_tkr" in dir(img.header):
        vox2ras_tkr = img.header.get_vox2ras_tkr()
    else:
        warn(
            f"Unable to programmatically get vox2ras TKR "
            f"from {img.get_filename()}, so setting manually."
        )
        vox2ras_tkr = [
            [-1.0, 0.0, 0.0, 128.0],
            [0.0, 0.0, 1.0, -128.0],
            [0.0, -1.0, 0.0, 128.0],
            [0.0, 0.0, 0.0, 1.0],
        ]

    if verbose:
        print(f"Using Vox2TKRAS affine: {vox2ras_tkr}.")

    if revert_tkras:
        affine = np.linalg.inv(vox2ras_tkr)
    else:
        affine = vox2ras_tkr

    # now convert voxels to tkras
    elec_coords = apply_affine(affine, elec_coords)
    return elec_coords
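
apply_affine (presumably nibabel.affines.apply_affine, given the nibabel types above) maps (N, 3) coordinates through a 4x4 affine. A numpy-only sketch of the equivalent computation:

import numpy as np

def apply_affine_sketch(affine, coords):
    affine = np.asarray(affine)
    coords = np.asarray(coords)
    homogeneous = np.c_[coords, np.ones(len(coords))]  # append a 1 per row
    return (homogeneous @ affine.T)[:, :3]

vox2ras_tkr = np.array([[-1., 0., 0., 128.],
                        [0., 0., 1., -128.],
                        [0., -1., 0., 128.],
                        [0., 0., 0., 1.]])
print(apply_affine_sketch(vox2ras_tkr, [[128., 128., 128.]]))  # [[0. 0. 0.]]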
Example #9
    def fit(self,
            raw: mne.io.RawArray,
            start: float = None,
            stop: float = None,
            reject_by_annotation: bool = True,
            gfp: bool = False,
            n_jobs: int = 1,
            verbose=None) -> mod_Kmeans:
        """[summary]

        Args:
            raw (mne.io.RawArray): [description]
            start (float, optional): [description]. Defaults to None.
            stop (float, optional): [description]. Defaults to None.
            reject_by_annotation (bool, optional): [description]. Defaults to True.
            gfp (bool, optional): [description]. Defaults to False.
            n_jobs (int, optional): [description]. Defaults to 1.
            verbose ([type], optional): [description]. Defaults to None.

        Returns:
            mod_Kmeans: [description]
        """
        _validate_type(raw, (BaseRaw), 'raw', 'Raw')
        reject_by_annotation = 'omit' if reject_by_annotation else None
        start, stop = _check_start_stop(raw, start, stop)
        n_jobs = check_n_jobs(n_jobs)

        if len(raw.info['bads']) != 0:
            warn('Bad channels are present in the recording. '
                 'They will still be used to compute microstate topographies. '
                 'Consider using Raw.pick() or Raw.interpolate_bads()'
                 ' before fitting.')

        data = raw.get_data(start,
                            stop,
                            reject_by_annotation=reject_by_annotation)
        if gfp:
            data = _extract_gfps(data)

        best_gev = 0
        if n_jobs == 1:
            for _ in range(self.n_init):
                gev, maps, segmentation = self._run_mod_kmeans(data)
                if gev > best_gev:
                    best_gev, best_maps, best_segmentation = gev, maps, segmentation
        else:
            parallel, p_fun, _ = parallel_func(self._run_mod_kmeans,
                                               total=self.n_init,
                                               n_jobs=n_jobs)
            runs = parallel(p_fun(data) for i in range(self.n_init))
            runs = np.array(runs)
            best_run = np.argmax(runs[:, 0])
            best_gev, best_maps, best_segmentation = runs[best_run]

        self.cluster_centers = best_maps
        self.GEV = best_gev
        self.labels = best_segmentation
        self.current_fit = True
        return self
Example #10
        def wrapped(*args, **kwargs):
            warn(_MSG, DeprecationWarning)
            # Rename the deprecated keyword, transforming its value if a
            # transform callable was supplied.
            if old_param in kwargs:
                if transform is None:
                    kwargs[new_param] = kwargs.pop(old_param)
                else:
                    kwargs[new_param] = transform(kwargs.pop(old_param))
            return f(*args, **kwargs)
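
For context, a self-contained sketch of the decorator factory such an inner function typically belongs to (the factory below is hypothetical; only wrapped appears in the original):

import functools
import warnings

def renamed_kwarg(old_param, new_param, transform=None,
                  msg='Keyword argument was renamed.'):
    def decorator(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            if old_param in kwargs:
                warnings.warn(msg, DeprecationWarning, stacklevel=2)
                value = kwargs.pop(old_param)
                kwargs[new_param] = (value if transform is None
                                     else transform(value))
            return f(*args, **kwargs)
        return wrapped
    return decorator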
Example #11
def _interpolate_bads_eeg(inst, picks=None, verbose=None):
    """ Interpolate bad EEG channels.

    Operates in place.

    Parameters
    ----------
    inst : mne.io.Raw, mne.Epochs or mne.Evoked
        The data to interpolate. Must be preloaded.
    picks: np.ndarray, shape(n_channels, ) | list | None
        The channel indices to be used for interpolation.
    """
    from mne import pick_types
    from mne.bem import _fit_sphere
    from mne.utils import logger, warn
    from mne.channels.interpolation import _do_interp_dots
    from mne.channels.interpolation import _make_interpolation_matrix
    import numpy as np

    if picks is None:
        picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])

    bads_idx = np.zeros(len(inst.ch_names), dtype=bool)
    goods_idx = np.zeros(len(inst.ch_names), dtype=bool)

    inst.info._check_consistency()
    bads_idx[picks] = [inst.ch_names[ch] in inst.info['bads'] for ch in picks]

    if len(picks) == 0 or bads_idx.sum() == 0:
        return

    goods_idx[picks] = True
    goods_idx[bads_idx] = False

    pos = inst._get_channel_positions(picks)

    # Make sure only good EEG are used
    bads_idx_pos = bads_idx[picks]
    goods_idx_pos = goods_idx[picks]
    pos_good = pos[goods_idx_pos]
    pos_bad = pos[bads_idx_pos]

    # test spherical fit
    radius, center = _fit_sphere(pos_good)
    distance = np.sqrt(np.sum((pos_good - center)**2, 1))
    distance = np.mean(distance / radius)
    if np.abs(1. - distance) > 0.1:
        warn('Your spherical fit is poor, interpolation results are '
             'likely to be inaccurate.')

    logger.info('Computing interpolation matrix from {0} sensor '
                'positions'.format(len(pos_good)))

    interpolation = _make_interpolation_matrix(pos_good, pos_bad)

    logger.info('Interpolating {0} sensors'.format(len(pos_bad)))
    _do_interp_dots(inst, interpolation, goods_idx, bads_idx)
Example #12
def _read_events(events_data, event_id, raw, ext, verbose=None):
    """Read in events data.

    Parameters
    ----------
    events_data : str | array | None
        The events file. If a string, a path to the events file. If an array,
        the MNE events array (shape n_events, 3). If None, events will be
        inferred from the stim channel using `find_events`.
    event_id : dict
        The event id dict used to create a 'trial_type' column in events.tsv,
        mapping a description key to an integer valued event code.
    raw : instance of Raw
        The data as MNE-Python Raw object.
    ext : str
        The extension of the original data file.
    verbose : bool | str | int | None
        If not None, override default verbose level (see :func:`mne.verbose`).

    Returns
    -------
    events : array, shape = (n_events, 3)
        The first column contains the event time in samples and the third
        column contains the event id. The second column is ignored for now but
        typically contains the value of the trigger channel either immediately
        before the event or immediately after.

    """
    if isinstance(events_data, str):
        events = read_events(events_data, verbose=verbose).astype(int)
    elif isinstance(events_data, np.ndarray):
        if events_data.ndim != 2:
            raise ValueError('Events must have two dimensions, '
                             'found %s' % events_data.ndim)
        if events_data.shape[1] != 3:
            raise ValueError('Events must have second dimension of length 3, '
                             'found %s' % events_data.shape[1])
        events = events_data
    elif 'stim' in raw:
        events = find_events(raw,
                             min_duration=0.001,
                             initial_event=True,
                             verbose=verbose)
    elif ext in ['.vhdr', '.set'] and check_version('mne', '0.18'):
        events, event_id = events_from_annotations(raw,
                                                   event_id,
                                                   verbose=verbose)
    else:
        warn('No events found or provided. Please make sure to'
             ' set channel type using raw.set_channel_types'
             ' or provide events_data.')
        events = None
    return events, event_id
Example #13
    def __init__(self,
                 n_components=4,
                 reg=None,
                 log=None,
                 cov_est="concat",
                 transform_into='average_power',
                 norm_trace=None):
        """Init of CSP."""
        # Init default CSP
        if not isinstance(n_components, int):
            raise ValueError('n_components must be an integer.')
        self.n_components = n_components

        # Init default regularization
        if (reg is not None and reg not in ('oas', 'ledoit_wolf')
                and not (isinstance(reg, (float, int))
                         and 0. <= reg <= 1.)):
            raise ValueError('reg must be None, "oas", "ledoit_wolf" or a '
                             'float between 0. and 1.')
        self.reg = reg

        # Init default cov_est
        if not (cov_est == "concat" or cov_est == "epoch"):
            raise ValueError("unknown covariance estimation method")
        self.cov_est = cov_est

        # Init default transform_into
        if transform_into not in ('average_power', 'csp_space'):
            raise ValueError('transform_into must be "average_power" or '
                             '"csp_space".')
        self.transform_into = transform_into

        # Init default log
        if transform_into == 'average_power':
            if log is not None and not isinstance(log, bool):
                raise ValueError('log must be a boolean if transform_into == '
                                 '"average_power".')
        else:
            if log is not None:
                raise ValueError('log must be a None if transform_into == '
                                 '"csp_space".')
        self.log = log
        if norm_trace is None:
            norm_trace = True
            warn(
                "norm_trace defaults to True in 0.15, but will change to "
                "False in 0.16. Set it explicitly to avoid this warning.",
                DeprecationWarning)

        if not isinstance(norm_trace, bool):
            raise ValueError('norm_trace must be a bool.')
        self.norm_trace = norm_trace
Example #14
def picks_pair_to_idx(raw, sd_pairs, on_missing='error'):
    """
    Return a list of picks for specified source detector pairs.

    If multiple channels have the same source detector pair,
    for example if there are multiple wavelengths or chromophores, then
    all channels matching the requested sources and detectors will be
    returned.

    Parameters
    ----------
    raw : instance of Raw
        The haemoglobin data.
    sd_pairs : list of lists
        List of source detector pairs. For example, to request the picks for
        channels comprising of source 1 detector 4 and source 13
        detector 4 you would specify [[1, 4], [13, 4]].
    on_missing : str
        What to do if one or several requested source detector are not found
        in the recording.
        Valid keys are 'error' | 'warning' | 'ignore'.
        Default is 'error'. If on_missing is 'warning' it will proceed but
        warn; if 'ignore' it will proceed silently. Note that if none of
        the requested source detector pairs are found in the data, an
        error will be raised irrespective of this parameter.

    Returns
    -------
    picks : list of integers
        List of picks corresponding to requested source detector pairs.
    """

    ch_names = raw.ch_names
    picks = list()

    for pair in sd_pairs:
        pair_name = "S" + str(pair[0]) + "_D" + str(pair[1]) + " "
        pair_picks = np.where([pair_name in ch for ch in ch_names])[0]
        if len(pair_picks) == 0:
            msg = ('No matching channels found for source %s '
                   'detector %s' % (pair[0], pair[1]))
            if on_missing == 'error':
                raise ValueError(msg)
            elif on_missing == 'warning':
                warn(msg)
            else:
                # on_missing == 'ignore'
                continue
        picks.extend(pair_picks)

    return picks
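
The pair matching is a plain substring test against the channel names; the trailing space in pair_name is what keeps "S1_D4 " from also matching names such as "S1_D40 hbo". A minimal check:

import numpy as np

ch_names = ['S1_D4 hbo', 'S1_D4 hbr', 'S13_D4 hbo']
pair_name = 'S1_D4 '
print(np.where([pair_name in ch for ch in ch_names])[0])  # -> [0 1]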
Example #15
def _handle_channels_reading(channels_fname, raw):
    """Read associated channels.tsv and populate raw.

    Updates status (bad) and types of channels.
    """
    logger.info('Reading channel info from {}.'.format(channels_fname))
    channels_dict = _from_tsv(channels_fname)
    ch_names_tsv = channels_dict['name']

    # Now we can do some work.
    # The "type" column is mandatory in BIDS. We can use it to set channel
    # types in the raw data using a mapping between channel types
    channel_type_dict = dict()

    # Get the best mapping we currently have from BIDS to MNE nomenclature
    bids_to_mne_ch_types = _get_ch_type_mapping(fro='bids', to='mne')
    ch_types_json = channels_dict['type']
    for ch_name, ch_type in zip(ch_names_tsv, ch_types_json):

        # Try to map from BIDS nomenclature to MNE, leave channel type
        # untouched if we are uncertain
        updated_ch_type = bids_to_mne_ch_types.get(ch_type, None)

        if updated_ch_type is None:
            # XXX Try again with uppercase spelling – this should be removed
            # XXX once https://github.com/bids-standard/bids-validator/issues/1018  # noqa:E501
            # XXX has been resolved.
            # XXX x-ref https://github.com/mne-tools/mne-bids/issues/481
            updated_ch_type = bids_to_mne_ch_types.get(ch_type.upper(), None)
            if updated_ch_type is not None:
                msg = ('The BIDS dataset contains channel types in lowercase '
                       'spelling. This violates the BIDS specification and '
                       'will raise an error in the future.')
                warn(msg)

        if updated_ch_type is not None:
            channel_type_dict[ch_name] = updated_ch_type

    # Rename channels in loaded Raw to match those read from the BIDS sidecar
    for bids_ch_name, raw_ch_name in zip(ch_names_tsv, raw.ch_names.copy()):
        if bids_ch_name != raw_ch_name:
            raw.rename_channels({raw_ch_name: bids_ch_name})

    # Set the channel types in the raw data according to channels.tsv
    raw.set_channel_types(channel_type_dict)

    # Set bad channels based on _channels.tsv sidecar
    if 'status' in channels_dict:
        bads = _get_bads_from_tsv_data(channels_dict)
        raw.info['bads'] = bads

    return raw
Example #16
def _get_onset_event_id(events, event_id):
    if event_id is None:
        return None

    # cast to numpy array
    events = np.asarray(events)

    if event_id not in events[:, 2]:
        warn(f"{event_id} event ID is not inside the events data structure.")
        return None

    event_ind = np.where(events[:, 2] == event_id)[0][0]
    return events[event_ind, 0]
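
MNE events arrays have shape (n_events, 3): onset sample, previous trigger value, event code. A self-contained check of the lookup above:

import numpy as np

events = np.array([[100, 0, 1],
                   [250, 0, 2],
                   [400, 0, 1]])
event_id = 2
event_ind = np.where(events[:, 2] == event_id)[0][0]
print(events[event_ind, 0])  # -> 250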
Example #17
    def __init__(self, input_fname, montage, eog=(), event_id=None,
                 event_id_func='strip_to_integer', preload=False,
                 verbose=None):
        """Read ANT .cnt file.
        """
        #from scipy import io
        import libeep
        #basedir = op.dirname(input_fname)
        eeg= libeep.read_cnt(input_fname)

        last_samps = [eeg.get_sample_count() - 1]
        info = _get_info(eeg, montage, eog=eog)

        stim_chan = dict(ch_name='STI 014', coil_type=FIFF.FIFFV_COIL_NONE,
                         kind=FIFF.FIFFV_STIM_CH, logno=len(info["chs"]) + 1,
                         scanno=len(info["chs"]) + 1, cal=1., range=1.,
                         loc=np.zeros(12), unit=FIFF.FIFF_UNIT_NONE,
                         unit_mul=0., coord_frame=FIFF.FIFFV_COORD_UNKNOWN)
        info['chs'].append(stim_chan)
        info._update_redundant()

        events = _read_antcnt_events(eeg, event_id=event_id,
                                     event_id_func=event_id_func)
        self._create_event_ch(events, n_samples=eeg.get_sample_count())

        # read the data

        if preload is False or isinstance(preload, str):
            warn('Data will be preloaded. preload=False or a string '
                 'preload is not supported when the data is stored in '
                 'the .cnt file')

        n_chan = eeg.get_channel_count()
        n_times = eeg.get_sample_count()

        data = np.empty((n_chan + 1, n_times), dtype=np.double)
        x = np.asarray(eeg.get_samples(0, n_times))
        x.shape = (n_times, n_chan)
        data[:-1] = x.transpose()

        data *= CAL
        data[-1] = self._event_ch
        super(RawANTCNT, self).__init__(
            info, data, last_samps=last_samps, orig_format='double',
            verbose=verbose)
Example #18
def _reg_pinv(x, reg):
    """Compute a regularized pseudoinverse of a square array."""
    if reg == 0:
        covrank = estimate_rank(x,
                                tol='auto',
                                norm=False,
                                return_singular=False)
        if covrank < x.shape[0]:
            warn('Covariance matrix is rank-deficient, but no regularization '
                 'is done.')

    # This adds it to the diagonal without using np.eye
    d = reg * np.trace(x) / len(x)
    x.flat[::x.shape[0] + 1] += d
    return linalg.pinv(x), d
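
The flat-indexing trick above adds d to the diagonal in place without allocating an identity matrix. A quick numeric check:

import numpy as np

x = np.array([[1., 2.],
              [3., 4.]])
x.flat[::x.shape[0] + 1] += 0.5
print(np.diag(x))  # -> [1.5 4.5]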
Example #19
    def fit(self, X, y=None):
        """Fit the model according to the optionally given training data.

        Parameters
        ----------
        X : mne.io.Raw of shape (n_samples, n_features) | pd.DataFrame
            Training vector, where n_samples is the number of samples and
            n_features is the number of features. In MNE-HFO, n_features
            are the number of time points in the EEG data, and n_samples
            are the number of channels.

        y : array-like of shape (n_samples, n_output)
            Target vector relative to X.

        Returns
        -------
        self
            Fitted estimator.

        Notes
        -----
        All detectors use a sliding window to compute HFOs in windows.
        """
        X, y = self._check_input_raw(X, y)

        sfreq = self.sfreq
        if sfreq < MINIMUM_SUGGESTED_SFREQ:
            warn(f'Sampling frequency of {sfreq} is '
                 f'below the suggested rate of {MINIMUM_SUGGESTED_SFREQ}. '
                 f'Please use with caution.')

        # compute HFO related statistic for the detector
        hfo_statistic_arr = self._compute_hfo_statistic(X)

        # apply the threshold(s) to the statistic to get detections
        hfo_detection_arr = self._threshold_statistic(hfo_statistic_arr)

        # merge contiguous detections into discrete hfo events
        # store hfo event endpoints per channel
        chs_hfos = {
            ch_name: self._post_process_ch_hfos(hfo_detection_arr[idx], idx)
            for idx, ch_name in enumerate(self.ch_names)
        }

        self.chs_hfos_ = chs_hfos
        self.hfo_event_arr_ = hfo_statistic_arr
        self._create_annotation_df(self.chs_hfos_dict, self.hfo_name)
        return self
Example #20
def _tidy_RegressionResults(data, glm_est, design_matrix):

    if data.ch_names != list(glm_est.keys()):
        warn("MNE data structure does not match regression results")

    theta_estimates = np.zeros((len(glm_est), len(design_matrix.columns)))
    t_estimates = np.zeros((len(glm_est), len(design_matrix.columns)))
    df_estimates = np.zeros((len(glm_est), len(design_matrix.columns)))
    p_estimates = np.zeros((len(glm_est), len(design_matrix.columns)))
    mse_estimates = np.zeros((len(glm_est), len(design_matrix.columns)))

    for idx, name in enumerate(glm_est.keys()):
        theta_estimates[idx, :] = glm_est[name].theta.T
        df_estimates[idx, :] = glm_est[name].df_model
        mse_estimates[idx, :] = glm_est[name].MSE[0]
        for cond_idx, cond in enumerate(design_matrix.columns):
            t_estimates[idx, cond_idx] = glm_est[name].t(
                column=cond_idx)
            p_estimates[idx, cond_idx] = 2 * stats.t.cdf(
                -1.0 * np.abs(t_estimates[idx, cond_idx]),
                df=df_estimates[idx, cond_idx])

    rows = []
    for ch_idx, ch in enumerate(data.ch_names):
        for cond_idx, cond in enumerate(design_matrix.columns):
            for variable, estimates in (("theta", theta_estimates),
                                        ("t", t_estimates),
                                        ("df", df_estimates),
                                        ("p_value", p_estimates),
                                        ("mse", mse_estimates)):
                rows.append({'ch_name': ch, 'condition': cond,
                             'variable': variable,
                             'value': estimates[ch_idx][cond_idx]})

    # DataFrame.append was removed in pandas 2.0; building from a list of
    # dicts produces the same long-format frame in one call.
    return pd.DataFrame(rows)
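
The two-sided p-value computation used in the loop above, in isolation (numbers illustrative):

import numpy as np
from scipy import stats

t_val, dof = 2.3, 58.0
p = 2 * stats.t.cdf(-np.abs(t_val), df=dof)
print(p)  # roughly 0.025 for these values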
Example #21
def _read_antcnt_events(eeg, event_id=None, event_id_func='strip_to_integer'):
        """Create events array from ANT cnt structure
        An event array is constructed by looking up events in the
        event_id, trying to reduce them to their integer part otherwise, and
        entirely dropping them (with a warning) if this is impossible.
        Returns a 1x3 array of zeros if no events are found."""
        if event_id_func == 'strip_to_integer':
            event_id_func = _strip_to_integer
        if event_id is None:
            event_id = dict()

        types = [eeg.get_trigger(i)[0] for i in range(eeg.get_trigger_count())]
        latencies = [eeg.get_trigger(i)[1]
                     for i in range(eeg.get_trigger_count())]

        if len(types) < 1:  # if there are 0 events, we can exit here
            logger.info('No events found, returning empty stim channel ...')
            return np.zeros((0, 3))

        not_in_event_id = set(x for x in types if x not in event_id)
        not_purely_numeric = set(x for x in not_in_event_id if not x.isdigit())
        no_numbers = set([x for x in not_purely_numeric
                          if not any([d.isdigit() for d in x])])
        have_integers = set([x for x in not_purely_numeric
                             if x not in no_numbers])
        if len(not_purely_numeric) > 0:
            basewarn = "Events like the following will be dropped"
            n_no_numbers, n_have_integers = len(no_numbers), len(have_integers)
            if n_no_numbers > 0:
                no_num_warn = " entirely: {0}, {1} in total"
                warn(basewarn + no_num_warn.format(list(no_numbers)[:5],
                                                   n_no_numbers))
            if n_have_integers > 0 and event_id_func is None:
                intwarn = (", but could be reduced to their integer part "
                           "instead with the default `event_id_func`: "
                           "{0}, {1} in total")
                warn(basewarn + intwarn.format(list(have_integers)[:5],
                                               n_have_integers))

        events = list()
        for tt, latency in zip(types, latencies):
            try:  # look up the event in event_id and if not, try event_id_func
                event_code = event_id[tt] if tt in event_id else event_id_func(tt)
                events.append([int(latency), 1, event_code])
            except (ValueError, TypeError):  # if event_id_func fails
                pass  # We're already raising warnings above, so we just drop

        if len(events) < len(types):
            missings = len(types) - len(events)
            msg = ("{0}/{1} event codes could not be mapped to integers. Use "
                   "the 'event_id' parameter to map such events manually.")
            warn(msg.format(missings, len(types)))
            if len(events) < 1:
                warn("As is, the trigger channel will consist entirely of zeros.")
                return np.zeros((0, 3))

        return np.asarray(events)
Example #22
def _summarize_scans(root, session=None, verbose=True):
    """Summarize scans in BIDS root directory.

    Summarizes scans only if there is a *_scans.tsv file.

    Parameters
    ----------
    root : str | pathlib.Path
        The path of the root of the BIDS compatible folder.
    session : str, optional
        The session for an item. Corresponds to "ses".
    verbose : bool
        Set verbose output to true or false.

    Returns
    -------
    template_dict : dict
        A dictionary of values for various template strings.
    """
    root = Path(root)
    if session is None:
        search_str = '*_scans.tsv'
    else:
        search_str = f'*ses-{session}*_scans.tsv'
    scans_fpaths = list(root.rglob(search_str))
    if len(scans_fpaths) == 0:
        warn('No *scans.tsv files found. Currently, '
             'we do not generate a report without the scans.tsv files.')
        return dict()

    if verbose:
        print(f'Summarizing scans.tsv files {scans_fpaths}...')

    # summarize sidecar.json, channels.tsv template
    sidecar_dict = _summarize_sidecar_json(root, scans_fpaths, verbose=verbose)
    channels_dict = _summarize_channels_tsv(root,
                                            scans_fpaths,
                                            verbose=verbose)
    template_dict = dict()
    template_dict.update(**sidecar_dict)
    template_dict.update(**channels_dict)

    return template_dict
Example #23
def run_GLM(raw, design_matrix, noise_model='ar1', bins=0,
            n_jobs=1, verbose=0):
    """
    Run GLM on data using supplied design matrix.

    This is a wrapper function for nilearn.stats.first_level_model.run_glm.

    Parameters
    ----------
    raw : instance of Raw
        The haemoglobin data.
    design_matrix : as specified in Nilearn
        The design matrix.
    noise_model : {'ar1', 'ols', 'arN', 'auto'}, optional
        The temporal variance model. Defaults to first order
        auto regressive model 'ar1'.
        The AR model can be set to any integer value by modifying the value
        of N. E.g. use `ar5` for a fifth order model.
        If the string `auto` is provided, a model with order 4 times the
        sample rate will be used.
    bins : int, optional
        Maximum number of discrete bins for the AR coef histogram/clustering.
        By default the value is 0, which will set the number of bins to the
        number of channels, effectively estimating the AR model for each
        channel.
    n_jobs : int, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : int, optional
        The verbosity level. Default is 0.

    Returns
    -------
    glm_estimates : dict
        Keys correspond to the different labels; values are
        RegressionResults instances corresponding to the voxels.
    """
    warn('"run_GLM" has been deprecated in favor of the more '
         'comprehensive run_glm function, and will be removed in v1.0.0. '
         'See the changelog for further details.',
         DeprecationWarning)
    res = run_glm(raw, design_matrix, noise_model=noise_model, bins=bins,
                  n_jobs=n_jobs, verbose=verbose)
    return res.data
Example #24
def glm_region_of_interest(glm, group_by, cond_idx, cond_name, weighted=True):
    """
    Calculate statistics for region of interest.

    Parameters
    ----------
    glm : dict
        Dictionary of regression results, as returned by ``run_glm``,
        keyed by channel name.
    group_by : dict
        Specifies which channels are aggregated into a single ROI.
        The dict key will be used as the ROI label and the dict
        values must be lists of picks (either channel names or integer indices
        of ``epochs.ch_names``). For example::

            group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])

        Note that within a dict entry all channels must have the same type.
    cond_idx : int
        Index of condition of interest.
    cond_name : str
        Name to be used for condition.
    weighted : bool
        If True, channels are weighted by the inverse of their standard
        error.

    Returns
    -------
    stats : DataFrame
        Statistics for each ROI.
    """
    warn(
        '"glm_region_of_interest" has been deprecated in favor of the more '
        'comprehensive GLM class and will be removed in v1.0.0. '
        'Use the RegressionResults class "region_of_interest_dataframe()" '
        'method instead.', DeprecationWarning)

    return _glm_region_of_interest(glm,
                                   group_by,
                                   cond_idx,
                                   cond_name,
                                   weighted=weighted)
Example #25
def _check_anonymize(anonymize, raw, ext):
    """Check the `anonymize` dict."""
    # if info['meas_date'] None, then the dates are not stored
    if raw.info['meas_date'] is None:
        daysback = None
    else:
        if 'daysback' not in anonymize or anonymize['daysback'] is None:
            raise ValueError('`daysback` argument required to anonymize.')
        daysback = anonymize['daysback']
        daysback_min, daysback_max = _get_anonymization_daysback(raw)
        if daysback < daysback_min:
            warn('`daysback` is too small; the measurement date '
                 'is after 1925, which is not recommended by BIDS. '
                 'The minimum `daysback` value for changing the '
                 'measurement date of this data to before this date '
                 'is %s' % daysback_min)
        if ext == '.fif' and daysback > daysback_max:
            raise ValueError('`daysback` exceeds maximum value MNE '
                             'is able to store in FIF format, must '
                             'be less than %i' % daysback_max)
    keep_his = anonymize.get('keep_his', False)
    return daysback, keep_his
Example #26
def _handle_info_reading(sidecar_fname, raw, verbose=None):
    """Read associated sidecar.json and populate raw.

    Handle PowerLineFrequency of recording.
    """
    with open(sidecar_fname, "r") as fin:
        sidecar_json = json.load(fin)

    # read in the sidecar JSON's line frequency
    line_freq = sidecar_json.get("PowerLineFrequency")
    if line_freq == "n/a":
        line_freq = None

    if line_freq is None and raw.info["line_freq"] is None:
        # estimate line noise using PSD from multitaper FFT
        powerlinefrequency = _estimate_line_freq(raw, verbose=verbose)
        raw.info["line_freq"] = powerlinefrequency
        warn('No line frequency found, defaulting to {} Hz '
             'estimated from multi-taper FFT '
             'on 10 seconds of data.'.format(powerlinefrequency))

    elif raw.info["line_freq"] is None and line_freq is not None:
        # if the read in frequency is not set inside Raw
        # -> set it to what the sidecar JSON specifies
        raw.info["line_freq"] = line_freq
    elif raw.info["line_freq"] is not None \
            and line_freq is not None:
        # if both have a set Power Line Frequency, then
        # check that they are the same, else there is a
        # discrepency in the metadata of the dataset.
        if raw.info["line_freq"] != line_freq:
            raise ValueError(
                "Line frequency in sidecar json does "
                "not match the info datastructure of "
                "the mne.Raw. "
                "Raw is -> {} ".format(raw.info["line_freq"]),
                "Sidecar JSON is -> {} ".format(line_freq))

    return raw
Example #27
def _read_dig_bids(electrodes_fpath, coordsystem_fpath,
                   raw, datatype, verbose):
    """Read MNE-Python formatted DigMontage from BIDS files.

    Handles coordinatesystem.json and electrodes.tsv reading
    to DigMontage.

    Parameters
    ----------
    electrodes_fpath : str
        Filepath of the electrodes.tsv to read.
    coordsystem_fpath : str
        Filepath of the coordsystem.json to read.
    raw : instance of Raw
        The data as MNE-Python Raw object.
    datatype : str
        Type of the data recording. Can be ``meg``, ``eeg``,
        or ``ieeg``.
    verbose : bool
        Set verbose output to true or false.

    Returns
    -------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    """
    # get the space entity
    params = get_entities_from_fname(electrodes_fpath)
    space = params['space']
    if space is None:
        space = ''
    space = space.lower()

    # read in coordinate information
    coord_frame, coord_unit = _handle_coordsystem_reading(coordsystem_fpath,
                                                          datatype, verbose)

    if datatype == 'meg':
        if coord_frame not in BIDS_MEG_COORDINATE_FRAMES:
            warn("MEG Coordinate frame is not accepted "
                 "BIDS keyword. The allowed keywords are: "
                 "{}".format(BIDS_MEG_COORDINATE_FRAMES))
            coord_frame = None
        elif coord_frame == 'other':
            warn("Coordinate frame of MEG data can't be determined "
                 "when 'other'. The currently accepted keywords are: "
                 "{}".format(BIDS_MEG_COORDINATE_FRAMES))
            coord_frame = None
        else:
            coord_frame = BIDS_TO_MNE_FRAMES.get(coord_frame, None)
    elif datatype == 'ieeg':
        if coord_frame not in BIDS_IEEG_COORDINATE_FRAMES:
            warn("iEEG Coordinate frame is not accepted "
                 "BIDS keyword. The allowed keywords are: "
                 "{}".format(BIDS_IEEG_COORDINATE_FRAMES))
            coord_frame = None
        elif coord_frame == 'pixels':
            warn("Coordinate frame of iEEG data in pixels does not "
                 "get read in by mne-python. Skipping reading of "
                 "electrodes.tsv ...")
            coord_frame = None
        elif coord_frame == 'acpc':
            coord_frame = BIDS_TO_MNE_FRAMES.get(coord_frame, None)
        elif coord_frame == 'other':
            # XXX: We allow 'other' coordinate frames, but must be mne-python
            if space not in BIDS_TO_MNE_FRAMES:
                # default coordinate frames to available ones in mne-python
                # noqa: see https://bids-specification.readthedocs.io/en/stable/99-appendices/08-coordinate-systems.html
                warn("Defaulting coordinate frame to unknown "
                     "from coordinate system input {}".format(coord_frame))
            coord_frame = BIDS_TO_MNE_FRAMES.get(space, None)
    elif datatype == 'eeg':
        # only accept captrak
        if coord_frame not in BIDS_EEG_COORDINATE_FRAMES:
            warn("EEG Coordinate frame is not accepted "
                 "BIDS keyword. The allowed keywords are: "
                 "{}".format(BIDS_IEEG_COORDINATE_FRAMES))
            coord_frame = None
        else:
            coord_frame = BIDS_TO_MNE_FRAMES.get(coord_frame, None)

    # check coordinate units
    if coord_unit not in BIDS_COORDINATE_UNITS:
        warn("Coordinate unit is not an accepted BIDS unit for {}. "
             "Please specify to be one of {}. Skipping electrodes.tsv "
             "reading..."
             .format(electrodes_fpath, BIDS_COORDINATE_UNITS))
        coord_frame = None

    # only set montage if coordinate frame was properly parsed
    if coord_frame is not None:
        # read in electrode coordinates and attach to raw
        raw = _handle_electrodes_reading(electrodes_fpath, coord_frame,
                                         coord_unit, raw, verbose)

    return raw
Example #28
def _write_dig_bids(bids_path, raw, overwrite=False, verbose=True):
    """Write BIDS formatted DigMontage from Raw instance.

    Handles coordinatesystem.json and electrodes.tsv writing
    from DigMontage.

    Parameters
    ----------
    bids_path : BIDSPath
        Path in the BIDS dataset to save the ``electrodes.tsv``
        and ``coordsystem.json`` file for. ``datatype``
        attribute must be ``eeg``, or ``ieeg``. For ``meg``
        data, ``electrodes.tsv`` are not saved.
    raw : instance of Raw
        The data as MNE-Python Raw object.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false.
    """
    # write electrodes data for iEEG and EEG
    unit = "m"  # defaults to meters

    # get coordinate frame from digMontage
    digpoint = raw.info['dig'][0]
    if any(digpoint['coord_frame'] != _digpoint['coord_frame']
           for _digpoint in raw.info['dig']):
        warn("Not all digpoints have the same coordinate frame. "
             "Skipping electrodes.tsv writing...")
        return

    # get the accepted mne-python coordinate frames
    coord_frame_int = int(digpoint['coord_frame'])
    mne_coord_frame = MNE_FRAME_TO_STR.get(coord_frame_int, None)
    coord_frame = MNE_TO_BIDS_FRAMES.get(mne_coord_frame, None)

    # create electrodes/coordsystem files using a subset of entities
    # that are specified for these files in the specification
    coord_file_entities = {
        'root': bids_path.root,
        'datatype': bids_path.datatype,
        'subject': bids_path.subject,
        'session': bids_path.session,
        'acquisition': bids_path.acquisition,
        'space': bids_path.space
    }
    datatype = bids_path.datatype
    electrodes_path = BIDSPath(**coord_file_entities, suffix='electrodes',
                               extension='.tsv')
    coordsystem_path = BIDSPath(**coord_file_entities, suffix='coordsystem',
                                extension='.json')

    if verbose:
        print("Writing electrodes file to... ", electrodes_path)
        print("Writing coordsytem file to... ", coordsystem_path)

    if datatype == "ieeg":
        if coord_frame is not None:
            # XXX: To improve when mne-python allows coord_frame='unknown'
            if coord_frame not in BIDS_IEEG_COORDINATE_FRAMES:
                coordsystem_path.update(space=coord_frame)
                electrodes_path.update(space=coord_frame)
                coord_frame = 'Other'

            # Now write the data to the elec coords and the coordsystem
            _electrodes_tsv(raw, electrodes_path,
                            datatype, overwrite, verbose)
            _coordsystem_json(raw=raw, unit=unit, hpi_coord_system='n/a',
                              sensor_coord_system=coord_frame,
                              fname=coordsystem_path, datatype=datatype,
                              overwrite=overwrite, verbose=verbose)
        else:
            # coordinate frame is missing/unknown, so skip writing
            warn("Coordinate frame of iEEG coords missing/unknown "
                 "for {}. Skipping writing "
                 "of montage...".format(electrodes_path))
    elif datatype == 'eeg':
        # We only write EEG electrodes.tsv and coordsystem.json
        # if we have LPA, RPA, and NAS available to rescale to a known
        # coordinate system frame
        coords = _extract_landmarks(raw.info['dig'])
        landmarks = set(['RPA', 'NAS', 'LPA']) == set(list(coords.keys()))

        # XXX: to be improved to allow rescaling if landmarks are present
        # mne-python automatically converts unknown coord frame to head
        if coord_frame_int == FIFF.FIFFV_COORD_HEAD and landmarks:
            # Now write the data
            _electrodes_tsv(raw, electrodes_path, datatype,
                            overwrite, verbose)
            _coordsystem_json(raw=raw, unit='m', hpi_coord_system='n/a',
                              sensor_coord_system='CapTrak',
                              fname=coordsystem_path, datatype=datatype,
                              overwrite=overwrite, verbose=verbose)
        else:
            warn("Skipping EEG electrodes.tsv... "
                 "Setting montage not possible if anatomical "
                 "landmarks (NAS, LPA, RPA) are missing, "
                 "and coord_frame is not 'head'.")
Example #29
def _handle_electrodes_reading(electrodes_fname, coord_frame,
                               coord_unit, raw, verbose):
    """Read associated electrodes.tsv and populate raw.

    Handle xyz coordinates and coordinate frame of each channel.
    Assumes units of coordinates are in 'm'.
    """
    logger.info('Reading electrode '
                'coords from {}.'.format(electrodes_fname))
    electrodes_dict = _from_tsv(electrodes_fname)
    # First, make sure that ordering of names in channels.tsv matches the
    # ordering of names in the raw data. The "name" column is mandatory in BIDS
    ch_names_raw = list(raw.ch_names)
    ch_names_tsv = electrodes_dict['name']

    if ch_names_raw != ch_names_tsv:
        msg = ('Channels do not correspond between raw data and the '
               'channels.tsv file. For MNE-BIDS, the channel names in the '
               'tsv MUST be equal and in the same order as the channels in '
               'the raw data.\n\n'
               '{} channels in tsv file: "{}"\n\n --> {}\n\n'
               '{} channels in raw file: "{}"\n\n --> {}\n\n'
               .format(len(ch_names_tsv), electrodes_fname, ch_names_tsv,
                       len(ch_names_raw), raw.filenames, ch_names_raw)
               )

        # XXX: this could be due to MNE inserting a 'STI 014' channel as the
        # last channel: In that case, we can work. --> Can be removed soon,
        # because MNE will stop the synthesis of stim channels in the near
        # future
        if not (ch_names_raw[-1] == 'STI 014' and
                ch_names_raw[:-1] == ch_names_tsv):
            raise RuntimeError(msg)

    if verbose:
        summary_str = [(ch, coord) for idx, (ch, coord)
                       in enumerate(electrodes_dict.items())
                       if idx < 5]
        print("The read in electrodes file is: \n", summary_str)

    def _float_or_nan(val):
        if val == "n/a":
            return np.nan
        else:
            return float(val)

    # convert coordinates to float (missing positions become NaN)
    electrodes_dict['x'] = [_float_or_nan(x) for x in electrodes_dict['x']]
    electrodes_dict['y'] = [_float_or_nan(x) for x in electrodes_dict['y']]
    electrodes_dict['z'] = [_float_or_nan(x) for x in electrodes_dict['z']]
    # Channels with NaN coordinates are kept here; they are flagged below
    # if they are not also marked as bad.
    ch_locs = np.c_[electrodes_dict['x'],
                    electrodes_dict['y'],
                    electrodes_dict['z']]

    # determine if there are problematic channels
    nan_chs = []
    for ch_name, ch_coord in zip(ch_names_raw, ch_locs):
        if any(np.isnan(ch_coord)) and ch_name not in raw.info['bads']:
            nan_chs.append(ch_name)
    if len(nan_chs) > 0:
        warn("There are channels without locations "
             "(n/a) that are not marked as bad: {}".format(nan_chs))

    # convert coordinates to meters
    ch_locs = _scale_coord_to_meters(ch_locs, coord_unit)

    # create mne.DigMontage
    ch_pos = dict(zip(ch_names_raw, ch_locs))
    montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                            coord_frame=coord_frame)
    raw.set_montage(montage)
    return raw
Example #30
def _read_events(events_data, event_id, raw, verbose=None):
    """Retrieve events (for use in *_events.tsv) from FIFF/array & Annotations.

    Parameters
    ----------
    events_data : str | np.ndarray | None
        If a string, a path to an events file. If an array, an MNE events array
        (shape n_events, 3). If None, events will be generated from
        ``raw.annotations``.
    event_id : dict | None
        The event id dict used to create a 'trial_type' column in events.tsv,
        mapping a description key to an integer-valued event code.
    raw : mne.io.Raw
        The data as MNE-Python Raw object.
    verbose : bool | str | int | None
        If not None, override default verbose level (see :func:`mne.verbose`).

    Returns
    -------
    all_events : np.ndarray, shape = (n_events, 3)
        The first column contains the event time in samples and the third
        column contains the event id. The second column is ignored for now but
        typically contains the value of the trigger channel either immediately
        before the event or immediately after.
    all_dur : np.ndarray, shape (n_events,)
        The event durations in seconds.
    all_desc : dict
        A dictionary with the keys corresponding to the event descriptions and
        the values to the event IDs.

    """
    # get events from events_data
    if isinstance(events_data, str):
        events = read_events(events_data, verbose=verbose).astype(int)
    elif isinstance(events_data, np.ndarray):
        if events_data.ndim != 2:
            raise ValueError('Events must have two dimensions, '
                             f'found {events_data.ndim}')
        if events_data.shape[1] != 3:
            raise ValueError('Events must have second dimension of length 3, '
                             f'found {events_data.shape[1]}')
        events = events_data
    else:
        events = np.empty(shape=(0, 3), dtype=int)

    if events.size > 0:
        # Only keep events for which we have an ID <> description mapping.
        ids_without_desc = set(events[:, 2]) - set(event_id.values())
        if ids_without_desc:
            raise ValueError(
                f'No description was specified for the following event(s): '
                f'{", ".join([str(x) for x in sorted(ids_without_desc)])}. '
                f'Please add them to the event_id dictionary, or drop them '
                f'from the events_data array.')
        del ids_without_desc
        mask = [e in list(event_id.values()) for e in events[:, 2]]
        events = events[mask]

        # Append events to raw.annotations. All event onsets are relative to
        # measurement beginning.
        id_to_desc_map = dict(zip(event_id.values(), event_id.keys()))
        # We don't pass `first_samp`, as set_annotations() below will take
        # care of this shift automatically.
        new_annotations = mne.annotations_from_events(
            events=events,
            sfreq=raw.info['sfreq'],
            event_desc=id_to_desc_map,
            orig_time=raw.annotations.orig_time,
            verbose=verbose)

        raw = raw.copy()  # Don't alter the original.
        annotations = raw.annotations.copy()

        # We use `+=` here because `Annotations.__iadd__()` does the right
        # thing and also performs a sanity check on `Annotations.orig_time`.
        annotations += new_annotations
        raw.set_annotations(annotations)
        del id_to_desc_map, annotations, new_annotations

    # Now convert the Annotations to events.
    all_events, all_desc = events_from_annotations(
        raw,
        event_id=event_id,
        regexp=None,  # Include `BAD_` and `EDGE_` Annotations, too.
        verbose=verbose)
    all_dur = raw.annotations.duration
    if all_events.size == 0 and 'rest' not in raw.filenames[0]:
        warn('No events found or provided. Please add annotations '
             'to the raw data, or provide the events_data and '
             'event_id parameters. If this is resting state data '
             'it is recommended to name the task "rest".')

    return all_events, all_dur, all_desc
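
A quick sketch of the id-to-description inversion used above:

event_id = {'stimulus/visual': 1, 'stimulus/auditory': 2}
id_to_desc_map = dict(zip(event_id.values(), event_id.keys()))
print(id_to_desc_map)  # -> {1: 'stimulus/visual', 2: 'stimulus/auditory'}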
Example #31
def plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
                 res=64, axes=None, names=None, show_names=False, mask=None,
                 mask_params=None, outlines='head', image_mask=None,
                 contours=6, image_interp='bilinear', show=True,
                 head_pos=None, onselect=None, axis=None):
    """See the docstring for mne.viz.plot_topomap, which I've simply
    modified to return more objects."""

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.widgets import RectangleSelector
    from mne.io.pick import (channel_type, pick_info, _pick_data_channels)
    from mne.utils import warn
    from mne.viz.utils import (_setup_vmin_vmax, plt_show)
    from mne.defaults import _handle_default
    from mne.channels.layout import _find_topomap_coords
    from mne.io.meas_info import Info
    from mne.viz.topomap import (_check_outlines, _prepare_topomap, _griddata,
                                 _make_image_mask, _plot_sensors,
                                 _draw_outlines)

    data = np.asarray(data)

    if isinstance(pos, Info):  # infer pos from Info object
        picks = _pick_data_channels(pos)  # pick only data channels
        pos = pick_info(pos, picks)

        # check if there is only 1 channel type, and n_chans matches the data
        ch_type = set(channel_type(pos, idx)
                      for idx, _ in enumerate(pos["chs"]))
        info_help = ("Pick Info with e.g. mne.pick_info and "
                     "mne.channels.channel_indices_by_type.")
        if len(ch_type) > 1:
            raise ValueError("Multiple channel types in Info structure. " +
                             info_help)
        elif len(pos["chs"]) != data.shape[0]:
            raise ValueError("Number of channels in the Info object and "
                             "the data array does not match. " + info_help)
        else:
            ch_type = ch_type.pop()

        if any(type_ in ch_type for type_ in ('planar', 'grad')):
            # deal with grad pairs
            from mne.channels.layout import (_merge_grad_data, find_layout,
                                             _pair_grad_sensors)
            picks, pos = _pair_grad_sensors(pos, find_layout(pos))
            data = _merge_grad_data(data[picks]).reshape(-1)
        else:
            picks = list(range(data.shape[0]))
            pos = _find_topomap_coords(pos, picks=picks)

    if data.ndim > 1:
        raise ValueError("Data needs to be array of shape (n_sensors,); got "
                         "shape %s." % str(data.shape))

    # Give a helpful error message for common mistakes regarding the position
    # matrix.
    pos_help = ("Electrode positions should be specified as a 2D array with "
                "shape (n_channels, 2). Each row in this matrix contains the "
                "(x, y) position of an electrode.")
    if pos.ndim != 2:
        error = ("{ndim}D array supplied as electrode positions, where a 2D "
                 "array was expected").format(ndim=pos.ndim)
        raise ValueError(error + " " + pos_help)
    elif pos.shape[1] == 3:
        error = ("The supplied electrode positions matrix contains 3 columns. "
                 "Are you trying to specify XYZ coordinates? Perhaps the "
                 "mne.channels.create_eeg_layout function is useful for you.")
        raise ValueError(error + " " + pos_help)
    # No error is raised in case of pos.shape[1] == 4. In this case, it is
    # assumed the position matrix contains both (x, y) and (width, height)
    # values, such as Layout.pos.
    elif pos.shape[1] == 1 or pos.shape[1] > 4:
        raise ValueError(pos_help)

    if len(data) != len(pos):
        raise ValueError("Data and pos need to be of same length. Got data of "
                         "length %s, pos of length %s" % (len(data), len(pos)))

    norm = min(data) >= 0
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
    if cmap is None:
        cmap = 'Reds' if norm else 'RdBu_r'

    pos, outlines = _check_outlines(pos, outlines, head_pos)

    if axis is not None:
        axes = axis
        warn('axis parameter is deprecated and will be removed in 0.13. '
             'Use axes instead.', DeprecationWarning)
    ax = axes if axes is not None else plt.gca()
    pos_x, pos_y = _prepare_topomap(pos, ax)
    if outlines is None:
        xmin, xmax = pos_x.min(), pos_x.max()
        ymin, ymax = pos_y.min(), pos_y.max()
    else:
        xlim = np.inf, -np.inf
        ylim = np.inf, -np.inf
        mask_ = np.c_[outlines['mask_pos']]
        xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
                      np.max(np.r_[xlim[1], mask_[:, 0]]))
        ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
                      np.max(np.r_[ylim[1], mask_[:, 1]]))

    # interpolate data
    xi = np.linspace(xmin, xmax, res)
    yi = np.linspace(ymin, ymax, res)
    Xi, Yi = np.meshgrid(xi, yi)
    Zi = _griddata(pos_x, pos_y, data, Xi, Yi)

    _is_default_outlines = False
    if isinstance(outlines, dict):
        _is_default_outlines = any(k.startswith('head') for k in outlines)

    if _is_default_outlines and image_mask is None:
        # prepare masking
        image_mask, pos = _make_image_mask(outlines, pos, res)

    mask_params = _handle_default('mask_params', mask_params)

    # plot outline
    linewidth = mask_params['markeredgewidth']
    patch = None
    if outlines is not None and 'patch' in outlines:
        patch = outlines['patch']
        patch_ = patch() if callable(patch) else patch
        patch_.set_clip_on(False)
        ax.add_patch(patch_)
        ax.set_transform(ax.transAxes)
        ax.set_clip_path(patch_)

    # plot map and contour
    im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
                   aspect='equal', extent=(xmin, xmax, ymin, ymax),
                   interpolation=image_interp)

    # This works around a matplotlib rescaling issue that occurs when no
    # contours are drawn. To avoid rescaling, we always draw contours; if
    # none are desired, we draw a single one and make it invisible.
    no_contours = False
    if contours in (False, None):
        contours, no_contours = 1, True
    cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
                      linewidths=linewidth)
    if no_contours:
        for col in cont.collections:
            col.set_visible(False)

    if _is_default_outlines:
        from matplotlib import patches
        patch_ = patches.Ellipse((0, 0),
                                 2 * outlines['clip_radius'][0],
                                 2 * outlines['clip_radius'][1],
                                 clip_on=True,
                                 transform=ax.transData)
    if _is_default_outlines or patch is not None:
        im.set_clip_path(patch_)
        if cont is not None:
            for col in cont.collections:
                col.set_clip_path(patch_)

    if sensors is not False and mask is None:
        _plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
    elif sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)
        idx = np.where(~mask)[0]
        _plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
    elif not sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)

    if isinstance(outlines, dict):
        _draw_outlines(ax, outlines)

    if show_names:
        if names is None:
            raise ValueError("To show names, a list of names must be provided"
                             " (see `names` keyword).")
        if show_names is True:
            def _show_names(x):
                return x
        else:
            _show_names = show_names
        show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
        for ii, (p, ch_id) in enumerate(zip(pos, names)):
            if ii not in show_idx:
                continue
            ch_id = _show_names(ch_id)
            ax.text(p[0], p[1], ch_id, horizontalalignment='center',
                    verticalalignment='center', size='x-small')

    plt.subplots_adjust(top=.95)

    if onselect is not None:
        ax.RS = RectangleSelector(ax, onselect=onselect)
    plt_show(show)
    return ax, im, cont, pos_x, pos_y
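
A short usage sketch for the modified plot_topomap above, with synthetic data and sensor positions; note that the function depends on private helpers from an older MNE release (the `axis` deprecation points to ~0.12), so it may not run against current versions:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
n_channels = 32
data = rng.randn(n_channels)                        # one value per sensor
pos = rng.uniform(-0.5, 0.5, size=(n_channels, 2))  # (x, y) sensor positions

ax, im, cont, pos_x, pos_y = plot_topomap(data, pos, show=False)
plt.colorbar(im, ax=ax)
plt.show()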
Example #34
def tfr_morlet(data, sfreq, freqs, kind='amplitude', n_cycles=3.,
               use_fft=True, decimate=1, average=False):
    """Calculate the time-frequency representation of a signal.

    Parameters
    ----------
    data : array, shape (n_signals, n_times) | (n_epochs, n_signals, n_times)
        The data to calculate the TFR.
    sfreq : float | int
        The sampling frequency of the data.
    freqs : array, shape (n_frequencies,)
        The frequencies at which to calculate the TFR.
    kind : str, 'filter' | 'amplitude'
        What kind of TFR to output. If "filter", the output will be a
        band-pass filtered version of the signal. If "amplitude", the
        output will be the amplitude at each frequency band.
    n_cycles : int | array, shape (n_frequencies,)
        The number of cycles for each frequency to include.
    use_fft : bool
        Whether to use an FFT to calculate the wavelet transform.
    decimate : int
        The amount to decimate the output. If 1, no decimation will be done.
    average : bool
        Whether to average across the first dimension before returning results.

    Returns
    -------
    tfr : array, shape ([n_epochs], n_signals, n_frequencies, n_times)
        The time-frequency representation of the input. If `average=True`,
        the output is averaged across the first dimension (epochs).
    """
    # Loop through our data
    if average is True:
        if data.ndim < 3:
            raise ValueError('If averaging, data should have at least 3 dims')
        n_ep, n_sig, n_times = data.shape[-3:]
        n_freqs = len(freqs)
        # Slicing with [::decimate] yields ceil(n_times / decimate) samples.
        tfr = np.zeros([n_sig, n_freqs, int(np.ceil(n_times / decimate))])
    else:
        tfr = []
    for i_data in tqdm(data):
        # Calculate the wavelet transform for each iteration and stack
        i_data = np.atleast_2d(i_data)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            this_tfr = cwt_morlet(i_data, sfreq,
                                  freqs, n_cycles=n_cycles,
                                  use_fft=use_fft)
        if kind == 'filter':
            # In this case, we'll just take the real values
            this_tfr = np.real(this_tfr)

            if decimate != 1:
                warn('Decimating band-passed data may cause artifacts.')
        elif kind == 'amplitude':
            # Now take the absolute value so it's only amplitude
            this_tfr = np.abs(this_tfr)
        else:
            raise ValueError('kind must be one of "filter" | "amplitude"')
        this_tfr = this_tfr[..., ::decimate]

        if average is True:
            tfr += this_tfr
        else:
            tfr.append(this_tfr)

    if average is True:
        tfr /= n_ep
    else:
        tfr = np.asarray(tfr)
    return tfr.squeeze()
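
A usage sketch for tfr_morlet, assuming cwt_morlet, tqdm, warnings, and warn are available in the snippet's namespace (e.g., cwt_morlet from an older mne.time_frequency); the data here are a synthetic noisy 10 Hz oscillation:

import numpy as np

sfreq = 250.
times = np.arange(0, 2, 1. / sfreq)
# Three epochs, one signal each: a noisy 10 Hz sine.
data = (np.sin(2 * np.pi * 10 * times)
        + 0.1 * np.random.randn(3, 1, times.size))
freqs = np.arange(5., 20., 1.)

tfr = tfr_morlet(data, sfreq, freqs, kind='amplitude',
                 n_cycles=5, average=True)
print(tfr.shape)  # (n_frequencies, n_times) once the signal axis is squeezed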