Example #1
def _find_bad_channels(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the first step of the FASTER algorithm.

    This function attempts to automatically mark bad EEG channels by performing
    outlier detection. It operates on epoched data, to make sure only relevant
    data is analyzed.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
            'variance', 'correlation', 'hurst', 'kurtosis', 'line_noise'
        Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. A channel
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier detection
        (defaults to 1, as in the original FASTER paper).
    """
    from scipy.stats import kurtosis
    metrics = {
        'variance':
        lambda x: np.var(x, axis=1),
        'correlation':
        lambda x: np.mean(np.ma.masked_array(np.corrcoef(x),
                                             np.identity(len(x), dtype=bool)),
                          axis=0),
        'hurst':
        lambda x: _hurst(x),
        'kurtosis':
        lambda x: kurtosis(x, axis=1),
        'line_noise':
        lambda x: _freqs_power(x, epochs.info['sfreq'], [50, 60]),
    }

    if use_metrics is None:
        use_metrics = metrics.keys()

    # Concatenate epochs in time
    data = epochs.get_data()[:, picks]
    data = data.transpose(1, 0, 2).reshape(data.shape[1], -1)

    # Find bad channels
    bads = defaultdict(list)
    info = pick_info(epochs.info, picks, copy=True)
    for ch_type, chs in _picks_by_type(info):
        logger.info('Bad channel detection on %s channels:' % ch_type.upper())
        for metric in use_metrics:
            scores = metrics[metric](data[chs])
            bad_channels = [
                epochs.ch_names[picks[chs[i]]]
                for i in find_outliers(scores, thresh, max_iter)
            ]
            logger.info('\tBad by %s: %s' % (metric, bad_channels))
            bads[metric].append(bad_channels)

    bads = dict((k, np.concatenate(v).tolist()) for k, v in bads.items())
    return bads
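
A hedged usage sketch for this step, assuming the module-level helpers it relies on (find_outliers, pick_info, _picks_by_type, logger, _hurst, _freqs_power) are importable; the synthetic data, channel names, and metric choice are illustrative, not from the source:

import numpy as np
import mne

# Synthetic EEG: 10 epochs, 8 channels, 1 s at 250 Hz (values illustrative).
info = mne.create_info(['EEG %02d' % i for i in range(8)], sfreq=250.,
                       ch_types='eeg')
rng = np.random.RandomState(42)
data = rng.randn(10, 8, 250) * 1e-6
data[:, 3] *= 20  # make one channel an obvious variance outlier
epochs = mne.EpochsArray(data, info)

picks = mne.pick_types(epochs.info, meg=False, eeg=True)
bads = _find_bad_channels(epochs, picks, use_metrics=['variance', 'kurtosis'],
                          thresh=3, max_iter=1)
print(bads)  # e.g. {'variance': ['EEG 03'], 'kurtosis': [...]}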
Example #2
def _find_bad_channels(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the first step of the FASTER algorithm.

    This function attempts to automatically mark bad EEG channels by performing
    outlier detection. It operates on epoched data, to make sure only relevant
    data is analyzed.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
            'variance', 'correlation', 'hurst', 'kurtosis', 'line_noise'
        Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. A channel
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier detection
        (defaults to 1, as in the original FASTER paper).
    """
    from scipy.stats import kurtosis
    metrics = {
        'variance': lambda x: np.var(x, axis=1),
        'correlation': lambda x: np.mean(
            np.ma.masked_array(np.corrcoef(x),
                               np.identity(len(x), dtype=bool)), axis=0),
        'hurst': lambda x: _hurst(x),
        'kurtosis': lambda x: kurtosis(x, axis=1),
        'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
                                             [50, 60]),
    }

    if use_metrics is None:
        use_metrics = metrics.keys()

    # Concatenate epochs in time
    data = epochs.get_data()[:, picks]
    data = data.transpose(1, 0, 2).reshape(data.shape[1], -1)

    # Find bad channels
    bads = defaultdict(list)
    info = pick_info(epochs.info, picks, copy=True)
    for ch_type, chs in _picks_by_type(info):
        logger.info('Bad channel detection on %s channels:' % ch_type.upper())
        for metric in use_metrics:
            scores = metrics[metric](data[chs])
            bad_channels = [epochs.ch_names[picks[chs[i]]]
                            for i in find_outliers(scores, thresh, max_iter)]
            logger.info('\tBad by %s: %s' % (metric, bad_channels))
            bads[metric].append(bad_channels)

    bads = dict((k, np.concatenate(v).tolist()) for k, v in bads.items())
    return bads
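
Both variants above delegate the actual thresholding to find_outliers. A minimal sketch of that kind of iterative z-scoring, using a plain mean/std z-score (an illustration of the idea, not the exact MNE implementation):

import numpy as np

def iterative_zscore_outliers(scores, thresh=3.0, max_iter=1):
    # Flag entries whose z-score exceeds thresh, then re-estimate the mean
    # and std from the remaining entries and repeat, up to max_iter passes.
    scores = np.asarray(scores, dtype=float)
    mask = np.zeros(len(scores), dtype=bool)
    for _ in range(max_iter):
        valid = scores[~mask]
        z = np.abs(scores - valid.mean()) / valid.std()
        new = (z > thresh) & ~mask
        if not new.any():
            break
        mask |= new
    return np.where(mask)[0]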
Example #3
def _find_bad_channels_in_epochs(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the fourth step of the FASTER algorithm.

    This function attempts to automatically mark bad channels in each epoch by
    performing outlier detection.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'amplitude', 'variance', 'deviation', 'median_gradient', 'line_noise'
        Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. A channel
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier detection
        (defaults to 1, as in the original FASTER paper).
    """

    metrics = {
        'amplitude': lambda x: np.ptp(x, axis=2),
        'deviation': lambda x: _deviation(x),
        'variance': lambda x: np.var(x, axis=2),
        'median_gradient': lambda x: np.median(np.abs(np.diff(x)), axis=2),
        'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
                                             [50, 60]),
    }

    if use_metrics is None:
        use_metrics = metrics.keys()

    info = pick_info(epochs.info, picks, copy=True)
    data = epochs.get_data()[:, picks]
    bads = dict((m, np.zeros((len(data), len(picks)), dtype=bool)) for
                m in metrics)
    for ch_type, chs in _picks_by_type(info):
        ch_names = [info['ch_names'][k] for k in chs]
        chs = np.array(chs)
        for metric in use_metrics:
            logger.info('Bad channel-in-epoch detection on %s channels:'
                        % ch_type.upper())
            s_epochs = metrics[metric](data[:, chs])
            for i_epochs, epoch in enumerate(s_epochs):
                outliers = find_outliers(epoch, thresh, max_iter)
                if len(outliers) > 0:
                    bad_segment = [ch_names[k] for k in outliers]
                    logger.info('Epoch %d, Bad by %s:\n\t%s' % (
                        i_epochs, metric, bad_segment))
                    bads[metric][i_epochs, chs[outliers]] = True

    return bads
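
Unlike the channel-level step, this variant returns one boolean matrix of shape (n_epochs, n_picks) per metric. A hedged sketch of collapsing those matrices into per-epoch lists of channel names; the helper name is illustrative, and ch_names is assumed to hold the picked channels' names in pick order:

import numpy as np

def combine_epoch_bads(bads, ch_names):
    # OR the per-metric masks together: a channel counts as bad in an epoch
    # if any metric flagged it there.
    any_bad = np.zeros_like(next(iter(bads.values())))
    for flags in bads.values():
        any_bad |= flags
    return [[ch_names[j] for j in np.where(row)[0]] for row in any_bad]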
Example #4
def _find_bad_channels_in_epochs(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the fourth step of the FASTER algorithm.

    This function attempts to automatically mark bad channels in each epoch by
    performing outlier detection.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'amplitude', 'variance', 'deviation', 'median_gradient', 'line_noise'
        Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. A channel
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier detection
        (defaults to 1, as in the original FASTER paper).
    """

    metrics = {
        'amplitude': lambda x: np.ptp(x, axis=2),
        'deviation': lambda x: _deviation(x),
        'variance': lambda x: np.var(x, axis=2),
        'median_gradient': lambda x: np.median(np.abs(np.diff(x)), axis=2),
        'line_noise':
        lambda x: _freqs_power(x, epochs.info['sfreq'], [50, 60]),
    }

    if use_metrics is None:
        use_metrics = metrics.keys()

    info = pick_info(epochs.info, picks, copy=True)
    data = epochs.get_data()[:, picks]
    bads = dict(
        (m, np.zeros((len(data), len(picks)), dtype=bool)) for m in metrics)
    for ch_type, chs in _picks_by_type(info):
        ch_names = [info['ch_names'][k] for k in chs]
        chs = np.array(chs)
        for metric in use_metrics:
            logger.info('Bad channel-in-epoch detection on %s channels:' %
                        ch_type.upper())
            s_epochs = metrics[metric](data[:, chs])
            for i_epochs, epoch in enumerate(s_epochs):
                outliers = find_outliers(epoch, thresh, max_iter)
                if len(outliers) > 0:
                    bad_segment = [ch_names[k] for k in outliers]
                    logger.info('Epoch %d, Bad by %s:\n\t%s' %
                                (i_epochs, metric, bad_segment))
                    bads[metric][i_epochs, chs[outliers]] = True

    return bads
Example #5
def _find_bad_epochs(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the second step of the FASTER algorithm.

    This function attempts to automatically mark bad epochs by performing
    outlier detection.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'amplitude', 'variance', 'deviation'. Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. An epoch
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier detection
        (defaults to 1, as in the original FASTER paper).
    """

    metrics = {
        'amplitude': lambda x: np.mean(np.ptp(x, axis=2), axis=1),
        'deviation': lambda x: np.mean(_deviation(x), axis=1),
        'variance': lambda x: np.mean(np.var(x, axis=2), axis=1),
    }

    if use_metrics is None:
        use_metrics = metrics.keys()

    info = pick_info(epochs.info, picks, copy=True)
    data = epochs.get_data()[:, picks]

    bads = defaultdict(list)
    for ch_type, chs in _picks_by_type(info):
        logger.info('Bad epoch detection on %s channels:' % ch_type.upper())
        for metric in use_metrics:
            scores = metrics[metric](data[:, chs])
            bad_epochs = find_outliers(scores, thresh, max_iter)
            logger.info('\tBad by %s: %s' % (metric, bad_epochs))
            bads[metric].append(bad_epochs)

    bads = dict((k, np.concatenate(v).tolist()) for k, v in bads.items())
    return bads
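
A hedged sketch of acting on the result, assuming epochs and picks as in the function above; Epochs.drop is the standard MNE call for discarding flagged indices:

bads = _find_bad_epochs(epochs, picks, use_metrics=None, thresh=3, max_iter=1)
bad_epochs = sorted({idx for indices in bads.values() for idx in indices})
epochs.drop(bad_epochs, reason='FASTER')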
Example #6
trans = {'from': mri_head_t['to'], 'to': mri_head_t['from'], 'trans': np.linalg.inv(mri_head_t['trans'])}
mne.transforms.write_trans(trans_file_path, trans)

source_space = fwd['src']

mne.viz.plot_alignment(info=fwd_info, trans=trans_file_path, subject=subject,
                       # surfaces=['outer_skin', 'pial'],  # can't see the source space
                       eeg='projected',
                       meg=False, src=source_space, subjects_dir=subjects_dir)
'''

# Load file, find bad channels, interpolate, check that alpha peaks around 10 Hz, filter in the alpha band
file_path = r"D:\Cognigraph\eyes\Koleno.vhdr"
raw = mne.io.brainvision.read_raw_brainvision(file_path,
                                              preload=True)  # type: mne.io.Raw
bad_indices = find_outliers(np.std(raw.get_data(), axis=1), max_iter=3)
bads = [raw.info['chs'][idx]['ch_name'] for idx in bad_indices]
raw.info['bads'] = bads
fill_eeg_channel_locations(raw.info)
raw.interpolate_bads(mode='fast')
# raw.plot_psd()
raw.filter(l_freq=8, h_freq=12)
raw.set_eeg_reference(projection=True)

# Construct inverse, apply
info = raw.info
G = fwd['sol']['data']
q = 1
# q = np.trace(G.dot(G.T)) / G.shape[0]

# picks = mne.pick_types(info, eeg=True, meg=False)
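
The script breaks off right after pulling the gain matrix G out of the forward solution. A minimal sketch of the Tikhonov-regularized minimum-norm inverse it appears to be setting up; the regularization value, the identity noise covariance, and the assumption that G's rows line up with the EEG picks are mine, not the script's:

lambda2 = 0.1  # regularization strength; illustrative value
n_ch = G.shape[0]
# W = G^T (G G^T + lambda2 * q * I)^-1, a standard minimum-norm inverse
W = G.T @ np.linalg.inv(G @ G.T + lambda2 * q * np.eye(n_ch))
eeg_picks = mne.pick_types(info, meg=False, eeg=True)
sources = W @ raw.get_data(picks=eeg_picks)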
Example #7
def find_bads_misc(ica,
                   inst,
                   ch_name=None,
                   threshold=3.0,
                   start=None,
                   stop=None,
                   l_freq=1,
                   h_freq=10,
                   verbose=None):
    """Detect EOG related components using correlation
    Detection is based on Pearson correlation between the
    filtered data and the filtered ECG channel.
    Thresholding is based on adaptive z-scoring. The above threshold
    components will be masked and the z-score will be recomputed
    until no supra-threshold component remains.
    Parameters
    ----------
    inst : instance of Raw, Epochs or Evoked
        Object to compute sources from.
    ch_name : str
        The name of the channel to use for ECG peak detection.
        The argument is mandatory if the dataset contains no ECG
        channels.
    threshold : int | float
        The value above which a feature is classified as outlier.
    start : int | float | None
        First sample to include. If float, data will be interpreted as
        time in seconds. If None, data will be used from the first sample.
    stop : int | float | None
        Last sample to not include. If float, data will be interpreted as
        time in seconds. If None, data will be used to the last sample.
    l_freq : float
        Low pass frequency.
    h_freq : float
        High pass frequency.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to self.verbose.
    Returns
    -------
    ecg_idx : list of int
        The indices of EOG related components, sorted by score.
    scores : np.ndarray of float, shape (ica.n_components_) | list of array
        The correlation scores.
    """

    misc_inds = _get_eog_channel_index(ch_name, inst)
    if len(misc_inds) > 2:
        misc_inds = misc_inds[:1]
    scores, misc_idx = [], []
    misc_chs = [inst.ch_names[k] for k in misc_inds]

    # get the correlation targets before the channel sets are equalized below
    targets = [ica._check_target(k, inst, start, stop) for k in misc_chs]

    if inst.ch_names != ica.ch_names:
        inst = inst.pick_channels(ica.ch_names)

    for misc_ch, target in zip(misc_chs, targets):
        scores += [
            ica.score_sources(inst,
                              target=target,
                              score_func='pearsonr',
                              start=start,
                              stop=stop,
                              l_freq=l_freq,
                              h_freq=h_freq,
                              verbose=verbose)
        ]
        misc_idx += [find_outliers(scores[-1], threshold=threshold)]

    # remove duplicates but keep order by score, even across multiple
    # target channels
    scores_ = np.concatenate(
        [scores[ii][inds] for ii, inds in enumerate(misc_idx)])
    misc_idx_ = np.concatenate(misc_idx)[np.abs(scores_).argsort()[::-1]]

    misc_idx_unique = list(np.unique(misc_idx_))
    misc_idx = []
    for i in misc_idx_:
        if i in misc_idx_unique:
            misc_idx.append(i)
            misc_idx_unique.remove(i)
    if len(scores) == 1:
        scores = scores[0]

    return misc_idx, scores
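
A hedged usage sketch, assuming a fitted ICA instance and a Raw recording that contains a channel named 'MISC 001' (the channel name is illustrative):

misc_idx, scores = find_bads_misc(ica, raw, ch_name='MISC 001',
                                  threshold=3.0, l_freq=1, h_freq=10)
ica.exclude.extend(misc_idx)       # mark the correlated components
raw_clean = ica.apply(raw.copy())  # remove them from a copy of the data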