Example 1
def _raise_bad_epochs(raw, epochs, events, kind=None, plot=True):
    extra = '' if kind is None else f' of type {kind}'
    if plot:
        plot_drop_log(epochs.drop_log)
        raw.plot(events=events)
    raise RuntimeError(
        f'Only {len(epochs)}/{len(events)} good epochs found{extra}')
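These snippets exercise `plot_drop_log` from MNE-Python. For reference, a minimal self-contained sketch of a direct call (assuming a recent MNE-Python, where `mne.viz.plot_drop_log` takes a tuple of tuples of str; the drop log below is synthetic):

import matplotlib.pyplot as plt
from mne.viz import plot_drop_log

# One entry per epoch: an empty tuple means the epoch was kept, otherwise
# the tuple lists the reasons (e.g. channel names) it was dropped.
drop_log = (('EEG 053',), (), ('EOG 061', 'EEG 053'), ())
plot_drop_log(drop_log, subject='sample', show=False)
plt.close('all')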
Example 2
def test_plot_drop_log():
    """Test plotting a drop log
    """
    plot_drop_log(epochs.drop_log)
    plot_drop_log([["One"], [], []])
    plot_drop_log([["One"], ["Two"], []])
    plot_drop_log([["One"], ["One", "Two"], []])
Example 3
def test_plot_drop_log():
    """Test plotting a drop log
    """
    plot_drop_log(epochs.drop_log)
    plot_drop_log([['One'], [], []])
    plot_drop_log([['One'], ['Two'], []])
    plot_drop_log([['One'], ['One', 'Two'], []])
Example 4
def test_plot_drop_log():
    """Test plotting a drop log
    """
    epochs = _get_epochs()
    epochs.drop_bad_epochs()
    plot_drop_log(epochs.drop_log)
    plot_drop_log([['One'], [], []])
    plot_drop_log([['One'], ['Two'], []])
    plot_drop_log([['One'], ['One', 'Two'], []])
    plt.close('all')
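Examples 4, 9, 10, 12, and 13 call `epochs.drop_bad_epochs()`, the method name used by old MNE-Python releases; it was later renamed `drop_bad()` (Example 15 below copes with both via try/except). A small compatibility helper, sketched under that assumption:

def drop_bad_compat(epochs):
    """Drop bad epochs under both the old and the new MNE-Python API."""
    try:
        epochs.drop_bad()           # modern method name
    except AttributeError:          # very old releases only had this one
        epochs.drop_bad_epochs()
    return epochs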
Example 5
def test_plot_drop_log():
    """Test plotting a drop log."""
    epochs = _get_epochs()
    pytest.raises(ValueError, epochs.plot_drop_log)
    epochs.drop_bad()
    epochs.plot_drop_log()
    plot_drop_log((('One', ), (), ()))
    plot_drop_log((('One', ), ('Two', ), ()))
    plot_drop_log((('One', ), ('One', 'Two'), ()))
    for arg in ([], ([], ), (1, )):
        with pytest.raises(TypeError, match='tuple of tuple of str'):
            plot_drop_log(arg)
    plt.close('all')
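Examples 5 and 6 target the newer API, where `drop_log` is a tuple of tuples of str and list input raises the TypeError tested above; most of the other examples still pass lists. A minimal conversion sketch (assuming a recent MNE-Python; `old_log` is illustrative):

from mne.viz import plot_drop_log

old_log = [['One'], ['One', 'Two'], []]          # old-style list of lists
new_log = tuple(tuple(reasons) for reasons in old_log)
plot_drop_log(new_log, show=False)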
Example 6
def test_plot_drop_log(epochs_unloaded):
    """Test plotting a drop log."""
    with pytest.raises(ValueError, match='bad epochs have not yet been'):
        epochs_unloaded.plot_drop_log()
    epochs_unloaded.drop_bad()
    epochs_unloaded.plot_drop_log()
    plot_drop_log((('One', ), (), ()))
    plot_drop_log((('One', ), ('Two', ), ()))
    plot_drop_log((('One', ), ('One', 'Two'), ()))
    for arg in ([], ([], ), (1, )):
        with pytest.raises(TypeError, match='tuple of tuple of str'):
            plot_drop_log(arg)
    plt.close('all')
Example 7
def test_plot_drop_log():
    """Test plotting a drop log."""
    epochs = _get_epochs()
    pytest.raises(ValueError, epochs.plot_drop_log)
    epochs.drop_bad()
    epochs.plot_drop_log()
    plot_drop_log([['One'], [], []])
    plot_drop_log([['One'], ['Two'], []])
    plot_drop_log([['One'], ['One', 'Two'], []])
    plt.close('all')
Example 8
def test_plot_drop_log():
    """Test plotting a drop log
    """
    epochs = _get_epochs()
    epochs.drop_bad_epochs()
    epochs.plot_drop_log()

    plot_drop_log([["One"], [], []])
    plot_drop_log([["One"], ["Two"], []])
    plot_drop_log([["One"], ["One", "Two"], []])
    plt.close("all")
Example 9
def test_plot_drop_log():
    """Test plotting a drop log
    """
    epochs = _get_epochs()
    epochs.drop_bad_epochs()

    warnings.simplefilter("always", UserWarning)
    with warnings.catch_warnings(record=True):
        epochs.plot_drop_log()

        plot_drop_log([["One"], [], []])
        plot_drop_log([["One"], ["Two"], []])
        plot_drop_log([["One"], ["One", "Two"], []])
    plt.close("all")
Example 10
def test_plot_drop_log():
    """Test plotting a drop log
    """
    epochs = _get_epochs()
    epochs.drop_bad_epochs()

    warnings.simplefilter('always', UserWarning)
    with warnings.catch_warnings(record=True):
        epochs.plot_drop_log()

        plot_drop_log([['One'], [], []])
        plot_drop_log([['One'], ['Two'], []])
        plot_drop_log([['One'], ['One', 'Two'], []])
    plt.close('all')
Example 11
def test_plot_drop_log():
    """Test plotting a drop log."""
    import matplotlib.pyplot as plt
    epochs = _get_epochs()
    assert_raises(ValueError, epochs.plot_drop_log)
    epochs.drop_bad()

    warnings.simplefilter('always', UserWarning)
    with warnings.catch_warnings(record=True):
        epochs.plot_drop_log()

        plot_drop_log([['One'], [], []])
        plot_drop_log([['One'], ['Two'], []])
        plot_drop_log([['One'], ['One', 'Two'], []])
    plt.close('all')
Example 12
def test_plot_drop_log():
    """Test plotting a drop log
    """
    import matplotlib.pyplot as plt

    epochs = _get_epochs()
    assert_raises(ValueError, epochs.plot_drop_log)
    epochs.drop_bad_epochs()

    warnings.simplefilter("always", UserWarning)
    with warnings.catch_warnings(record=True):
        epochs.plot_drop_log()

        plot_drop_log([["One"], [], []])
        plot_drop_log([["One"], ["Two"], []])
        plot_drop_log([["One"], ["One", "Two"], []])
    plt.close("all")
Example 13
#   Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
include = []  # or stim channels ['STI 014']
raw.info['bads'] += ['EEG 053']  # bads + 1 more

# pick EEG channels
picks = fiff.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
                        include=include, exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(eeg=80e-6, eog=150e-6))
evoked = epochs.average()  # average epochs and get an Evoked dataset.

evoked.save('sample_audvis_eeg-ave.fif')  # save evoked data to disk

###############################################################################
# View evoked response
times = 1e3 * epochs.times  # time in milliseconds
import pylab as pl
pl.figure()
evoked.plot()
pl.xlim([times[0], times[-1]])
pl.xlabel('time (ms)')
pl.ylabel('Potential (uV)')
pl.title('EEG evoked potential')
pl.show()

# Look at channels that caused dropped events, showing that the subject's
# blinks were likely to blame for most epochs being dropped
epochs.drop_bad_epochs()
plot_drop_log(epochs.drop_log, subject='sample')
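Example 13 dates from an early MNE-Python release: `fiff.pick_types`, `pylab`, and `drop_bad_epochs()` have since become `mne.pick_types`, `matplotlib.pyplot`, and `drop_bad()`. A hedged modern rewrite of the drop-log portion, using the standard sample dataset (downloaded on first use; the event id and epoch window here are illustrative):

import matplotlib.pyplot as plt
import mne
from mne.viz import plot_drop_log

data_path = mne.datasets.sample.data_path()
raw = mne.io.read_raw_fif(
    data_path / 'MEG' / 'sample' / 'sample_audvis_filt-0-40_raw.fif')
events = mne.read_events(
    data_path / 'MEG' / 'sample' / 'sample_audvis_filt-0-40_raw-eve.fif')
raw.info['bads'] += ['EEG 053']  # mark one bad channel, as in the original

picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
                       exclude='bads')
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5, picks=picks,
                    baseline=(None, 0), reject=dict(eeg=80e-6, eog=150e-6))
epochs.drop_bad()  # modern name for drop_bad_epochs()
plot_drop_log(epochs.drop_log, subject='sample')
plt.close('all')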
Example 14
def save_epochs(p, subjects, in_names, in_numbers, analyses, out_names,
                out_numbers, must_match, decim, run_indices):
    """Generate epochs from raw data based on events

    Can only complete after preprocessing is complete.

    Parameters
    ----------
    p : instance of Parameters
        Analysis parameters.
    subjects : list of str
        Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
    in_names : list of str
        Names of input events.
    in_numbers : list of list of int
        Event numbers (in scored event files) associated with each name.
    analyses : list of str
        Names of analyses of interest.
    out_names : list of list of str
        Event types to make out of old ones.
    out_numbers : list of list of int
        Event numbers to convert to (e.g., [[1, 1, 2, 3, 3], ...] would create
        three event types, where the first two and last two event types from
        the original list get collapsed over).
    must_match : list of int
        Indices from the original in_names that must match in event counts
        before collapsing. Should eventually be expanded to allow for
        ratio-based collapsing.
    decim : int | list of int
        Amount to decimate.
    run_indices : array-like | None
        Run indices to include.
    """
    in_names = np.asanyarray(in_names)
    old_dict = dict()
    for n, e in zip(in_names, in_numbers):
        old_dict[n] = e

    # let's do some sanity checks
    if len(in_names) != len(in_numbers):
        raise RuntimeError('in_names (%d) must have same length as '
                           'in_numbers (%d)' %
                           (len(in_names), len(in_numbers)))
    if np.any(np.array(in_numbers) <= 0):
        raise ValueError('in_numbers must all be > 0')
    if len(out_names) != len(out_numbers):
        raise RuntimeError('out_names must have same length as out_numbers')
    for name, num in zip(out_names, out_numbers):
        num = np.array(num)
        if len(name) != len(np.unique(num[num > 0])):
            raise RuntimeError('each entry in out_names must have length '
                               'equal to the number of unique elements in the '
                               'corresponding entry in out_numbers:\n%s\n%s' %
                               (name, np.unique(num[num > 0])))
        if len(num) != len(in_names):
            raise RuntimeError('each entry in out_numbers must have the same '
                               'length as in_names')
        if (np.array(num) == 0).any():
            raise ValueError('no element of out_numbers can be zero')

    ch_namess = list()
    drop_logs = list()
    sfreqs = set()
    for si, subj in enumerate(subjects):
        if p.disp_files:
            print('  Loading raw files for subject %s.' % subj)
        epochs_dir = op.join(p.work_dir, subj, p.epochs_dir)
        if not op.isdir(epochs_dir):
            os.mkdir(epochs_dir)
        evoked_dir = op.join(p.work_dir, subj, p.inverse_dir)
        if not op.isdir(evoked_dir):
            os.mkdir(evoked_dir)
        # read in raw files
        raw_names = get_raw_fnames(p, subj, 'pca', False, False,
                                   run_indices[si])
        first_samps = []
        last_samps = []
        for raw_fname in raw_names:
            raw = read_raw_fif(raw_fname, preload=False)
            first_samps.append(raw._first_samps[0])
            last_samps.append(raw._last_samps[-1])
        raw = [read_raw_fif(fname, preload=False) for fname in raw_names]
        _fix_raw_eog_cals(raw)  # EOG epoch scales might be bad!
        raw = concatenate_raws(raw)
        # read in events
        events = _read_events(p, subj, run_indices[si], raw)
        this_decim = _handle_decim(decim[si], raw.info['sfreq'])
        new_sfreq = raw.info['sfreq'] / this_decim
        if p.disp_files:
            print('    Epoching data (decim=%s -> sfreq=%0.1f Hz).' %
                  (this_decim, new_sfreq))
        if new_sfreq not in sfreqs:
            if len(sfreqs) > 0:
                warnings.warn('resulting new sampling frequency %s not equal '
                              'to previous values %s' % (new_sfreq, sfreqs))
            sfreqs.add(new_sfreq)
        epochs_fnames, evoked_fnames = get_epochs_evokeds_fnames(
            p, subj, analyses)
        mat_file, fif_file = epochs_fnames
        if p.autoreject_thresholds:
            assert len(p.autoreject_types) > 0
            assert all(a in ('mag', 'grad', 'eeg', 'ecg', 'eog')
                       for a in p.autoreject_types)
            from autoreject import get_rejection_threshold
            print('    Computing autoreject thresholds', end='')
            rtmin = p.reject_tmin if p.reject_tmin is not None else p.tmin
            rtmax = p.reject_tmax if p.reject_tmax is not None else p.tmax
            temp_epochs = Epochs(raw,
                                 events,
                                 event_id=None,
                                 tmin=rtmin,
                                 tmax=rtmax,
                                 baseline=_get_baseline(p),
                                 proj=True,
                                 reject=None,
                                 flat=None,
                                 preload=True,
                                 decim=this_decim,
                                 reject_by_annotation=p.reject_epochs_by_annot)
            kwargs = dict()
            if 'verbose' in get_args(get_rejection_threshold):
                kwargs['verbose'] = False
            new_dict = get_rejection_threshold(temp_epochs, **kwargs)
            use_reject = dict()
            msgs = list()
            for k in p.autoreject_types:
                msgs.append('%s=%d %s' % (k, DEFAULTS['scalings'][k] *
                                          new_dict[k], DEFAULTS['units'][k]))
                use_reject[k] = new_dict[k]
            print(': ' + ', '.join(msgs))
            hdf5_file = fif_file.replace('-epo.fif', '-reject.h5')
            assert hdf5_file.endswith('.h5')
            write_hdf5(hdf5_file, use_reject, overwrite=True)
        else:
            use_reject = _handle_dict(p.reject, subj)
        # create epochs
        flat = _handle_dict(p.flat, subj)
        use_reject, use_flat = _restrict_reject_flat(use_reject, flat, raw)
        epochs = Epochs(raw,
                        events,
                        event_id=old_dict,
                        tmin=p.tmin,
                        tmax=p.tmax,
                        baseline=_get_baseline(p),
                        reject=use_reject,
                        flat=use_flat,
                        proj=p.epochs_proj,
                        preload=True,
                        decim=this_decim,
                        on_missing=p.on_missing,
                        reject_tmin=p.reject_tmin,
                        reject_tmax=p.reject_tmax,
                        reject_by_annotation=p.reject_epochs_by_annot)
        del raw
        if epochs.events.shape[0] < 1:
            epochs.plot_drop_log()
            raise ValueError('No valid epochs')
        drop_logs.append(epochs.drop_log)
        ch_namess.append(epochs.ch_names)
        # only trials that were not dropped are kept
        sfreq = epochs.info['sfreq']
        # now deal with conditions to save evoked
        if p.disp_files:
            print('    Matching trial counts and saving data to disk.')
        for var, name in ((out_names, 'out_names'), (out_numbers,
                                                     'out_numbers'),
                          (must_match, 'must_match'), (evoked_fnames,
                                                       'evoked_fnames')):
            if len(var) != len(analyses):
                raise ValueError('len(%s) (%s) != len(analyses) (%s)' %
                                 (name, len(var), len(analyses)))
        for analysis, names, numbers, match, fn in zip(analyses, out_names,
                                                       out_numbers, must_match,
                                                       evoked_fnames):
            # do matching
            numbers = np.asanyarray(numbers)
            nn = numbers[numbers >= 0]
            new_numbers = []
            for num in numbers:
                if num > 0 and num not in new_numbers:
                    # Eventually we could relax this requirement, but not
                    # having it in place is likely to cause people pain...
                    if any(num < n for n in new_numbers):
                        raise RuntimeError('each list of new_numbers must be '
                                           'monotonically increasing')
                    new_numbers.append(num)
            new_numbers = np.array(new_numbers)
            in_names_match = in_names[match]
            # use some variables to allow safe name re-use
            offset = max(epochs.events[:, 2].max(), new_numbers.max()) + 1
            safety_str = '__mnefun_copy__'
            assert len(new_numbers) == len(names)  # checked above
            if p.match_fun is None:
                # first, equalize trial counts (this will make a copy)
                e = epochs[list(in_names[numbers > 0])]
                if len(in_names_match) > 1:
                    e.equalize_event_counts(in_names_match)

                # second, collapse relevant types
                for num, name in zip(new_numbers, names):
                    collapse = [
                        x for x in in_names[num == numbers] if x in e.event_id
                    ]
                    combine_event_ids(e,
                                      collapse,
                                      {name + safety_str: num + offset},
                                      copy=False)
                for num, name in zip(new_numbers, names):
                    e.events[e.events[:, 2] == num + offset, 2] -= offset
                    e.event_id[name] = num
                    del e.event_id[name + safety_str]
            else:  # custom matching
                e = p.match_fun(epochs.copy(), analysis, nn, in_names_match,
                                names)

            # now make evoked for each out type
            evokeds = list()
            n_standard = 0
            kinds = ['standard']
            if p.every_other:
                kinds += ['even', 'odd']
            for kind in kinds:
                for name in names:
                    this_e = e[name]
                    if kind == 'even':
                        this_e = this_e[::2]
                    elif kind == 'odd':
                        this_e = this_e[1::2]
                    else:
                        assert kind == 'standard'
                    if len(this_e) > 0:
                        ave = this_e.average(picks='all')
                        stde = this_e.standard_error(picks='all')
                        if kind != 'standard':
                            ave.comment += ' %s' % (kind, )
                            stde.comment += ' %s' % (kind, )
                        evokeds.append(ave)
                        evokeds.append(stde)
                        if kind == 'standard':
                            n_standard += 2
            write_evokeds(fn, evokeds)
            naves = [
                str(n) for n in sorted(
                    set([evoked.nave for evoked in evokeds[:n_standard]]))
            ]
            naves = ', '.join(naves)
            if p.disp_files:
                print('      Analysis "%s": %s epochs / condition' %
                      (analysis, naves))

        if p.disp_files:
            print('    Saving epochs to disk.')
        if 'mat' in p.epochs_type:
            spio.savemat(mat_file,
                         dict(epochs=epochs.get_data(),
                              events=epochs.events,
                              sfreq=sfreq,
                              drop_log=epochs.drop_log),
                         do_compression=True,
                         oned_as='column')
        if 'fif' in p.epochs_type:
            epochs.save(fif_file, **_get_epo_kwargs())

    if p.plot_drop_logs:
        for subj, drop_log in zip(subjects, drop_logs):
            plot_drop_log(drop_log, threshold=p.drop_thresh, subject=subj)
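Example 14's save_epochs finishes by plotting one drop-log figure per subject. A self-contained sketch of that per-subject pattern with synthetic logs (names illustrative; `threshold` is the drop percentage below which plotting is skipped):

import matplotlib.pyplot as plt
from mne.viz import plot_drop_log

drop_logs = {
    'subj_01': (('EEG 053',), (), ('EOG 061',), ()),
    'subj_02': ((), ('MEG 2443', 'EOG 061'), (), ()),
}
for subj, drop_log in drop_logs.items():
    plot_drop_log(drop_log, threshold=5, subject=subj, show=False)
plt.close('all')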
Esempio n. 20
0
def do_preprocessing_combined(p, subjects, run_indices):
    """Do preprocessing on all raw files together.

    Calculates projection vectors to use to clean data.

    Parameters
    ----------
    p : instance of Parameters
        Analysis parameters.
    subjects : list of str
        Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
    run_indices : array-like | None
        Run indices to include.
    """
    drop_logs = list()
    for si, subj in enumerate(subjects):
        proj_nums = _proj_nums(p, subj)
        ecg_channel = _handle_dict(p.ecg_channel, subj)
        flat = _handle_dict(p.flat, subj)
        if p.disp_files:
            print('  Preprocessing subject %g/%g (%s).' %
                  (si + 1, len(subjects), subj))
        pca_dir = _get_pca_dir(p, subj)
        bad_file = get_bad_fname(p, subj, check_exists=False)

        # Create SSP projection vectors after marking bad channels
        raw_names = get_raw_fnames(p, subj, 'sss', False, False,
                                   run_indices[si])
        empty_names = get_raw_fnames(p, subj, 'sss', 'only')
        for r in raw_names + empty_names:
            if not op.isfile(r):
                raise NameError('File not found (' + r + ')')

        fir_kwargs, old_kwargs = _get_fir_kwargs(p.fir_design)
        if isinstance(p.auto_bad, float):
            print('    Creating post SSS bad channel file:\n'
                  '        %s' % bad_file)
            # do autobad
            raw = _raw_LRFCP(raw_names,
                             p.proj_sfreq,
                             None,
                             None,
                             p.n_jobs_fir,
                             p.n_jobs_resample,
                             list(),
                             None,
                             p.disp_files,
                             method='fir',
                             filter_length=p.filter_length,
                             apply_proj=False,
                             force_bads=False,
                             l_trans=p.hp_trans,
                             h_trans=p.lp_trans,
                             phase=p.phase,
                             fir_window=p.fir_window,
                             pick=True,
                             skip_by_annotation='edge',
                             **fir_kwargs)
            events = fixed_len_events(p, raw)
            rtmin = p.reject_tmin \
                if p.reject_tmin is not None else p.tmin
            rtmax = p.reject_tmax \
                if p.reject_tmax is not None else p.tmax
            # do not mark eog channels bad
            meg, eeg = 'meg' in raw, 'eeg' in raw
            picks = pick_types(raw.info,
                               meg=meg,
                               eeg=eeg,
                               eog=False,
                               exclude=[])
            assert p.auto_bad_flat is None or isinstance(p.auto_bad_flat, dict)
            assert p.auto_bad_reject is None or \
                isinstance(p.auto_bad_reject, dict) or \
                p.auto_bad_reject == 'auto'
            if p.auto_bad_reject == 'auto':
                print('    Auto bad channel selection active. '
                      'Will try using Autoreject module to '
                      'compute rejection criterion.')
                try:
                    from autoreject import get_rejection_threshold
                except ImportError:
                    raise ImportError('Autoreject module not installed.\n'
                                      'Noisy channel detection parameters '
                                      'not defined. To use autobad channel '
                                      'selection, either define rejection '
                                      'criteria or install the autoreject '
                                      'module.')
                print('    Computing thresholds.\n', end='')
                temp_epochs = Epochs(raw,
                                     events,
                                     event_id=None,
                                     tmin=rtmin,
                                     tmax=rtmax,
                                     baseline=_get_baseline(p),
                                     proj=True,
                                     reject=None,
                                     flat=None,
                                     preload=True,
                                     decim=1)
                kwargs = dict()
                if 'verbose' in get_args(get_rejection_threshold):
                    kwargs['verbose'] = False
                reject = get_rejection_threshold(temp_epochs, **kwargs)
                reject = {kk: vv for kk, vv in reject.items()}
            elif p.auto_bad_reject is None and p.auto_bad_flat is None:
                raise RuntimeError('Auto bad channel detection active. Noisy '
                                   'and flat channel detection '
                                   'parameters not defined. '
                                   'At least one criterion must be defined.')
            else:
                reject = p.auto_bad_reject
            if 'eog' in reject.keys():
                reject.pop('eog', None)
            epochs = Epochs(raw,
                            events,
                            None,
                            tmin=rtmin,
                            tmax=rtmax,
                            baseline=_get_baseline(p),
                            picks=picks,
                            reject=reject,
                            flat=p.auto_bad_flat,
                            proj=True,
                            preload=True,
                            decim=1,
                            reject_tmin=rtmin,
                            reject_tmax=rtmax)
            # channel scores from drop log
            drops = Counter([ch for d in epochs.drop_log for ch in d])
            # get rid of non-channel reasons in drop log
            scores = {
                kk: vv
                for kk, vv in drops.items() if kk in epochs.ch_names
            }
            ch_names = np.array(list(scores.keys()))
            # channel scores expressed as percentile and rank ordered
            counts = (100 * np.array([scores[ch] for ch in ch_names], float) /
                      len(epochs.drop_log))
            order = np.argsort(counts)[::-1]
            # boolean array masking out channels with <= % epochs dropped
            mask = counts[order] > p.auto_bad
            badchs = ch_names[order[mask]]
            if len(badchs) > 0:
                # Make sure we didn't get too many bad MEG or EEG channels
                for m, e, thresh in zip(
                    [True, False], [False, True],
                    [p.auto_bad_meg_thresh, p.auto_bad_eeg_thresh]):
                    picks = pick_types(epochs.info, meg=m, eeg=e, exclude=[])
                    if len(picks) > 0:
                        ch_names = [epochs.ch_names[pp] for pp in picks]
                        n_bad_type = sum(ch in ch_names for ch in badchs)
                        if n_bad_type > thresh:
                            stype = 'meg' if m else 'eeg'
                            raise RuntimeError('Too many bad %s channels '
                                               'found: %s > %s' %
                                               (stype, n_bad_type, thresh))

                print('    The following channels resulted in greater than '
                      '{:.0f}% trials dropped:\n'.format(p.auto_bad * 100))
                print(badchs)
                with open(bad_file, 'w') as f:
                    f.write('\n'.join(badchs))
        if not op.isfile(bad_file):
            print('    Clearing bad channels (no file %s)' %
                  op.sep.join(bad_file.split(op.sep)[-3:]))
            bad_file = None

        ecg_t_lims = _handle_dict(p.ecg_t_lims, subj)
        ecg_f_lims = p.ecg_f_lims

        ecg_eve = op.join(pca_dir, 'preproc_ecg-eve.fif')
        ecg_epo = op.join(pca_dir, 'preproc_ecg-epo.fif')
        ecg_proj = op.join(pca_dir, 'preproc_ecg-proj.fif')
        all_proj = op.join(pca_dir, 'preproc_all-proj.fif')

        get_projs_from = _handle_dict(p.get_projs_from, subj)
        if get_projs_from is None:
            get_projs_from = np.arange(len(raw_names))
        pre_list = [
            r for ri, r in enumerate(raw_names) if ri in get_projs_from
        ]

        projs = list()
        raw_orig = _raw_LRFCP(raw_names=pre_list,
                              sfreq=p.proj_sfreq,
                              l_freq=None,
                              h_freq=None,
                              n_jobs=p.n_jobs_fir,
                              n_jobs_resample=p.n_jobs_resample,
                              projs=projs,
                              bad_file=bad_file,
                              disp_files=p.disp_files,
                              method='fir',
                              filter_length=p.filter_length,
                              force_bads=False,
                              l_trans=p.hp_trans,
                              h_trans=p.lp_trans,
                              phase=p.phase,
                              fir_window=p.fir_window,
                              pick=True,
                              skip_by_annotation='edge',
                              **fir_kwargs)

        # Apply any user-supplied extra projectors
        if p.proj_extra is not None:
            if p.disp_files:
                print('    Adding extra projectors from "%s".' % p.proj_extra)
            projs.extend(read_proj(op.join(pca_dir, p.proj_extra)))

        proj_kwargs, p_sl = _get_proj_kwargs(p)
        #
        # Calculate and apply ERM projectors
        #
        if not p.cont_as_esss:
            if any(proj_nums[2]):
                assert proj_nums[2][2] == 0  # no EEG projectors for ERM
                if len(empty_names) == 0:
                    raise RuntimeError('Cannot compute empty-room projectors '
                                       'from continuous raw data')
                if p.disp_files:
                    print('    Computing continuous projectors using ERM.')
                # Use empty room(s), but processed the same way
                projs.extend(_compute_erm_proj(p, subj, projs, 'sss',
                                               bad_file))
            else:
                cont_proj = op.join(pca_dir, 'preproc_cont-proj.fif')
                _safe_remove(cont_proj)

        #
        # Calculate and apply the ECG projectors
        #
        if any(proj_nums[0]):
            if p.disp_files:
                print('    Computing ECG projectors...', end='')
            raw = raw_orig.copy()

            raw.filter(ecg_f_lims[0],
                       ecg_f_lims[1],
                       n_jobs=p.n_jobs_fir,
                       method='fir',
                       filter_length=p.filter_length,
                       l_trans_bandwidth=0.5,
                       h_trans_bandwidth=0.5,
                       phase='zero-double',
                       fir_window='hann',
                       skip_by_annotation='edge',
                       **old_kwargs)
            raw.add_proj(projs)
            raw.apply_proj()
            find_kwargs = dict()
            if 'reject_by_annotation' in get_args(find_ecg_events):
                find_kwargs['reject_by_annotation'] = True
            elif len(raw.annotations) > 0:
                print('    WARNING: ECG event detection will not make use of '
                      'annotations, please update MNE-Python')
            # We've already filtered the data channels above, but this
            # filters the ECG channel
            ecg_events = find_ecg_events(raw,
                                         999,
                                         ecg_channel,
                                         0.,
                                         ecg_f_lims[0],
                                         ecg_f_lims[1],
                                         qrs_threshold='auto',
                                         return_ecg=False,
                                         **find_kwargs)[0]
            use_reject, use_flat = _restrict_reject_flat(
                _handle_dict(p.ssp_ecg_reject, subj), flat, raw)
            ecg_epochs = Epochs(raw,
                                ecg_events,
                                999,
                                ecg_t_lims[0],
                                ecg_t_lims[1],
                                baseline=None,
                                reject=use_reject,
                                flat=use_flat,
                                preload=True)
            print('  obtained %d epochs from %d events.' %
                  (len(ecg_epochs), len(ecg_events)))
            if len(ecg_epochs) >= 20:
                write_events(ecg_eve, ecg_epochs.events)
                ecg_epochs.save(ecg_epo, **_get_epo_kwargs())
                desc_prefix = 'ECG-%s-%s' % tuple(ecg_t_lims)
                pr = compute_proj_wrap(ecg_epochs,
                                       p.proj_ave,
                                       n_grad=proj_nums[0][0],
                                       n_mag=proj_nums[0][1],
                                       n_eeg=proj_nums[0][2],
                                       desc_prefix=desc_prefix,
                                       **proj_kwargs)
                assert len(pr) == np.sum(proj_nums[0][::p_sl])
                write_proj(ecg_proj, pr)
                projs.extend(pr)
            else:
                plot_drop_log(ecg_epochs.drop_log)
                raw.plot(events=ecg_epochs.events)
                raise RuntimeError('Only %d/%d good ECG epochs found' %
                                   (len(ecg_epochs), len(ecg_events)))
            del raw, ecg_epochs, ecg_events
        else:
            _safe_remove([ecg_proj, ecg_eve, ecg_epo])

        #
        # Next calculate and apply the EOG projectors
        #
        for idx, kind in ((1, 'EOG'), (3, 'HEOG'), (4, 'VEOG')):
            _compute_add_eog(p, subj, raw_orig, projs, proj_nums[idx], kind,
                             pca_dir, flat, proj_kwargs, old_kwargs, p_sl)
        del proj_nums

        # save the projectors
        write_proj(all_proj, projs)

        #
        # Look at raw_orig for trial DQs now, it will be quick
        #
        raw_orig.filter(p.hp_cut,
                        p.lp_cut,
                        n_jobs=p.n_jobs_fir,
                        method='fir',
                        filter_length=p.filter_length,
                        l_trans_bandwidth=p.hp_trans,
                        phase=p.phase,
                        h_trans_bandwidth=p.lp_trans,
                        fir_window=p.fir_window,
                        skip_by_annotation='edge',
                        **fir_kwargs)
        raw_orig.add_proj(projs)
        raw_orig.apply_proj()
        # now let's epoch with 1-sec windows to look for DQs
        events = fixed_len_events(p, raw_orig)
        reject = _handle_dict(p.reject, subj)
        use_reject, use_flat = _restrict_reject_flat(reject, flat, raw_orig)
        epochs = Epochs(raw_orig,
                        events,
                        None,
                        p.tmin,
                        p.tmax,
                        preload=False,
                        baseline=_get_baseline(p),
                        reject=use_reject,
                        flat=use_flat,
                        proj=True)
        try:
            epochs.drop_bad()
        except AttributeError:  # old way
            epochs.drop_bad_epochs()
        drop_logs.append(epochs.drop_log)
        del raw_orig
        del epochs
    if p.plot_drop_logs:
        for subj, drop_log in zip(subjects, drop_logs):
            plot_drop_log(drop_log, p.drop_thresh, subject=subj)
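Example 15's auto-bad block scores channels by how often they appear as a drop reason and flags those above `p.auto_bad` percent. A self-contained sketch of that Counter-based scoring with a synthetic drop log (channel names illustrative):

from collections import Counter

import numpy as np

drop_log = (('EEG 053',), ('EEG 053', 'MEG 2443'), (), ('EEG 053',), ())
ch_names = ['EEG 053', 'MEG 2443', 'EEG 001']
auto_bad = 20.  # flag channels dropped in more than 20% of epochs

drops = Counter(ch for entry in drop_log for ch in entry)
# keep only real channel names, dropping non-channel reasons like 'TOO_SHORT'
scores = {ch: n for ch, n in drops.items() if ch in ch_names}
names = np.array(list(scores.keys()))
counts = 100 * np.array([scores[ch] for ch in names], float) / len(drop_log)
order = np.argsort(counts)[::-1]                 # worst offenders first
badchs = names[order[counts[order] > auto_bad]]
print(badchs)  # ['EEG 053'] (60% > 20%; 'MEG 2443' at exactly 20% passes)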