# Cluster-based permutation test relating connectivity differences to
# behavior (corr_stats is a project-specific helper module)
stats = corr_stats.cluster_permutation_test(
    diff_cons,
    Behav,
    cluster_threshold=cluster_threshold,
    src=fs_src,
    n_permutations=1000,
    verbose=True,
    alpha=0.05,
    n_jobs=2,
    seed=10,
    return_details=True,
    max_spread=0.01)
connection_indices, bundles, bundle_ts, bundle_ps, H0 = stats
con_clust = ga_con_diff[connection_indices]

# Save some details about the permutation stats to disk
write_hdf5('{dir}NEMO_N-P_connect_corr_{c}_{f}-stats.h5'.format(dir=meg_dir,
                                                                c=cond,
                                                                f=freq),
           dict(connection_indices=connection_indices,
                bundles=bundles,
                bundle_ts=bundle_ts,
                bundle_ps=bundle_ps,
                H0=H0),
           overwrite=True)

# Save the pruned grand average connection object
con_clust.save(
    '{dir}NEMO_N-P_connect_corr_{c}_{f}-pruned-avg-connectivity.h5'.format(
        dir=meg_dir, c=cond, f=freq))
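
# A read-back sketch (assumes the matching ``read_hdf5`` counterpart of
# ``write_hdf5`` is imported; hypothetical, not shown in this snippet):
# stats_dict = read_hdf5('{dir}NEMO_N-P_connect_corr_{c}_{f}-stats.h5'.format(
#     dir=meg_dir, c=cond, f=freq))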
Example #2
def _test_raw_reader(reader,
                     test_preloading=True,
                     test_kwargs=True,
                     boundary_decimal=2,
                     test_scaling=True,
                     test_rank=True,
                     **kwargs):
    """Test reading, writing and slicing of raw classes.

    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether the reader supports ``preload=False``. If True, reading with
        preload=True, preload=False, and memory mapping to file are all
        tested.
    test_kwargs : bool
        Whether to test that the instance can be re-created from
        ``raw._init_kwargs``.
    boundary_decimal : int
        Number of decimals up to which the boundary should match.
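    test_scaling : bool
        Whether to check that the data magnitudes fall in a plausible SI
        range (see the scaling assertions below).
    test_rank : bool | 'less'
        Whether applying the averaging projector must reduce the data rank
        by exactly one (default) or merely reduce it ('less').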
    **kwargs :
        Arguments for the reader. Note: Do not use preload as kwarg.
        Use ``test_preloading`` instead.

    Returns
    -------
    raw : instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    montage = None
    if "montage" in kwargs:
        montage = kwargs['montage']
        del kwargs['montage']
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        rep = repr(raw)
        assert rep.count('<') == 1
        assert rep.count('>') == 1
        if montage is not None:
            raw.set_montage(montage)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
        picks = np.append(picks, len(raw.ch_names) - 1)  # test trigger channel
        bnd = min(int(round(raw.buffer_size_sec * raw.info['sfreq'])),
                  raw.n_times)
        slices = [
            slice(0, bnd),
            slice(bnd - 1, bnd),
            slice(3, bnd),
            slice(3, 300),
            slice(None),
            slice(1, bnd)
        ]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [
                slice(bnd, 2 * bnd),
                slice(bnd, bnd + 1),
                slice(0, bnd + 100)
            ]
        other_raws = [
            reader(preload=buffer_fname, **kwargs),
            reader(preload=False, **kwargs)
        ]
        for sl_time in slices:
            data1, times1 = raw[picks, sl_time]
            for other_raw in other_raws:
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(data1, data2)
                assert_allclose(times1, times2)

        # test projection vs cals and data units
        other_raw = reader(preload=False, **kwargs)
        other_raw.del_proj()
        eeg = meg = fnirs = False
        if 'eeg' in raw:
            eeg, atol = True, 1e-18
        elif 'grad' in raw:
            meg, atol = 'grad', 1e-24
        elif 'mag' in raw:
            meg, atol = 'mag', 1e-24
        else:
            assert 'fnirs_cw_amplitude' in raw, 'New channel type necessary?'
            fnirs, atol = 'fnirs_cw_amplitude', 1e-10
        picks = pick_types(other_raw.info, meg=meg, eeg=eeg, fnirs=fnirs)
        col_names = [other_raw.ch_names[pick] for pick in picks]
        proj = np.ones((1, len(picks)))
        proj /= proj.shape[1]
        proj = Projection(data=dict(data=proj,
                                    nrow=1,
                                    row_names=None,
                                    col_names=col_names,
                                    ncol=len(picks)),
                          active=False)
        assert len(other_raw.info['projs']) == 0
        other_raw.add_proj(proj)
        assert len(other_raw.info['projs']) == 1
        # Orders of projector application, data loading, and reordering
        # equivalent:
        # 1. load->apply->get
        data_load_apply_get = \
            other_raw.copy().load_data().apply_proj().get_data(picks)
        # 2. apply->get (and don't allow apply->pick)
        apply = other_raw.copy().apply_proj()
        data_apply_get = apply.get_data(picks)
        data_apply_get_0 = apply.get_data(picks[0])[0]
        with pytest.raises(RuntimeError, match='loaded'):
            apply.copy().pick(picks[0]).get_data()
        # 3. apply->load->get
        data_apply_load_get = apply.copy().load_data().get_data(picks)
        data_apply_load_get_0, data_apply_load_get_1 = \
            apply.copy().load_data().pick(picks[:2]).get_data()
        # 4. reorder->apply->load->get
        all_picks = np.arange(len(other_raw.ch_names))
        reord = np.concatenate(
            (picks[1::2], picks[0::2], np.setdiff1d(all_picks, picks)))
        rev = np.argsort(reord)
        assert_array_equal(reord[rev], all_picks)
        assert_array_equal(rev[reord], all_picks)
        reorder = other_raw.copy().pick(reord)
        assert reorder.ch_names == [other_raw.ch_names[r] for r in reord]
        assert reorder.ch_names[0] == other_raw.ch_names[picks[1]]
        assert_allclose(reorder.get_data([0]), other_raw.get_data(picks[1]))
        reorder_apply = reorder.copy().apply_proj()
        assert reorder_apply.ch_names == reorder.ch_names
        assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]]
        assert_allclose(reorder_apply.get_data([0]),
                        apply.get_data(picks[1]),
                        atol=1e-18)
        data_reorder_apply_load_get = \
            reorder_apply.load_data().get_data(rev[:len(picks)])
        data_reorder_apply_load_get_1 = \
            reorder_apply.copy().load_data().pick([0]).get_data()[0]
        assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]]
        assert (data_load_apply_get.shape == data_apply_get.shape ==
                data_apply_load_get.shape == data_reorder_apply_load_get.shape)
        del apply
        # first check that our data are (probably) in the right units
        data = data_load_apply_get.copy()
        data = data - np.mean(data, axis=1, keepdims=True)  # can be offsets
        np.abs(data, out=data)
        if test_scaling:
            maxval = atol * 1e16
            assert_array_less(data, maxval)
            minval = atol * 1e6
            assert_array_less(minval, np.median(data))
        else:
            atol = 1e-7 * np.median(data)  # 1e-7 * MAD
        # ranks should all be reduced by 1
        if test_rank == 'less':
            cmp = np.less
        else:
            cmp = np.equal
        rank_load_apply_get = np.linalg.matrix_rank(data_load_apply_get)
        rank_apply_get = np.linalg.matrix_rank(data_apply_get)
        rank_apply_load_get = np.linalg.matrix_rank(data_apply_load_get)
        assert cmp(rank_load_apply_get, len(col_names) - 1)
        assert cmp(rank_apply_get, len(col_names) - 1)
        assert cmp(rank_apply_load_get, len(col_names) - 1)
        # and they should all match
        t_kw = dict(atol=atol,
                    err_msg='before != after, likely _mult_cal_one prob')
        assert_allclose(data_apply_get[0], data_apply_get_0, **t_kw)
        assert_allclose(data_apply_load_get_1, data_reorder_apply_load_get_1,
                        **t_kw)
        assert_allclose(data_load_apply_get[0], data_apply_load_get_0, **t_kw)
        assert_allclose(data_load_apply_get, data_apply_get, **t_kw)
        assert_allclose(data_load_apply_get, data_apply_load_get, **t_kw)
        if 'eeg' in raw:
            other_raw.del_proj()
            direct = \
                other_raw.copy().load_data().set_eeg_reference().get_data()
            other_raw.set_eeg_reference(projection=True)
            assert len(other_raw.info['projs']) == 1
            this_proj = other_raw.info['projs'][0]['data']
            assert this_proj['col_names'] == col_names
            assert this_proj['data'].shape == proj['data']['data'].shape
            assert_allclose(this_proj['data'], proj['data']['data'])
            proj = other_raw.apply_proj().get_data()
            assert_allclose(proj[picks], data_load_apply_get, atol=1e-10)
            assert_allclose(proj, direct, atol=1e-10, err_msg=t_kw['err_msg'])
    else:
        raw = reader(**kwargs)
    assert_named_constants(raw.info)

    full_data = raw._data
    assert raw.__class__.__name__ in repr(raw)  # to test repr
    assert raw.info.__class__.__name__ in repr(raw.info)
    assert isinstance(raw.info['dig'], (type(None), list))
    data_max = full_data.max()
    data_min = full_data.min()
    # these limits could be relaxed if we actually find data with
    # huge values (in SI units)
    assert data_max < 1e5
    assert data_min > -1e5
    if isinstance(raw.info['dig'], list):
        for di, d in enumerate(raw.info['dig']):
            assert isinstance(d, DigPoint), (di, d)

    # gh-5604
    meas_date = raw.info['meas_date']
    assert meas_date is None or meas_date >= _stamp_to_dt((0, 0))

    # test resetting raw
    if test_kwargs:
        raw2 = reader(**raw._init_kwargs)
        assert set(raw.info.keys()) == set(raw2.info.keys())
        assert_array_equal(raw.times, raw2.times)

    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw = concatenate_raws([raw])
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)

    # Test saving with not correct extension
    out_fname_h5 = op.join(tempdir, 'test_raw.h5')
    with pytest.raises(IOError, match='raw must end with .fif or .fif.gz'):
        raw.save(out_fname_h5)

    raw3 = read_raw_fif(out_fname)
    assert_named_constants(raw3.info)
    assert set(raw.info.keys()) == set(raw3.info.keys())
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_array_almost_equal(raw.times, raw3.times)

    assert not math.isnan(raw3.info['highpass'])
    assert not math.isnan(raw3.info['lowpass'])
    assert not math.isnan(raw.info['highpass'])
    assert not math.isnan(raw.info['lowpass'])

    assert raw3.info['kit_system_id'] == raw.info['kit_system_id']

    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert concat_raw.n_times == 2 * raw.n_times
    assert concat_raw.first_samp == first_samp
    assert concat_raw.last_samp - last_samp + first_samp == last_samp + 1
    idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0]

    expected_bad_boundary_onset = raw._last_time

    assert_array_almost_equal(concat_raw.annotations.onset[idx],
                              expected_bad_boundary_onset,
                              decimal=boundary_decimal)

    if raw.info['meas_id'] is not None:
        for key in ['secs', 'usecs', 'version']:
            assert raw.info['meas_id'][key] == raw3.info['meas_id'][key]
        assert_array_equal(raw.info['meas_id']['machid'],
                           raw3.info['meas_id']['machid'])

    assert isinstance(raw.annotations, Annotations)

    # Make a "soft" test on units: They have to be valid SI units as in
    # mne.io.meas_info.valid_units, but we accept any lower/upper case for now.
    valid_units = _get_valid_units()
    valid_units_lower = [unit.lower() for unit in valid_units]
    if raw._orig_units is not None:
        assert isinstance(raw._orig_units, dict)
        for ch_name, unit in raw._orig_units.items():
            assert unit.lower() in valid_units_lower, ch_name

    # Test picking with and without preload
    if test_preloading:
        preload_kwargs = (dict(preload=True), dict(preload=False))
    else:
        preload_kwargs = (dict(), )
    n_ch = len(raw.ch_names)
    picks = rng.permutation(n_ch)
    for preload_kwarg in preload_kwargs:
        these_kwargs = kwargs.copy()
        these_kwargs.update(preload_kwarg)
        # don't use the same filename or it could create problems
        if isinstance(these_kwargs.get('preload', None), str) and \
                op.isfile(these_kwargs['preload']):
            these_kwargs['preload'] += '-1'
        whole_raw = reader(**these_kwargs)
        print(whole_raw)  # __repr__
        assert n_ch >= 2
        picks_1 = picks[:n_ch // 2]
        picks_2 = picks[n_ch // 2:]
        raw_1 = whole_raw.copy().pick(picks_1)
        raw_2 = whole_raw.copy().pick(picks_2)
        data, times = whole_raw[:]
        data_1, times_1 = raw_1[:]
        data_2, times_2 = raw_2[:]
        assert_array_equal(times, times_1)
        assert_array_equal(data[picks_1], data_1)
        assert_array_equal(times, times_2)
        assert_array_equal(data[picks_2], data_2)

    # Make sure that writing info to h5 format works
    # (all fields should be compatible)
    if check_version('h5py'):
        fname_h5 = op.join(tempdir, 'info.h5')
        with _writing_info_hdf5(raw.info):
            write_hdf5(fname_h5, raw.info)
        new_info = Info(read_hdf5(fname_h5))
        assert object_diff(new_info, raw.info) == ''

    # Make sure that changing directory does not break anything
    if test_preloading:
        these_kwargs = kwargs.copy()
        key = None
        for key in (
                'fname',
                'input_fname',  # artemis123
                'vhdr_fname',  # BV
                'pdf_fname',  # BTi
                'directory',  # CTF
                'filename',  # nedf
        ):
            try:
                fname = kwargs[key]
            except KeyError:
                key = None
            else:
                break
        # len(kwargs) == 0 for the fake arange reader
        if len(kwargs):
            assert key is not None, sorted(kwargs.keys())
            dirname = op.dirname(fname)
            these_kwargs[key] = op.basename(fname)
            these_kwargs['preload'] = False
            orig_dir = os.getcwd()
            try:
                os.chdir(dirname)
                raw_chdir = reader(**these_kwargs)
            finally:
                os.chdir(orig_dir)
            raw_chdir.load_data()

    return raw
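
# A minimal usage sketch for the helper above, assuming ``read_raw_fif`` is
# importable and ``fname`` points at an existing FIF file (hypothetical):
# from mne.io import read_raw_fif
# raw = _test_raw_reader(read_raw_fif, fname=fname, boundary_decimal=3)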
Example #3
# (snippet truncated at the source: the opening of the call that produces
# ``label_ts`` and the enclosing subject/band loops are missing)
                                                    raw,
                                                    hp,
                                                    lp,
                                                    cov,
                                                    fwd,
                                                    defaults.subjects_dir,
                                                    eps_fname,
                                                    fslabels,
                                                    return_generator=True)
        corr_mats[si, ix] = envelope_correlation(label_ts)
        # Compute pairwise degree source connectivity amongst labels
        degrees[si, ix] = mne.connectivity.degree(corr_mats[si, ix])
        fout_ = op.join(eps_dir, '%s_%s_fcDs.h5' % (subject, kk))
        write_hdf5(fname=fout_,
                   data={
                       'corr': corr_mats[si, ix],
                       'deg': degrees[si, ix]
                   },
                   overwrite=True)
        plt.close('all')

for ix, (kk, vv) in enumerate(defaults.bands.items()):
    corr_ = corr_mats[:, ix].mean(0)
    # Plot the group narrow-band correlation matrix
    fig, ax = plt.subplots(figsize=(4, 4))
    img = ax.imshow(corr_,
                    cmap='viridis',
                    clim=np.percentile(corr_, [5, 95]),
                    interpolation='nearest',
                    origin='lower')
    fig.suptitle('%d-%d Hz correlation matrix' % (vv[0], vv[1]))
    fig.colorbar(img, ax=ax)
    fig.tight_layout()
Example #4
def run_spatio_temporal_cluster_1samp_test(subjects,
                                           cond1,
                                           cond2,
                                           window_l,
                                           window_h,
                                           threshold=None,
                                           step_down_p=0,
                                           n_permutations=1024,
                                           n_jobs=1):
    t0 = time.time()
    contrasts = list()

    for subject in subjects:
        print(f'processing {subject}')
        # auditory
        fname_1 = op.join(
            meg_dir, subject,
            f'{subject}_audvis-dSPM-{spacing}-inverse-morph-filt-sss-{cond1}-stc'
        )
        # why `crop`: only deal with t > 0 to reduce multiple comparisons
        # why `T`: transpose to the correct shape
        stc_1 = mne.read_source_estimate(fname_1).magnitude().crop(
            window_l, window_h)

        # visual
        fname_2 = op.join(
            meg_dir, subject,
            f'{subject}_audvis-dSPM-{spacing}-inverse-morph-filt-sss-{cond2}-stc'
        )
        stc_2 = mne.read_source_estimate(fname_2).magnitude().crop(
            window_l, window_h)

        stc_diff = stc_1 - stc_2
        contrasts.append(stc_diff.data.T)

    # Get the right shape of difference data
    contrast_X = np.stack(contrasts, axis=0)
    # release memory
    del stc_1, stc_2, stc_diff, contrasts

    # prepare spatial adjacency
    fsaverage_src = mne.read_source_spaces(
        op.join(subjects_dir, 'fsaverage', 'bem',
                f'fsaverage-{spacing}-src.fif'))
    adjacency = mne.spatial_src_adjacency(fsaverage_src)

    # To use the "hat" adjustment method, sigma=1e-3 may be reasonable
    stat_fun = partial(mne.stats.ttest_1samp_no_p, sigma=1e-3)

    # Permutation test takes a long time to finish!
    t_obs, clusters, cluster_pv, H0 = \
        mne.stats.spatio_temporal_cluster_1samp_test(
            contrast_X,
            adjacency=adjacency,
            n_jobs=n_jobs,
            threshold=threshold,
            stat_fun=stat_fun,
            verbose=True)

    # save the result
    window = f'{int(window_l*1000)}_to_{int(window_h*1000)}'
    contrast_name = f'{cond1}_vs_{cond2}'
    cluster_name = op.join(rst_dir, f'{contrast_name}_{window}.h5')
    write_hdf5(cluster_name,
               dict(t_obs=t_obs,
                    clusters=clusters,
                    cluster_pv=cluster_pv,
                    H0=H0),
               title='mnepython',
               overwrite=True)
    elapsed = time.time() - t0
    print(f'Saved {cluster_name} after {timedelta(seconds=round(elapsed))}')
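
# Hypothetical invocation; conditions, window, and job count are
# placeholders, not from the source:
# run_spatio_temporal_cluster_1samp_test(subjects, 'auditory', 'visual',
#                                        window_l=0.0, window_h=0.3,
#                                        threshold=None, n_jobs=4)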
Example #5
import glob
import os.path as op

from joblib import Parallel, delayed

import mne
import h5io

import cfg  # project-specific configuration module
import lib.utils  # project-specific helpers

subjects = lib.utils.get_subjects(cfg.camcan_meg_raw_path)

subjects_dir = cfg.mne_camcan_freesurfer_path

mne.utils.set_log_level('warning')


def _make_headmodels(subject):
    # print(subject)
    # return subject
    print('running ', subject)
    error = 'None'
    try:
        mne.bem.make_watershed_bem(subject,
                                   subjects_dir=subjects_dir,
                                   overwrite=True)
    except Exception as ee:
        error = str(ee)
        print(subject, error)
    outputs = glob.glob(
        op.join(cfg.mne_camcan_freesurfer_path, subject, 'bem', '*'))
    return dict(subject=subject, bem_outputs=outputs, error=error)


out = Parallel(n_jobs=11)(delayed(_make_headmodels)(subject=subject)
                          for subject in subjects)

h5io.write_hdf5('make_head_surfaces_output.h5', out, overwrite=True)
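
# The collected outputs can be restored later with the matching reader:
# out = h5io.read_hdf5('make_head_surfaces_output.h5')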
Example #6
def save_epochs(p, subjects, in_names, in_numbers, analyses, out_names,
                out_numbers, must_match, decim, run_indices):
    """Generate epochs from raw data based on events

    Can only complete after preprocessing is complete.

    Parameters
    ----------
    p : instance of Parameters
        Analysis parameters.
    subjects : list of str
        Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
    in_names : list of str
        Names of input events.
    in_numbers : list of list of int
        Event numbers (in scored event files) associated with each name.
    analyses : list of str
        Names of the analyses of interest.
    out_names : list of list of str
        Event types to make out of old ones.
    out_numbers : list of list of int
        Event numbers to convert to (e.g., [[1, 1, 2, 3, 3], ...] would create
        three event types, where the first two and last two event types from
        the original list get collapsed over; see the worked example below).
    must_match : list of int
        Indices from the original in_names that must match in event counts
        before collapsing. Should eventually be expanded to allow for
        ratio-based collapsing.
    decim : int | list of int
        Amount to decimate.
    run_indices : array-like | None
        Run indices to include.
    """
    in_names = np.asanyarray(in_names)
    old_dict = dict()
    for n, e in zip(in_names, in_numbers):
        old_dict[n] = e

    # let's do some sanity checks
    if len(in_names) != len(in_numbers):
        raise RuntimeError('in_names (%d) must have same length as '
                           'in_numbers (%d)' %
                           (len(in_names), len(in_numbers)))
    if np.any(np.array(in_numbers) <= 0):
        raise ValueError('in_numbers must all be > 0')
    if len(out_names) != len(out_numbers):
        raise RuntimeError('out_names must have same length as out_numbers')
    for name, num in zip(out_names, out_numbers):
        num = np.array(num)
        if len(name) != len(np.unique(num[num > 0])):
            raise RuntimeError('each entry in out_names must have length '
                               'equal to the number of unique elements in the '
                               'corresponding entry in out_numbers:\n%s\n%s' %
                               (name, np.unique(num[num > 0])))
        if len(num) != len(in_names):
            raise RuntimeError('each entry in out_numbers must have the same '
                               'length as in_names')
        if (np.array(num) == 0).any():
            raise ValueError('no element of out_numbers can be zero')

    ch_namess = list()
    drop_logs = list()
    sfreqs = set()
    for si, subj in enumerate(subjects):
        if p.disp_files:
            print('  Loading raw files for subject %s.' % subj)
        epochs_dir = op.join(p.work_dir, subj, p.epochs_dir)
        if not op.isdir(epochs_dir):
            os.mkdir(epochs_dir)
        evoked_dir = op.join(p.work_dir, subj, p.inverse_dir)
        if not op.isdir(evoked_dir):
            os.mkdir(evoked_dir)
        # read in raw files
        raw_names = get_raw_fnames(p, subj, 'pca', False, False,
                                   run_indices[si])
        first_samps = []
        last_samps = []
        for raw_fname in raw_names:
            raw = read_raw_fif(raw_fname, preload=False)
            first_samps.append(raw._first_samps[0])
            last_samps.append(raw._last_samps[-1])
        raw = [read_raw_fif(fname, preload=False) for fname in raw_names]
        _fix_raw_eog_cals(raw)  # EOG epoch scales might be bad!
        raw = concatenate_raws(raw)
        # read in events
        events = _read_events(p, subj, run_indices[si], raw)
        new_sfreq = raw.info['sfreq'] / decim[si]
        if p.disp_files:
            print('    Epoching data (decim=%s -> sfreq=%0.1f Hz).' %
                  (decim[si], new_sfreq))
        if new_sfreq not in sfreqs:
            if len(sfreqs) > 0:
                warnings.warn('resulting new sampling frequency %s not equal '
                              'to previous values %s' % (new_sfreq, sfreqs))
            sfreqs.add(new_sfreq)
        epochs_fnames, evoked_fnames = get_epochs_evokeds_fnames(
            p, subj, analyses)
        mat_file, fif_file = epochs_fnames
        if p.autoreject_thresholds:
            assert len(p.autoreject_types) > 0
            assert all(a in ('mag', 'grad', 'eeg', 'ecg', 'eog')
                       for a in p.autoreject_types)
            from autoreject import get_rejection_threshold
            print('    Computing autoreject thresholds', end='')
            rtmin = p.reject_tmin if p.reject_tmin is not None else p.tmin
            rtmax = p.reject_tmax if p.reject_tmax is not None else p.tmax
            temp_epochs = Epochs(raw,
                                 events,
                                 event_id=None,
                                 tmin=rtmin,
                                 tmax=rtmax,
                                 baseline=_get_baseline(p),
                                 proj=True,
                                 reject=None,
                                 flat=None,
                                 preload=True,
                                 decim=decim[si],
                                 reject_by_annotation=p.reject_epochs_by_annot)
            kwargs = dict()
            if 'verbose' in get_args(get_rejection_threshold):
                kwargs['verbose'] = False
            new_dict = get_rejection_threshold(temp_epochs, **kwargs)
            use_reject = dict()
            msgs = list()
            for k in p.autoreject_types:
                msgs.append('%s=%d %s' % (k, DEFAULTS['scalings'][k] *
                                          new_dict[k], DEFAULTS['units'][k]))
                use_reject[k] = new_dict[k]
            print(': ' + ', '.join(msgs))
            hdf5_file = fif_file.replace('-epo.fif', '-reject.h5')
            assert hdf5_file.endswith('.h5')
            write_hdf5(hdf5_file, use_reject, overwrite=True)
        else:
            use_reject = _handle_dict(p.reject, subj)
        # create epochs
        flat = _handle_dict(p.flat, subj)
        use_reject, use_flat = _restrict_reject_flat(use_reject, flat, raw)
        epochs = Epochs(raw,
                        events,
                        event_id=old_dict,
                        tmin=p.tmin,
                        tmax=p.tmax,
                        baseline=_get_baseline(p),
                        reject=use_reject,
                        flat=use_flat,
                        proj=p.epochs_proj,
                        preload=True,
                        decim=decim[si],
                        on_missing=p.on_missing,
                        reject_tmin=p.reject_tmin,
                        reject_tmax=p.reject_tmax,
                        reject_by_annotation=p.reject_epochs_by_annot)
        del raw
        if epochs.events.shape[0] < 1:
            epochs.plot_drop_log()
            raise ValueError('No valid epochs')
        drop_logs.append(epochs.drop_log)
        ch_namess.append(epochs.ch_names)
        # only trials that were not dropped are kept
        sfreq = epochs.info['sfreq']
        # now deal with conditions to save evoked
        if p.disp_files:
            print('    Matching trial counts and saving data to disk.')
        for var, name in ((out_names, 'out_names'),
                          (out_numbers, 'out_numbers'),
                          (must_match, 'must_match'),
                          (evoked_fnames, 'evoked_fnames')):
            if len(var) != len(analyses):
                raise ValueError('len(%s) (%s) != len(analyses) (%s)' %
                                 (name, len(var), len(analyses)))
        for analysis, names, numbers, match, fn in zip(analyses, out_names,
                                                       out_numbers, must_match,
                                                       evoked_fnames):
            # do matching
            numbers = np.asanyarray(numbers)
            nn = numbers[numbers >= 0]
            new_numbers = []
            for num in numbers:
                if num > 0 and num not in new_numbers:
                    # Eventually we could relax this requirement, but not
                    # having it in place is likely to cause people pain...
                    if any(num < n for n in new_numbers):
                        raise RuntimeError('each list of new_numbers must be '
                                           'monotonically increasing')
                    new_numbers.append(num)
            new_numbers = np.array(new_numbers)
            in_names_match = in_names[match]
            # use some variables to allow safe name re-use
            offset = max(epochs.events[:, 2].max(), new_numbers.max()) + 1
            safety_str = '__mnefun_copy__'
            assert len(new_numbers) == len(names)  # checked above
            if p.match_fun is None:
                # first, equalize trial counts (this will make a copy)
                e = epochs[list(in_names[numbers > 0])]
                if len(in_names_match) > 1:
                    e.equalize_event_counts(in_names_match)

                # second, collapse relevant types
                for num, name in zip(new_numbers, names):
                    collapse = [
                        x for x in in_names[num == numbers] if x in e.event_id
                    ]
                    combine_event_ids(e,
                                      collapse,
                                      {name + safety_str: num + offset},
                                      copy=False)
                for num, name in zip(new_numbers, names):
                    e.events[e.events[:, 2] == num + offset, 2] -= offset
                    e.event_id[name] = num
                    del e.event_id[name + safety_str]
            else:  # custom matching
                e = p.match_fun(epochs.copy(), analysis, nn, in_names_match,
                                names)

            # now make evoked for each out type
            evokeds = list()
            n_standard = 0
            kinds = ['standard']
            if p.every_other:
                kinds += ['even', 'odd']
            for kind in kinds:
                for name in names:
                    this_e = e[name]
                    if kind == 'even':
                        this_e = this_e[::2]
                    elif kind == 'odd':
                        this_e = this_e[1::2]
                    else:
                        assert kind == 'standard'
                    if len(this_e) > 0:
                        ave = this_e.average(picks='all')
                        stde = this_e.standard_error(picks='all')
                        if kind != 'standard':
                            ave.comment += ' %s' % (kind, )
                            stde.comment += ' %s' % (kind, )
                        evokeds.append(ave)
                        evokeds.append(stde)
                        if kind == 'standard':
                            n_standard += 2
            write_evokeds(fn, evokeds)
            naves = [
                str(n) for n in sorted(
                    set([evoked.nave for evoked in evokeds[:n_standard]]))
            ]
            naves = ', '.join(naves)
            if p.disp_files:
                print('      Analysis "%s": %s epochs / condition' %
                      (analysis, naves))

        if p.disp_files:
            print('    Saving epochs to disk.')
        if 'mat' in p.epochs_type:
            spio.savemat(mat_file,
                         dict(epochs=epochs.get_data(),
                              events=epochs.events,
                              sfreq=sfreq,
                              drop_log=epochs.drop_log),
                         do_compression=True,
                         oned_as='column')
        if 'fif' in p.epochs_type:
            epochs.save(fif_file, **_get_epo_kwargs())

    if p.plot_drop_logs:
        for subj, drop_log in zip(subjects, drop_logs):
            plot_drop_log(drop_log, threshold=p.drop_thresh, subject=subj)
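
# Hypothetical driver sketch assuming a configured Parameters instance
# ``params``; all names and numbers below are placeholders, not from the
# source:
# save_epochs(params, ['subj_01'], in_names=['std', 'dev'],
#             in_numbers=[[1], [2]], analyses=['All'], out_names=[['All']],
#             out_numbers=[[1, 1]], must_match=[[0, 1]], decim=[2],
#             run_indices=[None])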