Example #1
def read_features(fname):
    measures_classes = dict(inspect.getmembers(sys.modules['nice.measures']))
    contents = h5_listdir(fname)
    measures = list()
    epochs = None
    if 'nice/features/order' in contents:
        measure_order = read_hdf5(fname,
                                  title='nice/features/order',
                                  slash='replace')
    else:
        measure_order = [k for k in contents if 'nice/measure/' in k]

    if any('nice/data/epochs' in k for k in contents):
        epochs = read_hdf5(fname, title='nice/data/epochs', slash='replace')
        # MNE fix
        if 'filename' in epochs['info']:
            del (epochs['info']['filename'])
        epochs = mne.EpochsArray(data=epochs.pop('_data'),
                                 info=Info(epochs.pop('info')),
                                 tmin=epochs.pop('tmin'),
                                 event_id=epochs.pop('event_id'),
                                 events=epochs.pop('events'),
                                 reject=epochs.pop('reject'),
                                 flat=epochs.pop('flat'))
    # Read all PowerSpectralDensityEstimator estimators
    estimators = [
        k for k in contents
        if 'nice/container/PowerSpectralDensityEstimator' in k
    ]
    all_estimators = {}
    for estimator_name in estimators:
        estimator_comment = estimator_name.split('/')[-1]
        this_estimator = read_psd_estimator(fname, comment=estimator_comment)
        all_estimators[estimator_comment] = this_estimator
    for content in measure_order:
        _, _, my_class_name, comment = content.split('/')
        my_class = measures_classes[my_class_name]
        if issubclass(my_class, BaseTimeLocked):
            if not epochs:
                raise RuntimeError(
                    'Something weird has happened. You want to read a '
                    'measure that depends on epochs but '
                    'I could not find any epochs in the file you gave me.')
            measures.append(my_class._read(fname, epochs, comment=comment))
        elif issubclass(my_class, BasePowerSpectralDensity):
            measures.append(
                my_class._read(fname,
                               estimators=all_estimators,
                               comment=comment))
        elif issubclass(my_class, BaseMeasure):
            measures.append(my_class._read(fname, comment=comment))
        else:
            raise ValueError('Come on--this is not a Nice class!')
    measures = Features(measures)
    return measures
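A minimal usage sketch of the reader above (the file name is hypothetical; it assumes an HDF5 file written by the nice package's own save routines):

features = read_features('sub01-features.hdf5')
print(features)  # the Features collection holding the reconstructed measures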
Example #2
def _read_container(klass, fname, comment='default'):
    data = read_hdf5(fname, _get_title(klass, comment), slash='replace')
    init_params = {k: v for k, v in data.items() if not k.endswith('_')}
    attrs = {k: v for k, v in data.items() if k.endswith('_')}
    file_info = read_hdf5(fname, title='nice/data/ch_info', slash='replace')
    if 'filename' in file_info:
        del (file_info['filename'])
    attrs['ch_info_'] = Info(file_info)
    out = klass(**init_params)
    for k, v in attrs.items():
        if k.endswith('_'):
            setattr(out, k, v)
    return out
Example #3
def _read_my_marker(klass, fname, comment='default'):
    # MANDATORY: This method should work for any marker as it is now.
    data = read_hdf5(fname, _get_title(klass, comment), slash='replace')
    init_params = {k: v for k, v in data.items() if not k.endswith('_')}
    attrs = {k: v for k, v in data.items() if k.endswith('_')}
    file_info = read_hdf5(fname, title='nice/data/ch_info', slash='replace')
    if 'filename' in file_info:
        del (file_info['filename'])
    attrs['ch_info_'] = Info(file_info)
    out = klass(**init_params)
    for k, v in attrs.items():
        if k.endswith('_'):
            setattr(out, k, v)
    return out
Example #4
def load_neighbours(paths, study=None, **kwargs):
    chanlocs_dir = paths.get_path('chanpos', study=study)
    if study in ['A', 'D', 'E']:
        from mne.externals import h5io
        full_fname = op.join(chanlocs_dir, 'neighbours.hdf5')
        return h5io.read_hdf5(full_fname)
    else:
        from scipy.io import loadmat
        full_fname = op.join(chanlocs_dir, 'neighbours.mat')
        return loadmat(full_fname, squeeze_me=True)['neighbours']
Example #5
def load_mapping(fname):
    # create new instances
    mapping = imwarp.DiffeomorphicMap(None, [])
    affine = imaffine.AffineMap(None)

    data = read_hdf5(fname + '.h5')

    mapping.__dict__ = data.get(type(mapping).__name__)
    affine.__dict__ = data.get(type(affine).__name__)

    return mapping, affine
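For context, a sketch of the writer this loader implies, assuming dipy ``DiffeomorphicMap``/``AffineMap`` objects and the ``write_hdf5`` companion of the ``read_hdf5`` used above; the function name is hypothetical:

from h5io import write_hdf5  # or mne.externals.h5io, wherever read_hdf5 above comes from

def save_mapping(fname, mapping, affine):
    # store each object's __dict__ under its class name, mirroring load_mapping
    write_hdf5(fname + '.h5',
               {type(mapping).__name__: mapping.__dict__,
                type(affine).__name__: affine.__dict__},
               overwrite=True)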
Example #6
def read_auto_reject(fname):
    """Read AutoReject object.

    Parameters
    ----------
    fname : str
        The filename where the AutoReject object is saved.

    Returns
    -------
    ar : instance of autoreject.AutoReject
    """
    state = read_hdf5(fname, title='autoreject')
    ar = AutoReject()
    ar.__setstate__(state)
    return ar
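A hedged round-trip sketch, assuming the autoreject package (where fitted ``AutoReject`` objects can be saved to HDF5) and a previously loaded ``epochs`` object; the file name is hypothetical:

ar = AutoReject()
ar.fit(epochs)                  # epochs: an existing mne.Epochs instance
ar.save('sub01-ar.h5')          # writes the state that read_auto_reject restores
ar_restored = read_auto_reject('sub01-ar.h5')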
Example #7
def read_morph(fname):
    if not fname.endswith('.h5'):
        fname += '.h5'
    morph_in = read_hdf5(fname)

    morph = dict()
    # create new instances
    morph['mapping'] = imwarp.DiffeomorphicMap(None, [])
    morph['mapping'].__dict__ = morph_in.get('mapping')
    morph['affine'] = imaffine.AffineMap(None)
    morph['affine'].__dict__ = morph_in.get('affine')

    morph['affine_reg'] = morph_in.get('affine_reg')
    morph['domain_shape'] = morph_in.get('domain_shape')

    return morph
Example #8
def read_auto_reject(fname):
    """Read AutoReject object.

    Parameters
    ----------
    fname : str
        The filename where the AutoReject object is saved.

    Returns
    -------
    ar : instance of autoreject.AutoReject
    """
    state = read_hdf5(fname, title='autoreject')
    init_kwargs = {param: state[param] for param in _INIT_PARAMS}
    ar = AutoReject(**init_kwargs)
    ar.__setstate__(state)
    return ar
Example #9
def test_chan_freq_clusters():
    from mne import create_info
    from mne.externals import h5io
    import matplotlib.pyplot as plt

    fname = op.join(data_dir, 'chan_alpha_range.hdf5')
    data_dict = h5io.read_hdf5(fname)

    try:
        info = create_info(data_dict['dimcoords'][0],
                           sfreq=250.,
                           ch_types='eeg',
                           montage='easycap-M1')
    except TypeError:
        info = create_info(data_dict['dimcoords'][0],
                           sfreq=250.,
                           ch_types='eeg')
        mntg = mne.channels.make_standard_montage('easycap-M1')
        info.set_montage(mntg)

    clst = Clusters(data_dict['stat'],
                    data_dict['clusters'],
                    data_dict['pvals'],
                    dimnames=data_dict['dimnames'],
                    dimcoords=data_dict['dimcoords'],
                    info=info,
                    description=data_dict['description'])

    topo = clst.plot(cluster_idx=1, freq=(8, 8.5))
    plt.close(topo.fig)
    clst.clusters = None
    topo = clst.plot(freq=(10, 11.5))
    plt.close(topo.fig)
    topo = clst.plot(freq=(10, 11.5), contours=4)
    plt.close(topo.fig)

    # multi axes:
    topo = clst.plot(cluster_idx=1, freq=[8, 10])
    assert len(topo.axes) == 2
    plt.close(topo.fig)

    marker_kwargs = dict(marker='+')
    topo = clst.plot(cluster_idx=1, freq=[8, 10], mark_kwargs=marker_kwargs)
    plt.close(topo.fig)
Example #10
def read_connectivity(fname):
    """Read a Connectivity object from an HDF5 file.

    Parameters
    ----------
    fname : str
        The name of the file to read the connectivity from. The extension '.h5'
        will be appended if the given filename doesn't have it already.

    Returns
    -------
    connectivity : instance of Connectivity
        The Connectivity object that was stored in the file.

    See Also
    --------
    Connectivity.save : For saving connectivity objects
    """
    if not fname.endswith('.h5'):
        fname += '.h5'

    con_dict = read_hdf5(fname, title='conpy')
    con_type = con_dict['type']
    del con_dict['type']

    if con_type == 'all-to-all':
        return VertexConnectivity(
            data=con_dict['data'],
            pairs=con_dict['pairs'],
            vertices=con_dict['vertices'],
            vertex_degree=con_dict['source_degree'],
            subject=con_dict['subject'],
        )
    elif con_type == 'label':
        labels = [Label(**l) for l in con_dict['labels']]
        return LabelConnectivity(
            data=con_dict['data'],
            pairs=con_dict['pairs'],
            labels=labels,
            label_degree=con_dict['source_degree'],
            subject=con_dict['subject'],
        )
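A brief usage sketch, assuming the conpy package and a file previously written with ``Connectivity.save`` (the path is hypothetical):

con = read_connectivity('sub01-alpha-connectivity')  # '.h5' is appended automatically
print(type(con).__name__, con.data.shape)            # VertexConnectivity or LabelConnectivity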
Example #11
def read_ged(fname):
    '''Read GED object from hdf5 file.

    Parameters
    ----------
    fname : str
        File name or full file path.

    Returns
    -------
    ged : sarna.ged.GED
        Read GED object.
    '''
    from mne.externals import h5io

    data_dict = h5io.read_hdf5(fname)
    ged = GED(eig=data_dict['eig'],
              filters=data_dict['filters'],
              patterns=data_dict['patterns'],
              description=data_dict['description'])
    return ged
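The matching writer would plausibly look like the sketch below, assuming the ``GED`` object keeps ``eig``, ``filters``, ``patterns`` and ``description`` attributes as the reader implies; ``save_ged`` is a hypothetical name:

from mne.externals import h5io

def save_ged(ged, fname, overwrite=False):
    # mirror the keys that read_ged expects
    data_dict = dict(eig=ged.eig, filters=ged.filters,
                     patterns=ged.patterns, description=ged.description)
    h5io.write_hdf5(fname, data_dict, overwrite=overwrite)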
Example #12
File: obj.py  Project: mmagnuski/borsar
def read_cluster(fname, subjects_dir=None, src=None, info=None):
    '''
    Read standard Clusters .hdf5 file and return Clusters object.
    You need to pass correct subjects_dir and src to `read_cluster` if your
    results are in source space or correct info if your results are in channel
    space.

    Parameters
    ----------
    fname : str
        File path for the file to read.
    subjects_dir : str, optional
        Path to Freesurfer subjects directory.
    src : mne.SourceSpaces, optional
        Source space that the results are represented in.
    info : mne.Info, optional
        Channel space that the results are represented in.

    Returns
    -------
    clst : Clusters
        Cluster results read from file.
    '''
    from mne.externals import h5io
    # subjects_dir = mne.utils.get_subjects_dir(subjects_dir, raise_error=True)
    data_dict = h5io.read_hdf5(fname)
    clst = Clusters(data_dict['stat'],
                    data_dict['clusters'],
                    data_dict['pvals'],
                    dimnames=data_dict['dimnames'],
                    dimcoords=data_dict['dimcoords'],
                    subject=data_dict['subject'],
                    subjects_dir=subjects_dir,
                    info=info,
                    src=src,
                    description=data_dict['description'])
    return clst
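A hedged usage sketch; the file names are hypothetical, ``info`` would be the mne.Info of the matching recording, and ``src``/``subjects_dir`` the source space and FreeSurfer directory used in the analysis:

clst = read_cluster('alpha_channel_clusters.hdf5', info=info)
clst_src = read_cluster('alpha_source_clusters.hdf5', src=src,
                        subjects_dir=subjects_dir)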
Example #13
def _load_stat(fname):
    from mne.externals import h5io

    stat = h5io.read_hdf5(fname)
    if 'clusters' in stat:
        from borsar.cluster import Clusters

        if 'src' in fname:
            info = None
            study = (stat['description']['study']
                     if 'study' in stat['description'] else 'C')
            # FIXME: src should be different when 'asy'
            src = pth.paths.get_data('fwd', study=study)['src']
            selection = stat['description']['selection']
            subject = 'fsaverage_sym' if 'asy' in selection else 'fsaverage'
            subjects_dir = pth.paths.get_path('subjects_dir')
        else:
            import sarna
            src = None
            study = stat['description']['study']
            info = pth.paths.get_data('info', study=study)
            sarna.utils.fix_channel_pos(info)
            subject, subjects_dir = None, None

        clst = Clusters(stat['stat'],
                        stat['clusters'],
                        stat['pvals'],
                        dimnames=stat['dimnames'],
                        dimcoords=stat['dimcoords'],
                        info=info,
                        src=src,
                        description=stat['description'],
                        subject=subject,
                        subjects_dir=subjects_dir)
        return clst
    else:
        return stat
Example #14
def _test_raw_reader(reader,
                     test_preloading=True,
                     test_kwargs=True,
                     boundary_decimal=2,
                     **kwargs):
    """Test reading, writing and slicing of raw classes.

    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether the reader supports ``preload=False``. If True, preloaded,
        non-preloaded, and memory-mapped reads are all tested.
    test_kwargs : bool
        Whether to test ``_init_kwargs`` support.
    boundary_decimal : int
        Number of decimals up to which the boundary should match.
    **kwargs :
        Arguments for the reader. Note: Do not use preload as kwarg.
        Use ``test_preloading`` instead.

    Returns
    -------
    raw : instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    montage = None
    if "montage" in kwargs:
        montage = kwargs['montage']
        del kwargs['montage']
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        rep = repr(raw)
        assert rep.count('<') == 1
        assert rep.count('>') == 1
        if montage is not None:
            raw.set_montage(montage)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
        picks = np.append(picks, len(raw.ch_names) - 1)  # test trigger channel
        bnd = min(int(round(raw.buffer_size_sec * raw.info['sfreq'])),
                  raw.n_times)
        slices = [
            slice(0, bnd),
            slice(bnd - 1, bnd),
            slice(3, bnd),
            slice(3, 300),
            slice(None),
            slice(1, bnd)
        ]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [
                slice(bnd, 2 * bnd),
                slice(bnd, bnd + 1),
                slice(0, bnd + 100)
            ]
        other_raws = [
            reader(preload=buffer_fname, **kwargs),
            reader(preload=False, **kwargs)
        ]
        for sl_time in slices:
            data1, times1 = raw[picks, sl_time]
            for other_raw in other_raws:
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(data1, data2)
                assert_allclose(times1, times2)
    else:
        raw = reader(**kwargs)

    full_data = raw._data
    assert raw.__class__.__name__ in repr(raw)  # to test repr
    assert raw.info.__class__.__name__ in repr(raw.info)
    assert isinstance(raw.info['dig'], (type(None), list))
    data_max = full_data.max()
    data_min = full_data.min()
    # these limits could be relaxed if we actually find data with
    # huge values (in SI units)
    assert data_max < 1e5
    assert data_min > -1e5
    if isinstance(raw.info['dig'], list):
        for di, d in enumerate(raw.info['dig']):
            assert isinstance(d, DigPoint), (di, d)

    # gh-5604
    meas_date = raw.info['meas_date']
    assert meas_date is None or meas_date >= _stamp_to_dt((0, 0))

    # test resetting raw
    if test_kwargs:
        raw2 = reader(**raw._init_kwargs)
        assert set(raw.info.keys()) == set(raw2.info.keys())
        assert_array_equal(raw.times, raw2.times)

    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw = concatenate_raws([raw])
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)
    raw3 = read_raw_fif(out_fname)
    assert set(raw.info.keys()) == set(raw3.info.keys())
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_array_almost_equal(raw.times, raw3.times)

    assert not math.isnan(raw3.info['highpass'])
    assert not math.isnan(raw3.info['lowpass'])
    assert not math.isnan(raw.info['highpass'])
    assert not math.isnan(raw.info['lowpass'])

    assert raw3.info['kit_system_id'] == raw.info['kit_system_id']

    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert concat_raw.n_times == 2 * raw.n_times
    assert concat_raw.first_samp == first_samp
    assert concat_raw.last_samp - last_samp + first_samp == last_samp + 1
    idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0]

    expected_bad_boundary_onset = raw._last_time

    assert_array_almost_equal(concat_raw.annotations.onset[idx],
                              expected_bad_boundary_onset,
                              decimal=boundary_decimal)

    if raw.info['meas_id'] is not None:
        for key in ['secs', 'usecs', 'version']:
            assert raw.info['meas_id'][key] == raw3.info['meas_id'][key]
        assert_array_equal(raw.info['meas_id']['machid'],
                           raw3.info['meas_id']['machid'])

    assert isinstance(raw.annotations, Annotations)

    # Make a "soft" test on units: They have to be valid SI units as in
    # mne.io.meas_info.valid_units, but we accept any lower/upper case for now.
    valid_units = _get_valid_units()
    valid_units_lower = [unit.lower() for unit in valid_units]
    if raw._orig_units is not None:
        assert isinstance(raw._orig_units, dict)
        for ch_name, unit in raw._orig_units.items():
            assert unit.lower() in valid_units_lower, ch_name

    # Test picking with and without preload
    if test_preloading:
        preload_kwargs = (dict(preload=True), dict(preload=False))
    else:
        preload_kwargs = (dict(), )
    n_ch = len(raw.ch_names)
    picks = rng.permutation(n_ch)
    for preload_kwarg in preload_kwargs:
        these_kwargs = kwargs.copy()
        these_kwargs.update(preload_kwarg)
        # don't use the same filename or it could create problems
        if isinstance(these_kwargs.get('preload', None), str) and \
                op.isfile(these_kwargs['preload']):
            these_kwargs['preload'] += '-1'
        whole_raw = reader(**these_kwargs)
        print(whole_raw)  # __repr__
        assert n_ch >= 2
        picks_1 = picks[:n_ch // 2]
        picks_2 = picks[n_ch // 2:]
        raw_1 = whole_raw.copy().pick(picks_1)
        raw_2 = whole_raw.copy().pick(picks_2)
        data, times = whole_raw[:]
        data_1, times_1 = raw_1[:]
        data_2, times_2 = raw_2[:]
        assert_array_equal(times, times_1)
        assert_array_equal(data[picks_1], data_1)
        assert_array_equal(
            times,
            times_2,
        )
        assert_array_equal(data[picks_2], data_2)

    # Make sure that writing info to h5 format
    # (all fields should be compatible)
    if check_version('h5py'):
        fname_h5 = op.join(tempdir, 'info.h5')
        with _writing_info_hdf5(raw.info):
            write_hdf5(fname_h5, raw.info)
        new_info = Info(read_hdf5(fname_h5))
        assert object_diff(new_info, raw.info) == ''
    return raw
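Reader test modules typically call this helper with the reader function and its keyword arguments, e.g. (a sketch; the file path is hypothetical):

raw = _test_raw_reader(read_raw_fif, fname='sample_audvis_raw.fif')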
Example #15
def gen_covariances(p, subjects, run_indices, decim):
    """Generate forward solutions

    Can only complete successfully once preprocessing is performed.

    Parameters
    ----------
    p : instance of Parameters
        Analysis parameters.
    subjects : list of str
        Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
    run_indices : array-like | None
        Run indices to include.
    decim : list of int
        The subject decimations.
    """
    for si, subj in enumerate(subjects):
        print('  Subject %2d/%2d...' % (si + 1, len(subjects)), end='')
        cov_dir = op.join(p.work_dir, subj, p.cov_dir)
        if not op.isdir(cov_dir):
            os.mkdir(cov_dir)
        has_rank_arg = 'rank' in get_args(compute_covariance)
        kwargs = dict()
        kwargs_erm = dict()
        if p.cov_rank == 'full':  # backward compat
            if has_rank_arg:
                kwargs['rank'] = 'full'
        else:
            if not has_rank_arg:
                raise RuntimeError(
                    'There is no "rank" argument of compute_covariance, '
                    'you need to update MNE-Python')
            if p.cov_rank is None:
                assert p.compute_rank  # otherwise this is weird
                kwargs['rank'] = _compute_rank(p, subj, run_indices[si])
            else:
                kwargs['rank'] = p.cov_rank
        kwargs_erm['rank'] = kwargs['rank']
        if p.force_erm_cov_rank_full and has_rank_arg:
            kwargs_erm['rank'] = 'full'
        # Use the same thresholds we used for primary Epochs
        if p.autoreject_thresholds:
            reject = get_epochs_evokeds_fnames(p, subj, [])[0][1]
            reject = reject.replace('-epo.fif', '-reject.h5')
            reject = read_hdf5(reject)
        else:
            reject = _handle_dict(p.reject, subj)
        flat = _handle_dict(p.flat, subj)

        # Make empty room cov
        if p.runs_empty:
            if len(p.runs_empty) > 1:
                raise ValueError('Too many empty rooms; undefined output!')
            new_run = safe_inserter(p.runs_empty[0], subj)
            empty_cov_name = op.join(
                cov_dir, new_run + p.pca_extra + p.inv_tag + '-cov.fif')
            empty_fif = get_raw_fnames(p, subj, 'pca', 'only', False)[0]
            raw = read_raw_fif(empty_fif, preload=True)
            raw.pick_types(meg=True, eog=True, exclude='bads')
            use_reject, use_flat = _restrict_reject_flat(reject, flat, raw)
            if 'eeg' in use_reject:
                del use_reject['eeg']
            if 'eeg' in use_flat:
                del use_flat['eeg']
            cov = compute_raw_covariance(raw,
                                         reject=use_reject,
                                         flat=use_flat,
                                         method=p.cov_method,
                                         **kwargs_erm)
            write_cov(empty_cov_name, cov)

        # Make evoked covariances
        for ii, (inv_name, inv_run) in enumerate(zip(p.inv_names, p.inv_runs)):
            cov_name = op.join(
                cov_dir,
                safe_inserter(inv_name, subj) + ('-%d' % p.lp_cut) +
                p.inv_tag + '-cov.fif')
            if run_indices[si] is None:
                ridx = inv_run
            else:
                ridx = np.intersect1d(run_indices[si], inv_run)
            # read in raw files
            raw_fnames = get_raw_fnames(p, subj, 'pca', False, False, ridx)

            raws = []
            first_samps = []
            last_samps = []
            for raw_fname in raw_fnames:
                raws.append(read_raw_fif(raw_fname, preload=False))
                first_samps.append(raws[-1]._first_samps[0])
                last_samps.append(raws[-1]._last_samps[-1])
            _fix_raw_eog_cals(raws)  # safe b/c cov only needs MEEG
            raw = concatenate_raws(raws)
            # read in events
            events = _read_events(p, subj, ridx, raw)
            if p.pick_events_cov is not None:
                old_count = sum(len(e) for e in events)
                if callable(p.pick_events_cov):
                    picker = p.pick_events_cov
                else:
                    picker = p.pick_events_cov[ii]
                events = picker(events)
                new_count = len(events)
                print('  Using %s/%s events for %s' %
                      (new_count, old_count, op.basename(cov_name)))
            # create epochs
            use_reject, use_flat = _restrict_reject_flat(reject, flat, raw)
            baseline = _get_baseline(p)
            epochs = Epochs(
                raw,
                events,
                event_id=None,
                tmin=baseline[0],
                tmax=baseline[1],
                baseline=(None, None),
                proj=False,
                reject=use_reject,
                flat=use_flat,
                preload=True,
                decim=decim[si],
                verbose='error',  # ignore decim-related warnings
                on_missing=p.on_missing,
                reject_by_annotation=p.reject_epochs_by_annot)
            epochs.pick_types(meg=True, eeg=True, exclude=[])
            cov = compute_covariance(epochs, method=p.cov_method, **kwargs)
            if kwargs.get('rank', None) not in (None, 'full'):
                want_rank = sum(kwargs['rank'].values())
                out_rank = compute_whitener(cov,
                                            epochs.info,
                                            return_rank=True,
                                            verbose='error')[2]
                if want_rank != out_rank:
                    # Hopefully we never hit this code path, but let's keep
                    # some debugging stuff around just in case
                    plot_cov(cov, epochs.info)
                    epochs_fnames, _ = get_epochs_evokeds_fnames(
                        p, subj, p.analyses)
                    epochs2 = read_epochs(epochs_fnames[1], preload=True)
                    idx = np.searchsorted(epochs.events[:, 0],
                                          epochs2.events[:, 0])
                    assert len(np.unique(idx)) == len(idx)
                    epochs = epochs[idx]
                    assert np.array_equal(epochs.events[:, 0],
                                          epochs2.events[:, 0])
                    epochs2.pick_types(meg=True, eeg=True, exclude=[])
                    import matplotlib.pyplot as plt
                    plt.figure()
                    for eps in (epochs, epochs2):
                        eps = eps.get_data().transpose([1, 0, 2])
                        eps = eps.reshape(len(eps), -1)
                        plt.plot(
                            np.log10(np.maximum(linalg.svdvals(eps), 1e-50)))
                    epochs.plot()
                    baseline = _get_baseline(p)
                    epochs2.copy().crop(*baseline).plot()
                    raise RuntimeError('Error computing rank')

            write_cov(cov_name, cov)
        print()
Example #16
    - MAE of one modality versus MAE of another modality.
"""
# Author: Denis A. Engemann <*****@*****.**>
#
# License: BSD (3-clause)

from itertools import combinations
import numpy as np
import pandas as pd
from mne.externals import h5io

IN_DEPENDENCE = './data/age_stacked_dependence_model-full.h5'
OUT_DEPENDENCE_1D = './data/age_stacked_dependence_model-full-1d.csv'
OUT_DEPENDENCE_2D = './data/age_stacked_dependence_model-full-2d.csv'

dependence = h5io.read_hdf5(IN_DEPENDENCE.format('model-full'))

# check source code for contour plotting:
# https://github.com/scikit-learn/scikit-learn/blob/1495f6924/sklearn/inspection/partial_dependence.py#L619


if False:
    preds = dependence[0]['2d'][
        'MEG envelope diag--MEG mne_envelope_cross alpha'][0]
    values = dependence[0]['2d'][
        'MEG envelope diag--MEG mne_envelope_cross alpha'][1]

    XX, YY = np.meshgrid(values[0], values[1])

    Z = preds.T
    min_pd = Z.min()
Example #17
def _test_raw_reader(reader, test_preloading=True, test_kwargs=True,
                     boundary_decimal=2, test_scaling=True, test_rank=True,
                     **kwargs):
    """Test reading, writing and slicing of raw classes.

    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether the reader supports ``preload=False``. If True, preloaded,
        non-preloaded, and memory-mapped reads are all tested.
    test_kwargs : bool
        Whether to test ``_init_kwargs`` support.
    boundary_decimal : int
        Number of decimals up to which the boundary should match.
    **kwargs :
        Arguments for the reader. Note: Do not use preload as kwarg.
        Use ``test_preloading`` instead.

    Returns
    -------
    raw : instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    montage = None
    if "montage" in kwargs:
        montage = kwargs['montage']
        del kwargs['montage']
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        rep = repr(raw)
        assert rep.count('<') == 1
        assert rep.count('>') == 1
        if montage is not None:
            raw.set_montage(montage)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
        picks = np.append(picks, len(raw.ch_names) - 1)  # test trigger channel
        bnd = min(int(round(raw.buffer_size_sec *
                            raw.info['sfreq'])), raw.n_times)
        slices = [slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd),
                  slice(3, 300), slice(None), slice(1, bnd)]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [slice(bnd, 2 * bnd), slice(bnd, bnd + 1),
                       slice(0, bnd + 100)]
        other_raws = [reader(preload=buffer_fname, **kwargs),
                      reader(preload=False, **kwargs)]
        for sl_time in slices:
            data1, times1 = raw[picks, sl_time]
            for other_raw in other_raws:
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(data1, data2)
                assert_allclose(times1, times2)

        # test projection vs cals and data units
        other_raw = reader(preload=False, **kwargs)
        other_raw.del_proj()
        eeg = meg = fnirs = False
        if 'eeg' in raw:
            eeg, atol = True, 1e-18
        elif 'grad' in raw:
            meg, atol = 'grad', 1e-24
        elif 'mag' in raw:
            meg, atol = 'mag', 1e-24
        else:
            assert 'fnirs_cw_amplitude' in raw, 'New channel type necessary?'
            fnirs, atol = 'fnirs_cw_amplitude', 1e-10
        picks = pick_types(
            other_raw.info, meg=meg, eeg=eeg, fnirs=fnirs)
        col_names = [other_raw.ch_names[pick] for pick in picks]
        proj = np.ones((1, len(picks)))
        proj /= proj.shape[1]
        proj = Projection(
            data=dict(data=proj, nrow=1, row_names=None,
                      col_names=col_names, ncol=len(picks)),
            active=False)
        assert len(other_raw.info['projs']) == 0
        other_raw.add_proj(proj)
        assert len(other_raw.info['projs']) == 1
        # Orders of projector application, data loading, and reordering
        # equivalent:
        # 1. load->apply->get
        data_load_apply_get = \
            other_raw.copy().load_data().apply_proj().get_data(picks)
        # 2. apply->get (and don't allow apply->pick)
        apply = other_raw.copy().apply_proj()
        data_apply_get = apply.get_data(picks)
        data_apply_get_0 = apply.get_data(picks[0])[0]
        with pytest.raises(RuntimeError, match='loaded'):
            apply.copy().pick(picks[0]).get_data()
        # 3. apply->load->get
        data_apply_load_get = apply.copy().load_data().get_data(picks)
        data_apply_load_get_0, data_apply_load_get_1 = \
            apply.copy().load_data().pick(picks[:2]).get_data()
        # 4. reorder->apply->load->get
        all_picks = np.arange(len(other_raw.ch_names))
        reord = np.concatenate((
            picks[1::2],
            picks[0::2],
            np.setdiff1d(all_picks, picks)))
        rev = np.argsort(reord)
        assert_array_equal(reord[rev], all_picks)
        assert_array_equal(rev[reord], all_picks)
        reorder = other_raw.copy().pick(reord)
        assert reorder.ch_names == [other_raw.ch_names[r] for r in reord]
        assert reorder.ch_names[0] == other_raw.ch_names[picks[1]]
        assert_allclose(reorder.get_data([0]), other_raw.get_data(picks[1]))
        reorder_apply = reorder.copy().apply_proj()
        assert reorder_apply.ch_names == reorder.ch_names
        assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]]
        assert_allclose(reorder_apply.get_data([0]), apply.get_data(picks[1]),
                        atol=1e-18)
        data_reorder_apply_load_get = \
            reorder_apply.load_data().get_data(rev[:len(picks)])
        data_reorder_apply_load_get_1 = \
            reorder_apply.copy().load_data().pick([0]).get_data()[0]
        assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]]
        assert (data_load_apply_get.shape ==
                data_apply_get.shape ==
                data_apply_load_get.shape ==
                data_reorder_apply_load_get.shape)
        del apply
        # first check that our data are (probably) in the right units
        data = data_load_apply_get.copy()
        data = data - np.mean(data, axis=1, keepdims=True)  # can be offsets
        np.abs(data, out=data)
        if test_scaling:
            maxval = atol * 1e16
            assert_array_less(data, maxval)
            minval = atol * 1e6
            assert_array_less(minval, np.median(data))
        else:
            atol = 1e-7 * np.median(data)  # 1e-7 * MAD
        # ranks should all be reduced by 1
        if test_rank == 'less':
            cmp = np.less
        elif test_rank is False:
            cmp = None
        else:  # anything else is like True or 'equal'
            assert test_rank is True or test_rank == 'equal', test_rank
            cmp = np.equal
        rank_load_apply_get = np.linalg.matrix_rank(data_load_apply_get)
        rank_apply_get = np.linalg.matrix_rank(data_apply_get)
        rank_apply_load_get = np.linalg.matrix_rank(data_apply_load_get)
        if cmp is not None:
            assert cmp(rank_load_apply_get, len(col_names) - 1)
            assert cmp(rank_apply_get, len(col_names) - 1)
            assert cmp(rank_apply_load_get, len(col_names) - 1)
        # and they should all match
        t_kw = dict(
            atol=atol, err_msg='before != after, likely _mult_cal_one prob')
        assert_allclose(data_apply_get[0], data_apply_get_0, **t_kw)
        assert_allclose(data_apply_load_get_1,
                        data_reorder_apply_load_get_1, **t_kw)
        assert_allclose(data_load_apply_get[0], data_apply_load_get_0, **t_kw)
        assert_allclose(data_load_apply_get, data_apply_get, **t_kw)
        assert_allclose(data_load_apply_get, data_apply_load_get, **t_kw)
        if 'eeg' in raw:
            other_raw.del_proj()
            direct = \
                other_raw.copy().load_data().set_eeg_reference().get_data()
            other_raw.set_eeg_reference(projection=True)
            assert len(other_raw.info['projs']) == 1
            this_proj = other_raw.info['projs'][0]['data']
            assert this_proj['col_names'] == col_names
            assert this_proj['data'].shape == proj['data']['data'].shape
            assert_allclose(this_proj['data'], proj['data']['data'])
            proj = other_raw.apply_proj().get_data()
            assert_allclose(proj[picks], data_load_apply_get, atol=1e-10)
            assert_allclose(proj, direct, atol=1e-10, err_msg=t_kw['err_msg'])
    else:
        raw = reader(**kwargs)
    assert_named_constants(raw.info)
    # smoke test for gh #9743
    ids = [id(ch['loc']) for ch in raw.info['chs']]
    assert len(set(ids)) == len(ids)

    full_data = raw._data
    assert raw.__class__.__name__ in repr(raw)  # to test repr
    assert raw.info.__class__.__name__ in repr(raw.info)
    assert isinstance(raw.info['dig'], (type(None), list))
    data_max = full_data.max()
    data_min = full_data.min()
    # these limits could be relaxed if we actually find data with
    # huge values (in SI units)
    assert data_max < 1e5
    assert data_min > -1e5
    if isinstance(raw.info['dig'], list):
        for di, d in enumerate(raw.info['dig']):
            assert isinstance(d, DigPoint), (di, d)

    # gh-5604
    meas_date = raw.info['meas_date']
    assert meas_date is None or meas_date >= _stamp_to_dt((0, 0))

    # test repr_html
    assert 'Good channels' in raw.info._repr_html_()

    # test resetting raw
    if test_kwargs:
        raw2 = reader(**raw._init_kwargs)
        assert set(raw.info.keys()) == set(raw2.info.keys())
        assert_array_equal(raw.times, raw2.times)

    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw = concatenate_raws([raw])
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)

    # Test saving with not correct extension
    out_fname_h5 = op.join(tempdir, 'test_raw.h5')
    with pytest.raises(IOError, match='raw must end with .fif or .fif.gz'):
        raw.save(out_fname_h5)

    raw3 = read_raw_fif(out_fname)
    assert_named_constants(raw3.info)
    assert set(raw.info.keys()) == set(raw3.info.keys())
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_allclose(raw.times, raw3.times, atol=1e-6, rtol=1e-6)

    assert not math.isnan(raw3.info['highpass'])
    assert not math.isnan(raw3.info['lowpass'])
    assert not math.isnan(raw.info['highpass'])
    assert not math.isnan(raw.info['lowpass'])

    assert raw3.info['kit_system_id'] == raw.info['kit_system_id']

    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert concat_raw.n_times == 2 * raw.n_times
    assert concat_raw.first_samp == first_samp
    assert concat_raw.last_samp - last_samp + first_samp == last_samp + 1
    idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0]

    expected_bad_boundary_onset = raw._last_time

    assert_array_almost_equal(concat_raw.annotations.onset[idx],
                              expected_bad_boundary_onset,
                              decimal=boundary_decimal)

    if raw.info['meas_id'] is not None:
        for key in ['secs', 'usecs', 'version']:
            assert raw.info['meas_id'][key] == raw3.info['meas_id'][key]
        assert_array_equal(raw.info['meas_id']['machid'],
                           raw3.info['meas_id']['machid'])

    assert isinstance(raw.annotations, Annotations)

    # Make a "soft" test on units: They have to be valid SI units as in
    # mne.io.meas_info.valid_units, but we accept any lower/upper case for now.
    valid_units = _get_valid_units()
    valid_units_lower = [unit.lower() for unit in valid_units]
    if raw._orig_units is not None:
        assert isinstance(raw._orig_units, dict)
        for ch_name, unit in raw._orig_units.items():
            assert unit.lower() in valid_units_lower, ch_name

    # Test picking with and without preload
    if test_preloading:
        preload_kwargs = (dict(preload=True), dict(preload=False))
    else:
        preload_kwargs = (dict(),)
    n_ch = len(raw.ch_names)
    picks = rng.permutation(n_ch)
    for preload_kwarg in preload_kwargs:
        these_kwargs = kwargs.copy()
        these_kwargs.update(preload_kwarg)
        # don't use the same filename or it could create problems
        if isinstance(these_kwargs.get('preload', None), str) and \
                op.isfile(these_kwargs['preload']):
            these_kwargs['preload'] += '-1'
        whole_raw = reader(**these_kwargs)
        print(whole_raw)  # __repr__
        assert n_ch >= 2
        picks_1 = picks[:n_ch // 2]
        picks_2 = picks[n_ch // 2:]
        raw_1 = whole_raw.copy().pick(picks_1)
        raw_2 = whole_raw.copy().pick(picks_2)
        data, times = whole_raw[:]
        data_1, times_1 = raw_1[:]
        data_2, times_2 = raw_2[:]
        assert_array_equal(times, times_1)
        assert_array_equal(data[picks_1], data_1)
        assert_array_equal(times, times_2,)
        assert_array_equal(data[picks_2], data_2)

    # Make sure that writing info to h5 format
    # (all fields should be compatible)
    if check_version('h5py'):
        fname_h5 = op.join(tempdir, 'info.h5')
        with _writing_info_hdf5(raw.info):
            write_hdf5(fname_h5, raw.info)
        new_info = Info(read_hdf5(fname_h5))
        assert object_diff(new_info, raw.info) == ''

    # Make sure that changing directory does not break anything
    if test_preloading:
        these_kwargs = kwargs.copy()
        key = None
        for key in ('fname',
                    'input_fname',  # artemis123
                    'vhdr_fname',  # BV
                    'pdf_fname',  # BTi
                    'directory',  # CTF
                    'filename',  # nedf
                    ):
            try:
                fname = kwargs[key]
            except KeyError:
                key = None
            else:
                break
        # len(kwargs) == 0 for the fake arange reader
        if len(kwargs):
            assert key is not None, sorted(kwargs.keys())
            dirname = op.dirname(fname)
            these_kwargs[key] = op.basename(fname)
            these_kwargs['preload'] = False
            orig_dir = os.getcwd()
            try:
                os.chdir(dirname)
                raw_chdir = reader(**these_kwargs)
            finally:
                os.chdir(orig_dir)
            raw_chdir.load_data()

    return raw
Example #18
def load_psd(path,
             study='C',
             eyes='closed',
             space='avg',
             winlen=2.,
             step=0.5,
             reg='.+',
             weight_norm='.+',
             task=None):
    '''
    Load power spectrum density for given analysis.
    '''
    from scipy.io import loadmat

    prefix = 'psd_study-{}_eyes-{}_space-{}'.format(study, eyes, space)
    if space in ['avg', 'csd']:
        prefix = prefix + '_winlen-{}_step-{}'.format(winlen, step)
    elif space == 'src':
        reg_pattern = '_reg-{:.2f}' if not isinstance(reg, str) else '_reg-{}'
        prefix = prefix + (reg_pattern + 'weightnorm-{}')
        prefix = prefix.format(reg, weight_norm)

    # all psds are in C directory for convenience
    study_dir = path.get_path('main', study='C')
    psd_dir = op.join(study_dir, 'analysis', 'psd')
    files_with_prefix = [
        f for f in os.listdir(psd_dir)
        if (f.endswith('.mat') or f.endswith('.hdf5'))
        and len(re.findall(prefix, f)) > 0
    ]

    num_good_files = len(files_with_prefix)
    if num_good_files == 0:
        msg = 'Could not find file matching prefix: {}'.format(prefix)
        raise FileNotFoundError(msg)

    if num_good_files > 1:
        from warnings import warn
        fls = ', '.join(files_with_prefix)
        warn('More than one psd file matching specified criteria: {}. '
             'Loading the first of the matching files.'.format(fls))

    fname = files_with_prefix[0]
    if fname.endswith('.mat'):
        psds_mat = loadmat(op.join(psd_dir, fname))

        # cleanup data
        if space == 'src':
            keys = ['psd', 'freq', 'subject_id']
            psds, *rest = [psds_mat[k] for k in keys]
            freq, subj_id = [x.ravel() for x in rest]
            return psds, freq, None, subj_id
        else:
            keys = ['psd', 'freq', 'ch_names', 'subj_id']
            psds, *rest = [psds_mat[k] for k in keys]
            freq, ch_names, subj_id = [x.ravel() for x in rest]
            ch_names = [ch.replace(' ', '') for ch in ch_names]
            return psds, freq, ch_names, subj_id
    elif fname.endswith('.hdf5') and space == 'src':
        from mne.externals import h5io
        temp = h5io.read_hdf5(op.join(psd_dir, fname))
        if isinstance(temp['subject_id'], list):
            temp['subject_id'] = np.array(temp['subject_id'])
        return temp['psd'], temp['freq'], None, temp['subject_id']
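A hedged usage sketch; ``pth.paths`` stands for the project's path object (as used in the other DiamSar snippets here), and the argument values are illustrative:

psds, freq, ch_names, subj_id = load_psd(pth.paths, study='C', eyes='closed',
                                         space='avg', winlen=2., step=0.5)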
Example #19
def summarize_stats_clusters(reduce_columns=True, stat_dir='stats'):
    '''Summarize multiple analyses (saved in analysis dir) in a dataframe.

    Parameters
    ----------
    reduce_columns : bool
        Whether to remove columns with no variability from the output. Defaults
        to ``True``.
    stat_dir : str
        Subdirectory to use (``'stats'``, ``'add1'`` or ``'add2'``, unless
        additional subdirectories were created).

    Returns
    -------
    df : pandas.DataFrame
        Dataframe with summarized DiamSar analyses.
    '''
    from mne.externals import h5io

    stat_dir = op.join(pth.paths.get_path('main', 'C'), 'analysis', stat_dir)
    stat_files = [f for f in os.listdir(stat_dir) if f.endswith('.hdf5')]
    n_stat = len(stat_files)

    polarity_to_str = {True: 'pos', False: 'neg'}

    # first, create an empty dataframe
    stat_params = [
        'study', 'contrast', 'space', 'N_low', 'N_high', 'N_all', 'eyes',
        'selection', 'freq_range', 'avg_freq', 'transform', 'div_by_sum'
    ]
    stat_summary = [
        'min t', 'max t', 'n signif points', 'n clusters',
        'largest cluster size', 'min cluster p', 'eff dir', 'n signif clusters'
    ]
    df = pd.DataFrame(index=np.arange(1, n_stat + 1),
                      columns=stat_params + stat_summary)

    row_idx = 0
    for fname in stat_files:
        stat = h5io.read_hdf5(op.join(stat_dir, fname))

        if 'description' not in stat:
            continue

        row_idx += 1
        # fill the basic columns
        for col in stat_params:
            value = (stat['description'][col]
                     if col in stat['description'] else np.nan)
            if isinstance(value, (list, tuple, np.ndarray)):
                value = str(value)

            df.loc[row_idx, col] = value

        # summarize clusters
        n_clst = len(stat['clusters']) if stat['clusters'] is not None else 0

        min_cluster_p = stat['pvals'].min() if n_clst > 0 else np.nan
        n_below_thresh = (stat['pvals'] < 0.05).sum() if n_clst > 0 else 0
        n_signif_points = stat['clusters'].sum() if n_clst > 0 else 0
        if n_clst > 0:
            largest_cluster_size = max([c.sum() for c in stat['clusters']])
            polarity = stat['stat'][stat['clusters'][0]].mean() > 0
            polarity_str = polarity_to_str[polarity]
            largest_cluster_direction = polarity_str
        else:
            largest_cluster_size, largest_cluster_direction = np.nan, np.nan

        df.loc[row_idx, 'min t'] = stat['stat'].min()
        df.loc[row_idx, 'max t'] = stat['stat'].max()
        df.loc[row_idx, 'n signif points'] = n_signif_points
        df.loc[row_idx, 'n clusters'] = n_clst
        df.loc[row_idx, 'min cluster p'] = min_cluster_p
        df.loc[row_idx, 'n signif clusters'] = n_below_thresh
        df.loc[row_idx, 'largest cluster size'] = largest_cluster_size
        df.loc[row_idx, 'eff dir'] = largest_cluster_direction

    # reduce columns
    df = df.loc[:row_idx, :]
    if reduce_columns:
        df = remove_columns_with_no_variability(df)

    return utils.reformat_stat_table(df)
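A short usage sketch (argument values are illustrative):

df = summarize_stats_clusters(reduce_columns=True, stat_dir='stats')
print(df.head())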
Example #20
def summarize_stats_pairs(reduce_columns=True,
                          stat_dir='stats',
                          confounds=False,
                          progressbar='text'):
    '''Summarize multiple channel pair analyses (saved in analysis dir) in a
    dataframe.

    This takes longer than ``summarize_stats`` because it also computes effect
    sizes and their bootstrap confidence intervals.

    Parameters
    ----------
    reduce_columns : bool
        Whether to remove columns with no variability from the output. Defaults
        to ``True``.

    Returns
    -------
    df : pandas.DataFrame
        Dataframe with summarized results.
    '''
    from mne.externals import h5io
    from DiamSar.utils import progressbar as pbarobj

    stat_dir = op.join(pth.paths.get_path('main', 'C'), 'analysis', stat_dir)
    stat_files = [
        f for f in os.listdir(stat_dir)
        if f.endswith('.hdf5') and 'asy_pairs' in f
    ]
    n_stat = len(stat_files)

    # first, create an empty dataframe
    stat_params = [
        'study', 'contrast', 'space', 'ch_pair', 'N_low', 'N_high', 'N_all',
        'eyes', 'selection', 'freq_range', 'avg_freq', 'transform',
        'div_by_sum'
    ]
    stats = ['t', 'p', 'es', 'ci']
    stat_summary = [x + ' 1' for x in stats] + [x + ' 2' for x in stats]
    df = pd.DataFrame(index=np.arange(1, n_stat + 1),
                      columns=stat_params + stat_summary)

    pbar = pbarobj(progressbar, n_stat * 2)
    for idx, fname in enumerate(stat_files, start=1):
        stat = h5io.read_hdf5(op.join(stat_dir, fname))

        for col in stat_params:
            df.loc[idx, col] = stat[col] if col in stat else np.nan

        # for ES and CI we need the original data:
        sel_keys = [
            'study', 'contrast', 'eyes', 'space', 'freq_range', 'avg_freq',
            'selection', 'div_by_sum', 'transform'
        ]
        kwargs = {key: stat[key] for key in sel_keys}
        data = io.prepare_data(pth.paths,
                               confounds=confounds,
                               verbose=False,
                               **kwargs)

        # add stats
        for ch_idx in range(2):
            stat_col = ch_idx * 4
            t, p = stat['stat'][ch_idx], stat['pval'][ch_idx]
            df.loc[idx, stat_summary[stat_col]] = t
            df.loc[idx, stat_summary[stat_col + 1]] = p

            if 'vs' in contrast and not confounds:
                psd_high, psd_low, ch_names = (data['hi'], data['lo'],
                                               data['ch_names'])
                esci = esci_indep_cohens_d(psd_high[:, ch_idx],
                                           psd_low[:, ch_idx])
            else:
                psd_sel, bdi_sel, ch_names = (data['hilo'], data['bdi'],
                                              data['ch_names'])

                if 'vs' in contrast:
                    onechan = psd_sel[:, [ch_idx]]
                    beh = data['bdi']
                    grp1 = beh[:, -1] == beh[0, -1]
                    data1 = np.concatenate([onechan[grp1, :], beh[grp1, :]],
                                           axis=1)
                    data2 = np.concatenate([onechan[~grp1, :], beh[~grp1, :]],
                                           axis=1)
                    esci = esci_indep_cohens_d(data1,
                                               data2,
                                               n_boot=5000,
                                               has_preds=True)
                else:
                    if confounds:
                        beh = data['bdi']
                        esci = esci_regression_r(beh, psd_sel[:, ch_idx])
                    else:
                        esci = esci_regression_r(bdi_sel, psd_sel[:, ch_idx])

            df.loc[idx, stat_summary[stat_col + 2]] = esci['es']
            df.loc[idx, stat_summary[stat_col + 3]] = esci['ci']
            pbar.update(1)

    pbar.close()

    # split into two dfs
    pair_rows = df['selection'].str.contains('pairs').values
    df = df.loc[pair_rows, :].reset_index(drop=True)

    df = remove_columns_with_no_variability(df)
    df = utils.reformat_stat_table(df)

    return df
Example #21
    p.lp_cut = 80

# Subject loop
for si, ss in enumerate(df.id.values):
    subject = f"genz{ss}"
    del ss
    subj_dir = os.path.join(defaults.megdata, subject)
    inv_fname = os.path.join(subj_dir, "inverse", f"{subject}-meg-erm-inv.fif")
    # Load raw
    print("Loading data for %s ..." % subject, end="")
    src_dir = os.path.join(subj_dir, "source")
    if not os.path.exists(src_dir):
        os.mkdir(src_dir)
    out_fname = os.path.join(src_dir, f"{state}_envcorr.h5")
    if op.isfile(out_fname):
        data = h5io.read_hdf5(out_fname)
        degree[si] = data["degree"]
        laplacian[si] = data["laplacian"]
        print(" previous run data used")
        continue

    if state == "task":
        p.pca_dir = "sss_pca_fif"
        raws = get_raw_fnames(p, subject, which="pca")
        if len(raws) == 0:
            print(" No raws found")
            continue
        raw = _concat_resamp_raws(p, subject, raws)[0]
    else:
        raw_fname = os.path.join(
            subj_dir,
Example #22

# NOTE: change the name to the intended cluster result
clu_fname = op.join(rst_dir, 'aud_left_eq_vs_vis_left_eq_0_to_200.h5')

# prepare spatial adjacency
fsaverage_src = mne.read_source_spaces(
    op.join(subjects_dir, 'fsaverage', 'bem', f'fsaverage-{spacing}-src.fif'))
fsaverage_vertices = [s['vertno'] for s in fsaverage_src]

# get info
info_data = mne.read_source_estimate(
    op.join(meg_dir, 'sample', f'sample_audvis-dSPM-{spacing}-inverse-morph-filt-sss-aud_left_eq'))
tstep = info_data.tstep

cluster_result = read_hdf5(clu_fname, title='mnepython')
clu = (cluster_result['t_obs'], cluster_result['clusters'],
       cluster_result['cluster_pv'], cluster_result['H0'])

os.environ["SUBJECTS_DIR"] = subjects_dir


# In[3]:


stc_cluster = mne.stats.summarize_clusters_stc(
    clu,
    tstep=tstep * 1000,
    vertices=fsaverage_vertices,
    subject='fsaverage')
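The summary source estimate can then be visualized in the usual MNE way; a sketch with illustrative view parameters:

brain = stc_cluster.plot(subject='fsaverage', hemi='both',
                         subjects_dir=subjects_dir,
                         time_label='temporal extent (ms)',
                         clim=dict(kind='value', pos_lims=[0, 1, 40]))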