Example #1
def main():
    """Make reports."""
    with config.get_parallel_backend():
        parallel, run_func, _ = parallel_func(run_report,
                                              n_jobs=config.get_n_jobs())
        logs = parallel(
            run_func(cfg=get_config(subject=subject),
                     subject=subject,
                     session=session)
            for subject, session in itertools.product(config.get_subjects(),
                                                      config.get_sessions()))

        config.save_logs(logs)

        sessions = config.get_sessions()
        if not sessions:
            sessions = [None]

        if (config.get_task() is not None
                and config.get_task().lower() == 'rest'):
            msg = '    … skipping "average" report for "rest" task.'
            logger.info(**gen_log_kwargs(message=msg))
            return

        for session in sessions:
            run_report_average(cfg=get_config(subject='average'),
                               subject='average',
                               session=session)
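
The `parallel, run_func, _ = parallel_func(...)` idiom above comes from `mne.parallel.parallel_func`, which returns a joblib-style `Parallel` object together with a delayed-wrapped version of the function. A minimal, self-contained sketch of the same fan-out pattern (the `square` worker and its inputs are made up for illustration):

import itertools

from mne.parallel import parallel_func


def square(x, y):
    """Toy stand-in for a per-subject/per-session worker."""
    return x * x + y


# parallel_func returns (Parallel object, delayed-wrapped func, n_jobs).
parallel, run_func, _ = parallel_func(square, n_jobs=2)
results = parallel(run_func(x, y=y)
                   for x, y in itertools.product([1, 2, 3], [0, 10]))
print(results)  # [1, 11, 4, 14, 9, 19]
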
Example #2
def run_group_average_source(*, cfg, subject='average'):
    """Run group average in source space."""

    mne.datasets.fetch_fsaverage(subjects_dir=config.get_fs_subjects_dir())

    with config.get_parallel_backend():
        parallel, run_func, _ = parallel_func(morph_stc,
                                              n_jobs=config.get_n_jobs())
        all_morphed_stcs = parallel(
            run_func(cfg=cfg,
                     subject=subject,
                     fs_subject=config.get_fs_subject(subject),
                     session=session)
            for subject, session in itertools.product(config.get_subjects(),
                                                      config.get_sessions()))
        mean_morphed_stcs = np.array(all_morphed_stcs).mean(axis=0)

        # XXX to fix
        sessions = config.get_sessions()
        if sessions:
            session = sessions[0]
        else:
            session = None

        run_average(cfg=cfg,
                    session=session,
                    mean_morphed_stcs=mean_morphed_stcs)
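
`morph_stc` is defined elsewhere in the pipeline. Assuming it morphs each subject's source estimate to fsaverage before the grand average, a plausible minimal version would be (function name and arguments are illustrative):

import mne


def morph_stc_sketch(stc, fs_subject, subjects_dir):
    """Morph one subject's source estimate to fsaverage (illustrative)."""
    morph = mne.compute_source_morph(stc,
                                     subject_from=fs_subject,
                                     subject_to='fsaverage',
                                     subjects_dir=subjects_dir)
    return morph.apply(stc)
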
Example #3
def main():
    # Ensure we're also processing fsaverage if present
    subjects = config.get_subjects()
    if (Path(config.get_fs_subjects_dir()) / 'fsaverage').exists():
        subjects.append('fsaverage')

    with config.get_parallel_backend():
        parallel, run_func, _ = parallel_func(make_coreg_surfaces,
                                              n_jobs=config.get_n_jobs())

        parallel(run_func(get_config(), subject) for subject in subjects)
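
`make_coreg_surfaces` is the per-subject worker being parallelized here. Assuming it prepares the scalp surfaces used for coregistration, a minimal equivalent could call `mne.bem.make_scalp_surfaces` (the wrapper below is hypothetical):

import mne


def make_coreg_surfaces_sketch(subject, subjects_dir):
    """Create decimated scalp surfaces for coregistration (illustrative)."""
    mne.bem.make_scalp_surfaces(subject=subject,
                                subjects_dir=subjects_dir,
                                force=True,
                                overwrite=True)
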
Example #4
def main():
    """Run epochs."""
    # Here we use fewer n_jobs to prevent potential memory problems
    with config.get_parallel_backend():
        parallel, run_func, _ = parallel_func(run_epochs,
                                              n_jobs=max(
                                                  config.get_n_jobs() // 4, 1))
        logs = parallel(
            run_func(cfg=get_config(subject, session),
                     subject=subject,
                     session=session)
            for subject, session in itertools.product(config.get_subjects(),
                                                      config.get_sessions()))

        config.save_logs(logs)
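
Note the `max(config.get_n_jobs() // 4, 1)` throttle: epoching preloads data, so each worker holds a full copy in memory. The `run_epochs` worker itself presumably builds `mne.Epochs`; a bare-bones sketch (the time window and baseline are placeholders):

import mne


def run_epochs_sketch(raw, tmin=-0.2, tmax=0.5):
    """Cut continuous data into epochs around events (illustrative)."""
    events = mne.find_events(raw)
    return mne.Epochs(raw, events, tmin=tmin, tmax=tmax,
                      baseline=(None, 0), preload=True)
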
Example #5
def main():
    """Drop epochs based on peak-to-peak (PTP) amplitude."""
    with config.get_parallel_backend():
        parallel, run_func, _ = parallel_func(drop_ptp,
                                              n_jobs=config.get_n_jobs())
        logs = parallel(
            run_func(cfg=get_config(), subject=subject, session=session)
            for subject, session in
            itertools.product(
                config.get_subjects(),
                config.get_sessions()
            )
        )

        config.save_logs(logs)
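
`drop_ptp` rejects epochs whose peak-to-peak amplitude exceeds configured thresholds. A hedged sketch of such a worker (the threshold values are placeholders, not the pipeline's defaults):

def drop_ptp_sketch(epochs, reject=None):
    """Drop epochs exceeding peak-to-peak thresholds (illustrative)."""
    if reject is None:
        reject = dict(grad=4000e-13, mag=4e-12, eeg=150e-6)
    epochs.drop_bad(reject=reject)
    return epochs
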
Example #6
def main():
    """Apply ICA."""
    if config.spatial_filter != 'ica':
        msg = 'Skipping …'
        logger.info(**gen_log_kwargs(message=msg))
        return

    with config.get_parallel_backend():
        parallel, run_func, _ = parallel_func(apply_ica,
                                              n_jobs=config.get_n_jobs())
        logs = parallel(
            run_func(cfg=get_config(), subject=subject, session=session)
            for subject, session in itertools.product(config.get_subjects(),
                                                      config.get_sessions()))

        config.save_logs(logs)
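
`apply_ica` presumably loads the previously fitted ICA solution and removes the flagged components. A minimal sketch using MNE's public API (file name handling omitted):

import mne


def apply_ica_sketch(epochs, fname_ica):
    """Load a fitted ICA and remove its excluded components (illustrative)."""
    ica = mne.preprocessing.read_ica(fname_ica)
    # ica.exclude is assumed to already list the components to remove.
    return ica.apply(epochs.copy())
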
Example #7
def main():
    """Initialize the output directories."""
    msg = 'Running: Initializing output directories.'
    logger.info(**gen_log_kwargs(message=msg))

    with config.get_parallel_backend():
        init_dataset(cfg=get_config())
        parallel, run_func, _ = parallel_func(init_subject_dirs,
                                              n_jobs=config.get_n_jobs())
        parallel(
            run_func(cfg=get_config(), subject=subject, session=session)
            for subject, session in itertools.product(config.get_subjects(),
                                                      config.get_sessions()))

        msg = 'Completed: Initializing output directories.'
        logger.info(**gen_log_kwargs(message=msg))
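
`init_subject_dirs` likely just ensures that the per-subject (and per-session) derivatives folders exist before any worker writes to them. A minimal sketch, assuming a BIDS-style layout:

from pathlib import Path


def init_subject_dirs_sketch(deriv_root, subject, session=None):
    """Create the derivatives directory for one subject (illustrative)."""
    out_dir = Path(deriv_root) / f'sub-{subject}'
    if session is not None:
        out_dir = out_dir / f'ses-{session}'
    out_dir.mkdir(parents=True, exist_ok=True)
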
Example #8
def main() -> None:
    """Run freesurfer recon-all command on BIDS dataset.

    The script allows to run the freesurfer recon-all
    command on all subjects of your BIDS dataset. It can
    run in parallel with the --n_jobs parameter.

    It is built on top of the FreeSurfer BIDS app:

    https://github.com/BIDS-Apps/freesurfer

    and the MNE BIDS Pipeline

    https://mne.tools/mne-bids-pipeline

    You must have freesurfer available on your system.

    Run via the MNE BIDS Pipeline's `run.py`:

    python run.py --steps=freesurfer --config=your_pipeline_config.py

    """  # noqa

    logger.info('Running FreeSurfer')

    subjects = config.get_subjects()
    root_dir = config.get_bids_root()
    subjects_dir = Path(config.get_fs_subjects_dir())
    subjects_dir.mkdir(parents=True, exist_ok=True)

    with config.get_parallel_backend():
        n_jobs = config.get_n_jobs()
        parallel, run_func, _ = parallel_func(run_recon, n_jobs=n_jobs)
        parallel(run_func(root_dir, subject, fs_bids_app)
                 for subject in subjects)

        # Handle fsaverage
        fsaverage_dir = subjects_dir / 'fsaverage'
        if fsaverage_dir.exists():
            if fsaverage_dir.is_symlink():
                fsaverage_dir.unlink()
            else:
                shutil.rmtree(fsaverage_dir)

        env = os.environ
        shutil.copytree(f"{env['FREESURFER_HOME']}/subjects/fsaverage",
                        subjects_dir / 'fsaverage')
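
`run_recon` (together with the `fs_bids_app` argument) is defined elsewhere; assuming it ultimately shells out to FreeSurfer's `recon-all`, a simplified stand-in might look like this (the real pipeline goes through the FreeSurfer BIDS app, so the actual invocation differs):

import subprocess


def run_recon_sketch(t1_fname, subject, subjects_dir):
    """Run FreeSurfer's recon-all for one subject (illustrative)."""
    cmd = ['recon-all', '-all',
           '-i', str(t1_fname),
           '-s', f'sub-{subject}',
           '-sd', str(subjects_dir)]
    subprocess.run(cmd, check=True)
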
Example #9
def main():
    """Run forward."""
    if not config.run_source_estimation:
        msg = '    … skipping: run_source_estimation is set to False.'
        logger.info(**gen_log_kwargs(message=msg))
        return

    with config.get_parallel_backend():
        parallel, run_func, _ = parallel_func(run_forward,
                                              n_jobs=config.get_n_jobs())
        logs = parallel(
            run_func(cfg=get_config(subject=subject),
                     subject=subject,
                     session=session)
            for subject, session in itertools.product(config.get_subjects(),
                                                      config.get_sessions()))

        config.save_logs(logs)
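
`run_forward` presumably computes the forward solution from the coregistration transform, source space, and BEM. A minimal sketch with MNE's public API (the inputs are assumed to be prepared earlier in the pipeline):

import mne


def run_forward_sketch(info, trans, src, bem):
    """Compute the forward solution for one subject (illustrative)."""
    return mne.make_forward_solution(info, trans=trans, src=src, bem=bem,
                                     meg=True, eeg=True)
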
Example #10
def main():
    """Run maxwell_filter."""
    if not config.use_maxwell_filter:
        msg = 'Skipping …'
        logger.info(**gen_log_kwargs(message=msg))
        return

    with config.get_parallel_backend():
        parallel, run_func, _ = parallel_func(run_maxwell_filter,
                                              n_jobs=config.get_n_jobs())
        logs = parallel(
            run_func(cfg=get_config(subject, session),
                     subject=subject,
                     session=session)
            for subject, session in itertools.product(config.get_subjects(),
                                                      config.get_sessions()))

        config.save_logs(logs)
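
`run_maxwell_filter` presumably wraps `mne.preprocessing.maxwell_filter` (SSS). A minimal sketch; the fine-calibration and cross-talk files are optional and would come from the config in the real pipeline:

import mne


def run_maxwell_filter_sketch(raw, calibration=None, cross_talk=None):
    """Apply Maxwell filtering (SSS) to raw MEG data (illustrative)."""
    return mne.preprocessing.maxwell_filter(raw,
                                            calibration=calibration,
                                            cross_talk=cross_talk)
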
Example #11
def main():
    """Run cov."""
    if not config.run_source_estimation:
        msg = '    … skipping: run_source_estimation is set to False.'
        logger.info(**gen_log_kwargs(message=msg))
        return

    if config.noise_cov == "ad-hoc":
        msg = '    … skipping: using ad-hoc diagonal covariance.'
        logger.info(**gen_log_kwargs(message=msg))
        return

    with config.get_parallel_backend():
        parallel, run_func, _ = parallel_func(run_covariance,
                                              n_jobs=config.get_n_jobs())
        logs = parallel(
            run_func(cfg=get_config(), subject=subject, session=session)
            for subject, session in itertools.product(config.get_subjects(),
                                                      config.get_sessions()))

        config.save_logs(logs)
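
The early return for `noise_cov == "ad-hoc"` works because a diagonal covariance can be built on the fly later with `mne.make_ad_hoc_cov`, so nothing needs precomputing here. For the regular path, `run_covariance` presumably estimates the covariance from the data; a minimal sketch (the baseline window and estimator are placeholders):

import mne


def run_covariance_sketch(epochs):
    """Estimate noise covariance from the baseline period (illustrative)."""
    return mne.compute_covariance(epochs, tmax=0., method='shrunk')
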
Example #12
def main():
    """Run filter."""

    with config.get_parallel_backend():
        parallel, run_func, _ = parallel_func(filter_data,
                                              n_jobs=config.get_n_jobs())

        # Allow a different set of runs for each subject
        sub_run_ses = []
        for subject in config.get_subjects():
            sub_run_ses += list(
                itertools.product([subject], config.get_runs(subject=subject),
                                  config.get_sessions()))

        logs = parallel(
            run_func(cfg=get_config(subject),
                     subject=subject,
                     run=run,
                     session=session) for subject, run, session in sub_run_ses)

        config.save_logs(logs)
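
Unlike the other steps, this one fans out over `(subject, run, session)` triplets because the set of runs can differ per subject. The `filter_data` worker presumably band-pass filters the continuous recording; a minimal sketch (the cutoffs are placeholders):

def filter_data_sketch(raw, l_freq=0.1, h_freq=40.):
    """Band-pass filter continuous data in place (illustrative)."""
    raw.filter(l_freq=l_freq, h_freq=h_freq)
    return raw
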
Example #13
def run_time_decoding(*, cfg, subject, condition1, condition2, session=None):
    msg = f'Contrasting conditions: {condition1} – {condition2}'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))

    fname_epochs = BIDSPath(subject=subject,
                            session=session,
                            task=cfg.task,
                            acquisition=cfg.acq,
                            run=None,
                            recording=cfg.rec,
                            space=cfg.space,
                            suffix='epo',
                            extension='.fif',
                            datatype=cfg.datatype,
                            root=cfg.deriv_root,
                            check=False)

    epochs = mne.read_epochs(fname_epochs)
    if cfg.analyze_channels:
        # We special-case the average reference here to work around a situation
        # where e.g. `analyze_channels` might contain only a single channel:
        # `concatenate_epochs` below will then fail when trying to create /
        # apply the projection. We can avoid this by removing an existing
        # average reference projection here, and applying the average reference
        # directly – without going through a projector.
        if 'eeg' in cfg.ch_types and cfg.eeg_reference == 'average':
            epochs.set_eeg_reference('average')
        else:
            epochs.apply_proj()
        epochs.pick(cfg.analyze_channels)

    # We define the epochs and the labels
    if isinstance(cfg.conditions, dict):
        epochs_conds = [cfg.conditions[condition1], cfg.conditions[condition2]]
        cond_names = [condition1, condition2]
    else:
        epochs_conds = cond_names = [condition1, condition2]

    epochs = mne.concatenate_epochs(
        [epochs[epochs_conds[0]], epochs[epochs_conds[1]]])
    n_cond1 = len(epochs[epochs_conds[0]])
    n_cond2 = len(epochs[epochs_conds[1]])

    X = epochs.get_data()
    y = np.r_[np.ones(n_cond1), np.zeros(n_cond2)]
    with config.get_parallel_backend():
        clf = make_pipeline(
            StandardScaler(),
            LogReg(solver='liblinear', random_state=cfg.random_state,
                   n_jobs=1))

        se = SlidingEstimator(clf,
                              scoring=cfg.decoding_metric,
                              n_jobs=cfg.n_jobs)

        from sklearn.model_selection import StratifiedKFold
        cv = StratifiedKFold(shuffle=True)
        scores = cross_val_multiscore(se, X=X, y=y, cv=cv, n_jobs=1)

        # let's save the scores now
        a_vs_b = f'{cond_names[0]}+{cond_names[1]}'.replace(op.sep, '')
        processing = f'{a_vs_b}+{cfg.decoding_metric}'
        processing = processing.replace('_', '-').replace('-', '')

        fname_mat = fname_epochs.copy().update(suffix='decoding',
                                               processing=processing,
                                               extension='.mat')
        savemat(fname_mat, {'scores': scores, 'times': epochs.times})

        fname_tsv = fname_mat.copy().update(extension='.tsv')
        tabular_data = pd.DataFrame(
            dict(cond_1=[cond_names[0]] * len(epochs.times),
                 cond_2=[cond_names[1]] * len(epochs.times),
                 time=epochs.times,
                 mean_crossval_score=scores.mean(axis=0),
                 metric=[cfg.decoding_metric] * len(epochs.times)))
        tabular_data.to_csv(fname_tsv, sep='\t', index=False)
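
The `.tsv` written above is convenient for quick inspection. A hedged usage example that reads such a file back and plots the decoding time course (the file name is a placeholder for the BIDSPath-derived one):

import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('sub-01_decoding.tsv', sep='\t')
plt.plot(df['time'], df['mean_crossval_score'])
plt.axhline(0.5, color='k', linestyle='--')  # chance level for two classes
plt.xlabel('Time (s)')
plt.ylabel(f"Mean CV {df['metric'][0]}")
plt.show()
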