def run_average(cfg, session, mean_morphed_stcs):
    subject = 'average'
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=cfg.task,
                         acquisition=cfg.acq,
                         run=None,
                         processing=cfg.proc,
                         recording=cfg.rec,
                         space=cfg.space,
                         datatype=cfg.datatype,
                         root=cfg.deriv_root,
                         check=False)

    if isinstance(cfg.conditions, dict):
        conditions = list(cfg.conditions.keys())
    else:
        conditions = cfg.conditions

    for condition, stc in zip(conditions, mean_morphed_stcs):
        method = cfg.inverse_method
        cond_str = sanitize_cond_name(condition)
        inverse_str = method
        hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.
        morph_str = 'morph2fsaverage'

        fname_stc_avg = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{morph_str}+{hemi_str}')
        stc.save(fname_stc_avg)
def main():
    """Run group average in source space"""
    msg = 'Running Step 13: Grand-average source estimates'
    logger.info(gen_log_message(step=13, message=msg))

    if not config.run_source_estimation:
        msg = '    … skipping: run_source_estimation is set to False.'
        logger.info(gen_log_message(step=13, message=msg))
        return

    mne.datasets.fetch_fsaverage(subjects_dir=config.get_fs_subjects_dir())

    parallel, run_func, _ = parallel_func(morph_stc, n_jobs=config.N_JOBS)
    all_morphed_stcs = parallel(run_func(subject, session)
                                for subject, session in
                                itertools.product(config.get_subjects(),
                                                  config.get_sessions()))
    all_morphed_stcs = [morphed_stcs for morphed_stcs, subject in
                        zip(all_morphed_stcs, config.get_subjects())]
    mean_morphed_stcs = map(sum, zip(*all_morphed_stcs))

    subject = 'average'
    # XXX to fix
    if config.get_sessions():
        session = config.get_sessions()[0]
    else:
        session = None

    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=config.get_task(),
                         acquisition=config.acq,
                         run=None,
                         processing=config.proc,
                         recording=config.rec,
                         space=config.space,
                         datatype=config.get_datatype(),
                         root=config.deriv_root,
                         check=False)

    if isinstance(config.conditions, dict):
        conditions = list(config.conditions.keys())
    else:
        conditions = config.conditions

    for condition, this_stc in zip(conditions, mean_morphed_stcs):
        this_stc /= len(all_morphed_stcs)

        method = config.inverse_method
        cond_str = config.sanitize_cond_name(condition)
        inverse_str = method
        hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.
        morph_str = 'morph2fsaverage'

        fname_stc_avg = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{morph_str}+{hemi_str}')
        this_stc.save(fname_stc_avg)

    msg = 'Completed Step 13: Grand-average source estimates'
    logger.info(gen_log_message(step=13, message=msg))
def run_inverse(subject, session=None):
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=config.get_task(),
                         acquisition=config.acq,
                         run=None,
                         recording=config.rec,
                         space=config.space,
                         extension='.fif',
                         datatype=config.get_datatype(),
                         root=config.deriv_root,
                         check=False)

    fname_ave = bids_path.copy().update(suffix='ave')
    fname_fwd = bids_path.copy().update(suffix='fwd')
    fname_cov = bids_path.copy().update(suffix='cov')
    fname_inv = bids_path.copy().update(suffix='inv')

    evokeds = mne.read_evokeds(fname_ave)
    cov = mne.read_cov(fname_cov)
    forward = mne.read_forward_solution(fname_fwd)
    info = evokeds[0].info
    inverse_operator = make_inverse_operator(info,
                                             forward,
                                             cov,
                                             loose=0.2,
                                             depth=0.8,
                                             rank='info')
    write_inverse_operator(fname_inv, inverse_operator)

    # Apply inverse
    snr = 3.0
    lambda2 = 1.0 / snr**2

    if isinstance(config.conditions, dict):
        conditions = list(config.conditions.keys())
    else:
        conditions = config.conditions

    for condition, evoked in zip(conditions, evokeds):
        method = config.inverse_method
        pick_ori = None

        cond_str = config.sanitize_cond_name(condition)
        inverse_str = method
        hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.
        fname_stc = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{hemi_str}', extension=None)

        if "eeg" in config.ch_types:
            evoked.set_eeg_reference('average', projection=True)

        stc = apply_inverse(evoked=evoked,
                            inverse_operator=inverse_operator,
                            lambda2=lambda2,
                            method=method,
                            pick_ori=pick_ori)
        stc.save(fname_stc)
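# The snippets in this collection are excerpts from an MNE-BIDS-Pipeline-style
# module and omit their imports. A minimal sketch of the imports they rely on
# is given below; `config`, `logger`, and helpers such as `gen_log_kwargs`,
# `sanitize_cond_name`, `plot_events`, `plot_auto_scores`, `plot_er_psd`, and
# the `plot_decoding_scores*` functions are pipeline-internal names assumed to
# be available, not part of MNE itself.
import itertools
import os.path as op
from pathlib import Path
from types import SimpleNamespace
from typing import Any, Dict, List, Optional

import numpy as np
from scipy.io import loadmat

import mne
from mne.minimum_norm import (make_inverse_operator, apply_inverse,
                              write_inverse_operator)
from mne.parallel import parallel_func
from mne_bids import BIDSPath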
Example #4
def run_time_frequency(*, cfg, subject, session=None):
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=cfg.task,
                         acquisition=cfg.acq,
                         run=None,
                         recording=cfg.rec,
                         space=cfg.space,
                         datatype=cfg.datatype,
                         root=cfg.deriv_root,
                         check=False)

    processing = None
    if cfg.spatial_filter is not None:
        processing = 'clean'

    fname_in = bids_path.copy().update(suffix='epo',
                                       processing=processing,
                                       extension='.fif')

    msg = f'Input: {fname_in}'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))

    epochs = mne.read_epochs(fname_in)
    if cfg.analyze_channels:
        # We special-case the average reference here.
        # See 02-sliding_estimator.py for more info.
        if 'eeg' in cfg.ch_types and cfg.eeg_reference == 'average':
            epochs.set_eeg_reference('average')
        else:
            epochs.apply_proj()
        epochs.pick(cfg.analyze_channels)

    freqs = np.arange(cfg.time_frequency_freq_min, cfg.time_frequency_freq_max)
    n_cycles = freqs / 3.

    for condition in cfg.time_frequency_conditions:
        this_epochs = epochs[condition]
        power, itc = mne.time_frequency.tfr_morlet(this_epochs,
                                                   freqs=freqs,
                                                   return_itc=True,
                                                   n_cycles=n_cycles)

        condition_str = sanitize_cond_name(condition)
        power_fname_out = bids_path.copy().update(
            suffix=f'power+{condition_str}+tfr', extension='.h5')
        itc_fname_out = bids_path.copy().update(
            suffix=f'itc+{condition_str}+tfr', extension='.h5')

        power.save(power_fname_out, overwrite=True)
        itc.save(itc_fname_out, overwrite=True)
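# A hedged read-back sketch: the TFR files written above can be loaded again
# with mne.time_frequency.read_tfrs (as the report snippets further below do).
# `power_fname_out` refers to one of the .h5 paths written in the loop above.
power = mne.time_frequency.read_tfrs(power_fname_out)[0]
fig = power.plot_topo(show=False)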
def morph_stc(cfg, subject, fs_subject, session=None):
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=cfg.task,
                         acquisition=cfg.acq,
                         run=None,
                         recording=cfg.rec,
                         space=cfg.space,
                         datatype=cfg.datatype,
                         root=cfg.deriv_root,
                         check=False)

    morphed_stcs = []

    if cfg.task == 'rest':
        conditions = ['rest']
    else:
        if isinstance(cfg.conditions, dict):
            conditions = list(cfg.conditions.keys())
        else:
            conditions = cfg.conditions

    for condition in conditions:
        method = cfg.inverse_method
        cond_str = sanitize_cond_name(condition)
        inverse_str = method
        hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.
        morph_str = 'morph2fsaverage'

        fname_stc = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{hemi_str}')
        fname_stc_fsaverage = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{morph_str}+{hemi_str}')

        stc = mne.read_source_estimate(fname_stc)

        morph = mne.compute_source_morph(stc,
                                         subject_from=fs_subject,
                                         subject_to='fsaverage',
                                         subjects_dir=cfg.fs_subjects_dir)
        stc_fsaverage = morph.apply(stc)
        stc_fsaverage.save(fname_stc_fsaverage)
        morphed_stcs.append(stc_fsaverage)

        del fname_stc, fname_stc_fsaverage

    return morphed_stcs
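# The per-subject lists returned by morph_stc() can be combined into a grand
# average the same way main() does above: sum element-wise across subjects,
# then divide by the number of subjects. A sketch, assuming all_morphed_stcs
# is a list holding one such per-condition list per subject:
mean_morphed_stcs = [sum(stcs) / len(all_morphed_stcs)
                     for stcs in zip(*all_morphed_stcs)]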
Example #6
def morph_stc(subject, session=None):
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=config.get_task(),
                         acquisition=config.acq,
                         run=None,
                         recording=config.rec,
                         space=config.space,
                         datatype=config.get_datatype(),
                         root=config.deriv_root,
                         check=False)

    fs_subject = config.get_fs_subject(subject)
    fs_subjects_dir = config.get_fs_subjects_dir()

    morphed_stcs = []
    for condition in config.conditions:
        method = config.inverse_method
        cond_str = config.sanitize_cond_name(condition)
        inverse_str = method
        hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.
        morph_str = 'morph2fsaverage'

        fname_stc = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{hemi_str}')
        fname_stc_fsaverage = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{morph_str}+{hemi_str}')

        stc = mne.read_source_estimate(fname_stc)
        morph = mne.compute_source_morph(
            stc, subject_from=fs_subject, subject_to='fsaverage',
            subjects_dir=fs_subjects_dir)
        stc_fsaverage = morph.apply(stc)
        stc_fsaverage.save(fname_stc_fsaverage)
        morphed_stcs.append(stc_fsaverage)

        del fname_stc, fname_stc_fsaverage

    return morphed_stcs
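# Sketch of a driver for this version, mirroring main() above: fetch the
# fsaverage template once, then morph every subject/session combination in
# parallel. config.N_JOBS and the config helpers are pipeline assumptions.
mne.datasets.fetch_fsaverage(subjects_dir=config.get_fs_subjects_dir())
parallel, run_func, _ = parallel_func(morph_stc, n_jobs=config.N_JOBS)
all_morphed_stcs = parallel(run_func(subject, session)
                            for subject, session in
                            itertools.product(config.get_subjects(),
                                              config.get_sessions()))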
Example #7
def run_report_average(*, cfg, subject: str, session: str) -> None:
    # Group report
    import matplotlib.pyplot as plt  # nested import to help joblib

    evoked_fname = BIDSPath(subject=subject,
                            session=session,
                            task=cfg.task,
                            acquisition=cfg.acq,
                            run=None,
                            recording=cfg.rec,
                            space=cfg.space,
                            suffix='ave',
                            extension='.fif',
                            datatype=cfg.datatype,
                            root=cfg.deriv_root,
                            check=False)

    title = f'sub-{subject}'
    if session is not None:
        title += f', ses-{session}'
    if cfg.task is not None:
        title += f', task-{cfg.task}'

    rep = mne.Report(info_fname=evoked_fname,
                     subject='fsaverage',
                     subjects_dir=cfg.fs_subjects_dir,
                     title=title)
    evokeds = mne.read_evokeds(evoked_fname)
    if cfg.analyze_channels:
        for evoked in evokeds:
            evoked.pick(cfg.analyze_channels)

    method = cfg.inverse_method
    inverse_str = method
    hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.
    morph_str = 'morph2fsaverage'

    if isinstance(cfg.conditions, dict):
        conditions = list(cfg.conditions.keys())
    else:
        conditions = cfg.conditions.copy()

    conditions.extend(cfg.contrasts)

    #######################################################################
    #
    # Add events and epochs drop log stats.
    #
    add_event_counts(cfg=cfg, report=rep, session=session)

    #######################################################################
    #
    # Visualize evoked responses.
    #
    for condition, evoked in zip(conditions, evokeds):
        if condition in cfg.conditions:
            caption = f'Average: {condition}'
            section = 'Evoked'
        else:  # It's a contrast of two conditions.
            caption = f'Average Contrast: {condition[0]} – {condition[1]}'
            section = 'Contrast'

        fig = evoked.plot(spatial_colors=True, gfp=True, show=False)
        rep.add_figs_to_section(figs=fig,
                                captions=caption,
                                comments=evoked.comment,
                                section=section)

    #######################################################################
    #
    # Visualize decoding results.
    #
    if cfg.decode:
        for contrast in cfg.contrasts:
            cond_1, cond_2 = contrast
            a_vs_b = f'{cond_1}+{cond_2}'.replace(op.sep, '')
            processing = f'{a_vs_b}+{cfg.decoding_metric}'
            processing = processing.replace('_', '-').replace('-', '')
            fname_decoding_ = evoked_fname.copy().update(processing=processing,
                                                         suffix='decoding',
                                                         extension='.mat')
            decoding_data = loadmat(fname_decoding_)
            del fname_decoding_, processing, a_vs_b

            fig = plot_decoding_scores_gavg(cfg=cfg,
                                            decoding_data=decoding_data)
            caption = f'Time-by-time Decoding: {cond_1} ./. {cond_2}'
            comment = (f'Based on N={decoding_data["N"].squeeze()} '
                       f'subjects. Standard error and confidence interval '
                       f'of the mean were bootstrapped with {cfg.n_boot} '
                       f'resamples.')
            rep.add_figs_to_section(figs=fig,
                                    captions=caption,
                                    comments=comment,
                                    section='Decoding')
            del decoding_data, cond_1, cond_2, caption, comment

    #######################################################################
    #
    # Visualize inverse solutions.
    #
    for condition, evoked in zip(conditions, evokeds):
        if condition in cfg.conditions:
            caption = f'Average: {condition}'
            cond_str = config.sanitize_cond_name(condition)
        else:  # It's a contrast of two conditions.
            # XXX Will change once we process contrasts here too
            continue

        section = 'Source'
        fname_stc_avg = evoked_fname.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{morph_str}+{hemi_str}',
            extension=None)

        if op.exists(str(fname_stc_avg) + "-lh.stc"):
            stc = mne.read_source_estimate(fname_stc_avg, subject='fsaverage')
            _, peak_time = stc.get_peak()

            # Plot using 3d backend if available, and use Matplotlib
            # otherwise.
            if mne.viz.get_3d_backend() is not None:
                brain = stc.plot(views=['lat'],
                                 hemi='both',
                                 initial_time=peak_time,
                                 backend='pyvistaqt',
                                 time_viewer=True,
                                 show_traces=True,
                                 subjects_dir=cfg.fs_subjects_dir)
                brain.toggle_interface()
                figs = brain._renderer.figure
                captions = caption
            else:
                fig_lh = plt.figure()
                fig_rh = plt.figure()

                brain_lh = stc.plot(views='lat',
                                    hemi='lh',
                                    initial_time=peak_time,
                                    backend='matplotlib',
                                    figure=fig_lh,
                                    subjects_dir=cfg.fs_subjects_dir)
                brain_rh = stc.plot(views='lat',
                                    hemi='rh',
                                    initial_time=peak_time,
                                    backend='matplotlib',
                                    figure=fig_rh,
                                    subjects_dir=cfg.fs_subjects_dir)
                figs = [brain_lh, brain_rh]
                captions = [f'{caption} - left', f'{caption} - right']

            rep.add_figs_to_section(figs=figs,
                                    captions=captions,
                                    section='Sources')

            del peak_time

    fname_report = evoked_fname.copy().update(task=cfg.task,
                                              suffix='report',
                                              extension='.html')
    rep.save(fname=fname_report, open_browser=False, overwrite=True)

    plt.close('all')  # close all figures to save memory
Example #8
def run_report(*, cfg, subject, session=None):
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=cfg.task,
                         acquisition=cfg.acq,
                         run=None,
                         recording=cfg.rec,
                         space=cfg.space,
                         extension='.fif',
                         datatype=cfg.datatype,
                         root=cfg.deriv_root,
                         check=False)

    fname_ave = bids_path.copy().update(suffix='ave')
    if cfg.use_template_mri:
        fname_trans = 'fsaverage'
        has_trans = True
    else:
        fname_trans = bids_path.copy().update(suffix='trans')
        has_trans = op.exists(fname_trans)

    fname_epo = bids_path.copy().update(processing='clean', suffix='epo')
    fname_ica = bids_path.copy().update(suffix='ica')
    fname_decoding = fname_epo.copy().update(processing=None,
                                             suffix='decoding',
                                             extension='.mat')
    fname_tfr_pow = bids_path.copy().update(suffix='power+condition+tfr',
                                            extension='.h5')

    title = f'sub-{subject}'
    if session is not None:
        title += f', ses-{session}'
    if cfg.task is not None:
        title += f', task-{cfg.task}'

    params: Dict[str, Any] = dict(info_fname=fname_epo,
                                  raw_psd=True,
                                  subject=cfg.fs_subject,
                                  title=title)
    if has_trans:
        params['subjects_dir'] = cfg.fs_subjects_dir

    rep = mne.Report(**params)
    rep_kwargs: Dict[str, Any] = dict(data_path=fname_ave.fpath.parent,
                                      verbose=False)
    if not has_trans:
        rep_kwargs['render_bem'] = False

    if cfg.task is not None:
        rep_kwargs['pattern'] = f'*_task-{cfg.task}*'
    if mne.viz.get_3d_backend() is not None:
        with mne.viz.use_3d_backend('pyvistaqt'):
            rep.parse_folder(**rep_kwargs)
    else:
        rep.parse_folder(**rep_kwargs)

    # Visualize automated noisy channel detection.
    if cfg.find_noisy_channels_meg:
        figs, captions = plot_auto_scores(cfg=cfg,
                                          subject=subject,
                                          session=session)
        rep.add_figs_to_section(figs=figs,
                                captions=captions,
                                section='Data Quality')

    # Visualize events.
    if cfg.task.lower() != 'rest':
        events_fig = plot_events(cfg=cfg, subject=subject, session=session)
        rep.add_figs_to_section(figs=events_fig,
                                captions='Events in filtered continuous data',
                                section='Events')

    ###########################################################################
    #
    # Visualize effect of ICA artifact rejection.
    #
    if cfg.spatial_filter == 'ica':
        epochs = mne.read_epochs(fname_epo)
        ica = mne.preprocessing.read_ica(fname_ica)
        fig = ica.plot_overlay(epochs.average(), show=False)
        rep.add_figs_to_section(
            fig,
            captions=f'Evoked response (across all epochs) '
            f'before and after ICA '
            f'({len(ica.exclude)} ICs removed)',
            section='ICA')

    ###########################################################################
    #
    # Visualize TFR as topography.
    #
    if cfg.time_frequency_conditions is None:
        conditions = []
    elif isinstance(cfg.time_frequency_conditions, dict):
        conditions = list(cfg.time_frequency_conditions.keys())
    else:
        conditions = cfg.time_frequency_conditions.copy()

    for condition in conditions:
        cond = config.sanitize_cond_name(condition)
        fname_tfr_pow_cond = str(fname_tfr_pow.copy()).replace(
            "+condition+", f"+{cond}+")
        power = mne.time_frequency.read_tfrs(fname_tfr_pow_cond)
        fig = power[0].plot_topo(show=False,
                                 fig_facecolor='w',
                                 font_color='k',
                                 border='k')
        rep.add_figs_to_section(figs=fig,
                                captions=f"TFR Power: {condition}",
                                section="TFR")

    ###########################################################################
    #
    # Visualize evoked responses.
    #
    if cfg.conditions is None:
        conditions = []
    elif isinstance(cfg.conditions, dict):
        conditions = list(cfg.conditions.keys())
    else:
        conditions = cfg.conditions.copy()

    conditions.extend(cfg.contrasts)

    if conditions:
        evokeds = mne.read_evokeds(fname_ave)
    else:
        evokeds = []

    for condition, evoked in zip(conditions, evokeds):
        if cfg.analyze_channels:
            evoked.pick(cfg.analyze_channels)

        if condition in cfg.conditions:
            caption = f'Condition: {condition}'
            section = 'Evoked'
        else:  # It's a contrast of two conditions.
            caption = f'Contrast: {condition[0]} – {condition[1]}'
            section = 'Contrast'

        fig = evoked.plot(spatial_colors=True, gfp=True, show=False)
        rep.add_figs_to_section(figs=fig,
                                captions=caption,
                                comments=evoked.comment,
                                section=section)

    ###########################################################################
    #
    # Visualize decoding results.
    #
    if cfg.decode:
        epochs = mne.read_epochs(fname_epo)

        for contrast in cfg.contrasts:
            cond_1, cond_2 = contrast
            a_vs_b = f'{cond_1}+{cond_2}'.replace(op.sep, '')
            processing = f'{a_vs_b}+{cfg.decoding_metric}'
            processing = processing.replace('_', '-').replace('-', '')
            fname_decoding_ = (fname_decoding.copy().update(
                processing=processing))
            decoding_data = loadmat(fname_decoding_)
            del fname_decoding_, processing, a_vs_b

            fig = plot_decoding_scores(
                times=epochs.times,
                cross_val_scores=decoding_data['scores'],
                metric=cfg.decoding_metric)

            caption = f'Time-by-time Decoding: {cond_1} ./. {cond_2}'
            comment = (f'{len(epochs[cond_1])} × {cond_1} ./. '
                       f'{len(epochs[cond_2])} × {cond_2}')
            rep.add_figs_to_section(figs=fig,
                                    captions=caption,
                                    comments=comment,
                                    section='Decoding')
            del decoding_data, cond_1, cond_2, caption, comment

        del epochs

    ###########################################################################
    #
    # Visualize the coregistration & inverse solutions.
    #
    if has_trans:
        evokeds = mne.read_evokeds(fname_ave)

        # Omit our custom coreg plot here – this is now handled through
        # parse_folder() automatically. Keep the following code around for
        # future reference.
        #
        # # We can only plot the coregistration if we have a valid 3d backend.
        # if mne.viz.get_3d_backend() is not None:
        #     fig = mne.viz.plot_alignment(evoked.info, fname_trans,
        #                                  subject=cfg.fs_subject,
        #                                  subjects_dir=cfg.fs_subjects_dir,
        #                                  meg=True, dig=True, eeg=True)
        #     rep.add_figs_to_section(figs=fig, captions='Coregistration',
        #                             section='Coregistration')
        # else:
        #     msg = ('Cannot render sensor alignment (coregistration) because '
        #            'no usable 3d backend was found.')
        #     logger.warning(gen_log_message(message=msg,
        #                                    subject=subject, session=session))

        for condition, evoked in zip(conditions, evokeds):
            msg = f'Rendering inverse solution for {evoked.comment} …'
            logger.info(**gen_log_kwargs(
                message=msg, subject=subject, session=session))

            if condition in cfg.conditions:
                full_condition = config.sanitize_cond_name(evoked.comment)
                caption = f'Condition: {full_condition}'
                del full_condition
            else:  # It's a contrast of two conditions.
                # XXX Will change once we process contrasts here too
                continue

            method = cfg.inverse_method
            cond_str = config.sanitize_cond_name(condition)
            inverse_str = method
            hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.

            fname_stc = bids_path.copy().update(
                suffix=f'{cond_str}+{inverse_str}+{hemi_str}', extension=None)

            if op.exists(str(fname_stc) + "-lh.stc"):
                stc = mne.read_source_estimate(fname_stc,
                                               subject=cfg.fs_subject)
                _, peak_time = stc.get_peak()

                # Plot using 3d backend if available, and use Matplotlib
                # otherwise.
                import matplotlib.pyplot as plt

                if mne.viz.get_3d_backend() is not None:
                    brain = stc.plot(views=['lat'],
                                     hemi='split',
                                     initial_time=peak_time,
                                     backend='pyvistaqt',
                                     time_viewer=True,
                                     subjects_dir=cfg.fs_subjects_dir)
                    brain.toggle_interface()
                    brain._renderer.plotter.reset_camera()
                    brain._renderer.plotter.subplot(0, 0)
                    brain._renderer.plotter.reset_camera()
                    figs, ax = plt.subplots(figsize=(15, 10))
                    ax.imshow(brain.screenshot(time_viewer=True))
                    ax.axis('off')
                    comments = evoked.comment
                    captions = caption
                else:
                    fig_lh = plt.figure()
                    fig_rh = plt.figure()

                    brain_lh = stc.plot(views='lat',
                                        hemi='lh',
                                        initial_time=peak_time,
                                        backend='matplotlib',
                                        subjects_dir=cfg.fs_subjects_dir,
                                        figure=fig_lh)
                    brain_rh = stc.plot(views='lat',
                                        hemi='rh',
                                        initial_time=peak_time,
                                        subjects_dir=cfg.fs_subjects_dir,
                                        backend='matplotlib',
                                        figure=fig_rh)
                    figs = [brain_lh, brain_rh]
                    comments = [
                        f'{evoked.comment} - left hemisphere',
                        f'{evoked.comment} - right hemisphere'
                    ]
                    captions = [f'{caption} - left', f'{caption} - right']

                rep.add_figs_to_section(figs=figs,
                                        captions=captions,
                                        comments=comments,
                                        section='Sources')
                del peak_time

    if cfg.process_er:
        fig_er_psd = plot_er_psd(cfg=cfg, subject=subject, session=session)
        rep.add_figs_to_section(figs=fig_er_psd,
                                captions='Empty-Room Power Spectral Density '
                                '(after filtering)',
                                section='Empty-Room')

    fname_report = bids_path.copy().update(suffix='report', extension='.html')
    rep.save(fname=fname_report, open_browser=False, overwrite=True)
    import matplotlib.pyplot as plt  # nested import to help joblib
    plt.close('all')  # close all figures to save memory
Example #9
def run_report(subject, session=None):
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=config.get_task(),
                         acquisition=config.acq,
                         run=None,
                         recording=config.rec,
                         space=config.space,
                         extension='.fif',
                         datatype=config.get_datatype(),
                         root=config.deriv_root,
                         check=False)

    fname_ave = bids_path.copy().update(suffix='ave')
    fname_trans = bids_path.copy().update(suffix='trans')
    fname_epo = bids_path.copy().update(suffix='epo')
    fname_ica = bids_path.copy().update(suffix='ica')
    fname_decoding = fname_epo.copy().update(suffix='decoding',
                                             extension='.mat')

    fs_subject = config.get_fs_subject(subject)
    fs_subjects_dir = config.get_fs_subjects_dir()

    params: Dict[str, Any] = dict(info_fname=fname_ave, raw_psd=True)
    if op.exists(fname_trans):
        params['subject'] = fs_subject
        params['subjects_dir'] = fs_subjects_dir

    rep = mne.Report(**params)
    rep_kwargs: Dict[str, Any] = dict(data_path=fname_ave.fpath.parent,
                                      verbose=False)
    if not op.exists(fname_trans):
        rep_kwargs['render_bem'] = False

    task = config.get_task()
    if task is not None:
        rep_kwargs['pattern'] = f'*_task-{task}*'
    if mne.viz.get_3d_backend() is not None:
        with mne.viz.use_3d_backend('pyvista'):
            rep.parse_folder(**rep_kwargs)
    else:
        rep.parse_folder(**rep_kwargs)

    # Visualize automated noisy channel detection.
    if config.find_noisy_channels_meg:
        figs, captions = plot_auto_scores(subject=subject, session=session)
        rep.add_figs_to_section(figs=figs,
                                captions=captions,
                                section='Data Quality')

    # Visualize events.
    events_fig = plot_events(subject=subject, session=session)
    rep.add_figs_to_section(figs=events_fig,
                            captions='Events in filtered continuous data',
                            section='Events')

    ###########################################################################
    #
    # Visualize effect of ICA artifact rejection.
    #
    if config.use_ica:
        epochs = mne.read_epochs(fname_epo)
        ica = mne.preprocessing.read_ica(fname_ica)
        fig = ica.plot_overlay(epochs.average(), show=False)
        rep.add_figs_to_section(fig,
                                captions='Evoked response (across all epochs) '
                                'before and after ICA',
                                section='ICA')

    ###########################################################################
    #
    # Visualize evoked responses.
    #
    conditions: List[Condition_T] = list(config.conditions)
    conditions.extend(config.contrasts)
    evokeds = mne.read_evokeds(fname_ave)
    if config.analyze_channels:
        for evoked in evokeds:
            evoked.pick(config.analyze_channels)

    for condition, evoked in zip(conditions, evokeds):
        if condition in config.conditions:
            caption = f'Condition: {condition}'
            section = 'Evoked'
        else:  # It's a contrast of two conditions.
            caption = f'Contrast: {condition[0]} – {condition[1]}'
            section = 'Contrast'

        fig = evoked.plot(spatial_colors=True, gfp=True, show=False)
        rep.add_figs_to_section(figs=fig,
                                captions=caption,
                                comments=evoked.comment,
                                section=section)

    ###########################################################################
    #
    # Visualize decoding results.
    #
    if config.decode:
        epochs = mne.read_epochs(fname_epo)

        for contrast in config.contrasts:
            cond_1, cond_2 = contrast
            a_vs_b = f'{cond_1}-{cond_2}'.replace(op.sep, '')
            processing = f'{a_vs_b}+{config.decoding_metric}'
            processing = processing.replace('_', '-').replace('-', '')
            fname_decoding_ = (fname_decoding.copy().update(
                processing=processing))
            decoding_data = loadmat(fname_decoding_)
            del fname_decoding_, processing, a_vs_b

            fig = plot_decoding_scores(
                times=epochs.times,
                cross_val_scores=decoding_data['scores'],
                metric=config.decoding_metric)

            caption = f'Time-by-time Decoding: {cond_1} ./. {cond_2}'
            comment = (f'{len(epochs[cond_1])} × {cond_1} ./. '
                       f'{len(epochs[cond_2])} × {cond_2}')
            rep.add_figs_to_section(figs=fig,
                                    captions=caption,
                                    comments=comment,
                                    section='Decoding')
            del decoding_data, cond_1, cond_2, caption, comment

        del epochs

    ###########################################################################
    #
    # Visualize the coregistration & inverse solutions.
    #
    evokeds = mne.read_evokeds(fname_ave)

    if op.exists(fname_trans):
        # We can only plot the coregistration if we have a valid 3d backend.
        if mne.viz.get_3d_backend() is not None:
            fig = mne.viz.plot_alignment(evokeds[0].info,
                                         fname_trans,
                                         subject=fs_subject,
                                         subjects_dir=fs_subjects_dir,
                                         meg=True,
                                         dig=True,
                                         eeg=True)
            rep.add_figs_to_section(figs=fig,
                                    captions='Coregistration',
                                    section='Coregistration')
        else:
            msg = ('Cannot render sensor alignment (coregistration) because '
                   'no usable 3d backend was found.')
            logger.warning(
                gen_log_message(message=msg,
                                step=99,
                                subject=subject,
                                session=session))

        for condition, evoked in zip(conditions, evokeds):
            msg = f'Rendering inverse solution for {evoked.comment} …'
            logger.info(
                gen_log_message(message=msg,
                                step=99,
                                subject=subject,
                                session=session))

            if condition in config.conditions:
                full_condition = config.sanitize_cond_name(evoked.comment)
                caption = f'Condition: {full_condition}'
                del full_condition
            else:  # It's a contrast of two conditions.
                # XXX Will change once we process contrasts here too
                continue

            method = config.inverse_method
            cond_str = config.sanitize_cond_name(condition)
            inverse_str = method
            hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.

            fname_stc = bids_path.copy().update(
                suffix=f'{cond_str}+{inverse_str}+{hemi_str}', extension=None)

            if op.exists(str(fname_stc) + "-lh.stc"):
                stc = mne.read_source_estimate(fname_stc, subject=fs_subject)
                _, peak_time = stc.get_peak()

                # Plot using 3d backend if available, and use Matplotlib
                # otherwise.
                import matplotlib.pyplot as plt

                if mne.viz.get_3d_backend() is not None:
                    brain = stc.plot(views=['lat'],
                                     hemi='split',
                                     initial_time=peak_time,
                                     backend='pyvista',
                                     time_viewer=True,
                                     subjects_dir=fs_subjects_dir)
                    brain.toggle_interface()
                    brain._renderer.plotter.reset_camera()
                    brain._renderer.plotter.subplot(0, 0)
                    brain._renderer.plotter.reset_camera()
                    figs, ax = plt.subplots(figsize=(15, 10))
                    ax.imshow(brain.screenshot(time_viewer=True))
                    ax.axis('off')
                    comments = evoked.comment
                    captions = caption
                else:
                    fig_lh = plt.figure()
                    fig_rh = plt.figure()

                    brain_lh = stc.plot(views='lat',
                                        hemi='lh',
                                        initial_time=peak_time,
                                        backend='matplotlib',
                                        subjects_dir=fs_subjects_dir,
                                        figure=fig_lh)
                    brain_rh = stc.plot(views='lat',
                                        hemi='rh',
                                        initial_time=peak_time,
                                        subjects_dir=fs_subjects_dir,
                                        backend='matplotlib',
                                        figure=fig_rh)
                    figs = [brain_lh, brain_rh]
                    comments = [
                        f'{evoked.comment} - left hemisphere',
                        f'{evoked.comment} - right hemisphere'
                    ]
                    captions = [f'{caption} - left', f'{caption} - right']

                rep.add_figs_to_section(figs=figs,
                                        captions=captions,
                                        comments=comments,
                                        section='Sources')
                del peak_time

    if config.process_er:
        fig_er_psd = plot_er_psd(subject=subject, session=session)
        rep.add_figs_to_section(figs=fig_er_psd,
                                captions='Empty-Room Power Spectral Density '
                                '(after filtering)',
                                section='Empty-Room')

    fname_report = bids_path.copy().update(suffix='report', extension='.html')
    rep.save(fname=fname_report, open_browser=False, overwrite=True)
    import matplotlib.pyplot as plt  # nested import to help joblib
    plt.close('all')  # close all figures to save memory
Example #10
def run_report_average(*, cfg, subject: str, session: str) -> None:
    # Group report
    import matplotlib.pyplot as plt  # nested import to help joblib

    msg = 'Generating grand average report …'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))

    evoked_fname = BIDSPath(subject=subject,
                            session=session,
                            task=cfg.task,
                            acquisition=cfg.acq,
                            run=None,
                            recording=cfg.rec,
                            space=cfg.space,
                            suffix='ave',
                            extension='.fif',
                            datatype=cfg.datatype,
                            root=cfg.deriv_root,
                            check=False)

    title = f'sub-{subject}'
    if session is not None:
        title += f', ses-{session}'
    if cfg.task is not None:
        title += f', task-{cfg.task}'

    report = mne.Report(title=title, raw_psd=True)
    evokeds = mne.read_evokeds(evoked_fname)
    if cfg.analyze_channels:
        for evoked in evokeds:
            evoked.pick(cfg.analyze_channels)

    method = cfg.inverse_method
    inverse_str = method
    hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.
    morph_str = 'morph2fsaverage'

    if isinstance(cfg.conditions, dict):
        conditions = list(cfg.conditions.keys())
    else:
        conditions = cfg.conditions.copy()

    conditions.extend(cfg.contrasts)

    #######################################################################
    #
    # Add event stats.
    #
    add_event_counts(cfg=cfg, report=report, session=session)

    #######################################################################
    #
    # Visualize evoked responses.
    #
    for condition, evoked in zip(conditions, evokeds):
        if condition in cfg.conditions:
            title = f'Average: {condition}'
            tags = ('evoked',
                    config.sanitize_cond_name(condition).lower().replace(
                        ' ', ''))
        else:  # It's a contrast of two conditions.
            title = f'Average Contrast: {condition[0]} – {condition[1]}'
            tags = (
                'evoked', f'{config.sanitize_cond_name(condition[0])} – '
                f'{config.sanitize_cond_name(condition[1])}'.lower().replace(
                    ' ', ''))

        report.add_evokeds(
            evokeds=evoked,
            titles=title,
            projs=False,
            tags=tags,
            # captions=evoked.comment  # TODO upstream
        )

    #######################################################################
    #
    # Visualize decoding results.
    #
    if cfg.decode:
        for contrast in cfg.contrasts:
            cond_1, cond_2 = contrast
            a_vs_b = f'{cond_1}+{cond_2}'.replace(op.sep, '')
            processing = f'{a_vs_b}+{cfg.decoding_metric}'
            processing = processing.replace('_', '-').replace('-', '')
            fname_decoding_ = evoked_fname.copy().update(processing=processing,
                                                         suffix='decoding',
                                                         extension='.mat')
            decoding_data = loadmat(fname_decoding_)
            del fname_decoding_, processing, a_vs_b

            fig = plot_decoding_scores_gavg(cfg=cfg,
                                            decoding_data=decoding_data)
            title = f'Time-by-time Decoding: {cond_1} ./. {cond_2}'
            caption = (f'Based on N={decoding_data["N"].squeeze()} '
                       f'subjects. Standard error and confidence interval '
                       f'of the mean were bootstrapped with {cfg.n_boot} '
                       f'resamples.')
            report.add_figure(
                fig=fig,
                title=title,
                caption=caption,
                tags=('decoding', 'contrast',
                      f'{config.sanitize_cond_name(cond_1)} – '
                      f'{config.sanitize_cond_name(cond_2)}'.lower().replace(
                          ' ', '-')))
            plt.close(fig)
            del decoding_data, cond_1, cond_2, caption, title

    #######################################################################
    #
    # Visualize forward solution, inverse operator, and inverse solutions.
    #

    for condition, evoked in zip(conditions, evokeds):
        if condition in cfg.conditions:
            title = f'Average: {condition}'
            cond_str = config.sanitize_cond_name(condition)
            tags = ('source-estimate',
                    config.sanitize_cond_name(condition).lower().replace(
                        ' ', ''))
        else:  # It's a contrast of two conditions.
            # XXX Will change once we process contrasts here too
            continue

        fname_stc_avg = evoked_fname.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{morph_str}+{hemi_str}',
            extension=None)

        if Path(f'{fname_stc_avg.fpath}-lh.stc').exists():
            report.add_stc(stc=fname_stc_avg,
                           title=title,
                           subject='fsaverage',
                           subjects_dir=cfg.fs_subjects_dir,
                           tags=tags)

    fname_report = evoked_fname.copy().update(task=cfg.task,
                                              suffix='report',
                                              extension='.html')
    report.save(fname=fname_report, open_browser=False, overwrite=True)

    plt.close('all')  # close all figures to save memory
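# A hedged sketch of how this group report might be driven: subject='average'
# is the grand-average convention used elsewhere in these snippets, and the
# session falls back to the first configured session (or None), as in main()
# above. `cfg` is assumed to be the pipeline-provided configuration namespace.
session = config.get_sessions()[0] if config.get_sessions() else None
run_report_average(cfg=cfg, subject='average', session=session)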
Example #11
def run_report_source(*,
                      cfg: SimpleNamespace,
                      subject: str,
                      session: Optional[str] = None,
                      report: mne.Report) -> mne.Report:
    import matplotlib.pyplot as plt

    msg = 'Generating source-space analysis report …'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))

    if report is None:
        report = _gen_empty_report(cfg=cfg, subject=subject, session=session)

    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=cfg.task,
                         acquisition=cfg.acq,
                         run=None,
                         recording=cfg.rec,
                         space=cfg.space,
                         extension='.fif',
                         datatype=cfg.datatype,
                         root=cfg.deriv_root,
                         check=False)

    # Use this as a source for the Info dictionary
    fname_info = bids_path.copy().update(processing='clean', suffix='epo')

    fname_trans = bids_path.copy().update(suffix='trans')
    if not fname_trans.fpath.exists():
        msg = 'No coregistration found, skipping source space report.'
        logger.info(
            **gen_log_kwargs(message=msg, subject=subject, session=session))
        return report

    ###########################################################################
    #
    # Visualize the coregistration & inverse solutions.
    #

    if cfg.conditions is None:
        conditions = []
    elif isinstance(cfg.conditions, dict):
        conditions = list(cfg.conditions.keys())
    else:
        conditions = cfg.conditions.copy()

    conditions.extend(cfg.contrasts)

    msg = 'Rendering MRI slices with BEM contours.'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))
    report.add_bem(subject=cfg.fs_subject,
                   subjects_dir=cfg.fs_subjects_dir,
                   title='BEM',
                   width=256,
                   decim=8)

    msg = 'Rendering sensor alignment (coregistration).'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))
    report.add_trans(
        trans=fname_trans,
        info=fname_info,
        title='Sensor alignment',
        subject=cfg.fs_subject,
        subjects_dir=cfg.fs_subjects_dir,
    )

    for condition in conditions:
        msg = f'Rendering inverse solution for {condition}'
        logger.info(
            **gen_log_kwargs(message=msg, subject=subject, session=session))

        if condition in cfg.conditions:
            title = f'Source: {config.sanitize_cond_name(condition)}'
        else:  # It's a contrast of two conditions.
            # XXX Will change once we process contrasts here too
            continue

        method = cfg.inverse_method
        cond_str = config.sanitize_cond_name(condition)
        inverse_str = method
        hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.

        fname_stc = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{hemi_str}', extension=None)

        tags = ('source-estimate', condition.lower().replace(' ', '-'))
        if Path(f'{fname_stc.fpath}-lh.stc').exists():
            report.add_stc(stc=fname_stc,
                           title=title,
                           subject=cfg.fs_subject,
                           subjects_dir=cfg.fs_subjects_dir,
                           tags=tags)

    plt.close('all')  # close all figures to save memory
    return report
Example #12
def run_report_sensor(*,
                      cfg: SimpleNamespace,
                      subject: str,
                      session: Optional[str] = None,
                      report: mne.Report) -> mne.Report:
    import matplotlib.pyplot as plt

    msg = 'Generating sensor-space analysis report …'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))

    if report is None:
        report = _gen_empty_report(cfg=cfg, subject=subject, session=session)

    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=cfg.task,
                         acquisition=cfg.acq,
                         run=None,
                         recording=cfg.rec,
                         space=cfg.space,
                         extension='.fif',
                         datatype=cfg.datatype,
                         root=cfg.deriv_root,
                         check=False)
    fname_epo_clean = bids_path.copy().update(processing='clean', suffix='epo')
    fname_ave = bids_path.copy().update(suffix='ave')
    fname_decoding = bids_path.copy().update(processing=None,
                                             suffix='decoding',
                                             extension='.mat')
    fname_tfr_pow = bids_path.copy().update(suffix='power+condition+tfr',
                                            extension='.h5')

    ###########################################################################
    #
    # Visualize evoked responses.
    #
    if cfg.conditions is None:
        conditions = []
    elif isinstance(cfg.conditions, dict):
        conditions = list(cfg.conditions.keys())
    else:
        conditions = cfg.conditions.copy()

    conditions.extend(cfg.contrasts)

    if conditions:
        evokeds = mne.read_evokeds(fname_ave)
    else:
        evokeds = []

    if evokeds:
        msg = (f'Adding {len(conditions)} evoked signals and contrasts to the '
               f'report.')
    else:
        msg = 'No evoked conditions or contrasts found.'

    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))

    for condition, evoked in zip(conditions, evokeds):
        if cfg.analyze_channels:
            evoked.pick(cfg.analyze_channels)

        if condition in cfg.conditions:
            title = f'Condition: {condition}'
            tags = ('evoked', condition.lower().replace(' ', '-'))
        else:  # It's a contrast of two conditions.
            title = f'Contrast: {condition[0]} – {condition[1]}'
            tags = ('evoked', 'contrast',
                    f"{condition[0].lower().replace(' ', '-')}-"
                    f"{condition[1].lower().replace(' ', '-')}")

        report.add_evokeds(evokeds=evoked, titles=title, tags=tags)

    ###########################################################################
    #
    # Visualize decoding results.
    #
    if cfg.decode:
        msg = 'Adding time-by-time decoding results to the report.'
        logger.info(
            **gen_log_kwargs(message=msg, subject=subject, session=session))

        epochs = mne.read_epochs(fname_epo_clean)

        for contrast in cfg.contrasts:
            cond_1, cond_2 = contrast
            a_vs_b = f'{cond_1}+{cond_2}'.replace(op.sep, '')
            processing = f'{a_vs_b}+{cfg.decoding_metric}'
            processing = processing.replace('_', '-').replace('-', '')
            fname_decoding_ = (fname_decoding.copy().update(
                processing=processing))
            decoding_data = loadmat(fname_decoding_)
            del fname_decoding_, processing, a_vs_b

            fig = plot_decoding_scores(
                times=epochs.times,
                cross_val_scores=decoding_data['scores'],
                metric=cfg.decoding_metric)

            title = f'Time-by-time Decoding: {cond_1} ./. {cond_2}'
            caption = (f'{len(epochs[cond_1])} × {cond_1} ./. '
                       f'{len(epochs[cond_2])} × {cond_2}')
            tags = ('epochs', 'contrast',
                    f"{contrast[0].lower().replace(' ', '-')}-"
                    f"{contrast[1].lower().replace(' ', '-')}")

            report.add_figure(fig=fig, title=title, caption=caption, tags=tags)
            plt.close(fig)
            del decoding_data, cond_1, cond_2, title, caption

        del epochs

    ###########################################################################
    #
    # Visualize TFR as topography.
    #
    if cfg.time_frequency_conditions is None:
        conditions = []
    elif isinstance(cfg.time_frequency_conditions, dict):
        conditions = list(cfg.time_frequency_conditions.keys())
    else:
        conditions = cfg.time_frequency_conditions.copy()

    if conditions:
        msg = 'Adding TFR analysis results to the report.'
        logger.info(
            **gen_log_kwargs(message=msg, subject=subject, session=session))

    for condition in conditions:
        cond = config.sanitize_cond_name(condition)
        fname_tfr_pow_cond = str(fname_tfr_pow.copy()).replace(
            "+condition+", f"+{cond}+")
        power = mne.time_frequency.read_tfrs(fname_tfr_pow_cond)
        fig = power[0].plot_topo(show=False,
                                 fig_facecolor='w',
                                 font_color='k',
                                 border='k')
        report.add_figure(fig=fig,
                          title=f'TFR: {condition}',
                          caption=f'TFR Power: {condition}',
                          tags=('time-frequency',
                                condition.lower().replace(' ', '-')))
        plt.close(fig)

    return report
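# Because run_report_sensor() and run_report_source() both take and return an
# mne.Report, they can be chained into a single per-subject report and saved.
# A sketch; _gen_empty_report, cfg, subject, and session are assumed to be
# provided by the pipeline, as above.
report = _gen_empty_report(cfg=cfg, subject=subject, session=session)
report = run_report_sensor(cfg=cfg, subject=subject, session=session,
                           report=report)
report = run_report_source(cfg=cfg, subject=subject, session=session,
                           report=report)
report.save(fname='report.html', open_browser=False, overwrite=True)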