Example #1
def add_event_counts(*, cfg, session: str, report: mne.Report) -> None:
    try:
        df_events = count_events(BIDSPath(root=cfg.bids_root, session=session))
    except ValueError:
        logger.warning('Could not read events.')
        df_events = None

    if df_events is not None:
        css_classes = ('table', 'table-striped', 'table-borderless',
                       'table-hover')
        report.add_htmls_to_section(
            f'<div class="event-counts">\n'
            f'{df_events.to_html(classes=css_classes, border=0)}\n'
            f'</div>',
            captions='Event counts',
            section='events')
        css = ('.event-counts {\n'
               '  display: -webkit-box;\n'
               '  display: -ms-flexbox;\n'
               '  display: -webkit-flex;\n'
               '  display: flex;\n'
               '  justify-content: center;\n'
               '  text-align: center;\n'
               '}\n\n'
               'th, td {\n'
               '  text-align: center;\n'
               '}\n')
        report.add_custom_css(css)
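The snippet above uses the legacy add_htmls_to_section API. Below is a minimal sketch of the same idea against the newer Report.add_html / add_custom_css interface; targeting MNE >= 1.0 is an assumption here, and the toy DataFrame merely stands in for the real event counts.

import mne
import pandas as pd

report = mne.Report(title='Event counts')
df_events = pd.DataFrame({'trial_type': ['go', 'stop'], 'count': [120, 40]})  # toy data
css_classes = ('table', 'table-striped', 'table-borderless', 'table-hover')
html = (f'<div class="event-counts">'
        f'{df_events.to_html(classes=css_classes, border=0)}</div>')
report.add_html(html, title='Event counts')
report.add_custom_css('.event-counts { display: flex; justify-content: center; }')
report.save('event_counts_report.html', overwrite=True, open_browser=False)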
Example #2
def generate_report(raw, ica, report_savepath):
    logger.info("Generatingg report")
    report = Report(verbose=False)

    fig_topo = ica.plot_components(picks=range(ica.n_components_), show=False)
    report.add_figs_to_section(fig_topo, section="ICA", captions="Timeseries")
    report.save(report_savepath, overwrite=True, open_browser=False)
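A hypothetical call site for generate_report above; the file path and ICA parameters are placeholders, and Report/logger are assumed to be imported as in the snippet.

import mne
from mne.preprocessing import ICA

raw = mne.io.read_raw_fif('sub-01_task-rest_raw.fif', preload=True)  # placeholder path
raw.filter(l_freq=1.0, h_freq=None)  # ICA is typically fit on high-pass-filtered data
ica = ICA(n_components=20, random_state=97)
ica.fit(raw)
generate_report(raw, ica, report_savepath='sub-01_ica_report.html')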
Example #3
def get_report(subject):
    """Get a Report object for a subject.

    If the Report had been saved (pickle'd) before, load it. Otherwise,
    construct a new one.
    """
    report_fname = fname.report(subject=subject)
    if os.path.exists(report_fname):
        with open(report_fname, 'rb') as f:
            return pickle.load(f)
    else:
        return Report(subjects_dir=fname.subjects_dir,
                      subject=subject,
                      title='Analysis for %s' % subject)
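The docstring implies a matching save step elsewhere in the project. A minimal sketch of that counterpart, assuming the same fname.report path helper and a pickle import; alternatively, Report.save() with an .h5 filename persists the object without pickle.

def save_report(report, subject):
    """Persist the Report so get_report() can restore it on the next run."""
    report_fname = fname.report(subject=subject)
    with open(report_fname, 'wb') as f:
        pickle.dump(report, f)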
Example #4
def make_report(subject, subjects_dir, meg_filename, output_dir):
    # Create report from output
    report = Report(image_format='png', subjects_dir=subjects_dir,
                    subject=subject,
                    # info_fname=meg_filename,
                    raw_psd=False)  # use False for speed here

    report.parse_folder(output_dir, on_error='ignore', mri_decim=10)
    report_filename = op.join(output_dir, 'QA_report.html')
    report.save(report_filename)
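A hypothetical invocation of make_report; every path below is a placeholder.

make_report(subject='sub-01',
            subjects_dir='/data/freesurfer/subjects',
            meg_filename='/data/meg/sub-01_task-rest_meg.fif',
            output_dir='/data/derivatives/sub-01')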
Example #5
def detect_bad_components(*, cfg, which: Literal['eog', 'ecg'],
                          epochs: mne.BaseEpochs, ica: mne.preprocessing.ICA,
                          subject: str, session: str,
                          report: mne.Report) -> List[int]:
    evoked = epochs.average()

    artifact = which.upper()
    msg = f'Performing automated {artifact} artifact detection …'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))

    if which == 'eog':
        inds, scores = ica.find_bads_eog(epochs,
                                         threshold=cfg.ica_eog_threshold)
    else:
        inds, scores = ica.find_bads_ecg(epochs,
                                         method='ctps',
                                         threshold=cfg.ica_ctps_ecg_threshold)

    if not inds:
        adjust_setting = ('ica_eog_threshold'
                          if which == 'eog' else 'ica_ctps_ecg_threshold')
        warn = (f'No {artifact}-related ICs detected, this is highly '
                f'suspicious. A manual check is suggested. You may wish to '
                f'lower "{adjust_setting}".')
        logger.warning(
            **gen_log_kwargs(message=warn, subject=subject, session=session))
    else:
        msg = (f'Detected {len(inds)} {artifact}-related ICs in '
               f'{len(epochs)} {artifact} epochs.')
        logger.info(
            **gen_log_kwargs(message=msg, subject=subject, session=session))

    # Mark the artifact-related components for removal
    ica.exclude = inds

    # Plot scores
    fig = ica.plot_scores(scores, labels=which, show=cfg.interactive)
    report.add_figs_to_section(figs=fig,
                               captions=f'Scores - {artifact}',
                               section=f'sub-{subject}')

    # Plot source time course
    fig = ica.plot_sources(evoked, show=cfg.interactive)
    report.add_figs_to_section(figs=fig,
                               captions=f'Source time course - {artifact}',
                               section=f'sub-{subject}')

    # Plot original & corrected data
    fig = ica.plot_overlay(evoked, show=cfg.interactive)
    report.add_figs_to_section(figs=fig,
                               captions=f'Corrections - {artifact}',
                               section=f'sub-{subject}')

    return inds
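Since the function sets ica.exclude before returning, a caller can drop the flagged components afterwards. A minimal sketch, not part of the original pipeline:

# ica.exclude was populated by detect_bad_components(); apply() removes
# those components from a copy, leaving the original epochs untouched.
epochs_clean = ica.apply(epochs.copy())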
Example #6
def generate_report(subj):
    report = Report(subject=subj.name, title=subj.name)

    if subj.name == "sub-emptyroom":
        report.parse_folder(str(subj), render_bem=False)
    else:
        report.parse_folder(str(subj / "meg"), render_bem=False)

    # process head positions
    if subj.name != "sub-emptyroom":
        report = add_head_postions(subj, report)

    # add bad channels
    report = add_bad_channels(subj, report)

    # add maxwell filtering part
    report = add_maxwell_filtering_figures(subj, report)

    # -------- save results -------- #
    dest_dir = dirs.reports / subj.name
    dest_dir.mkdir(exist_ok=True)
    savename = dest_dir / (subj.name + "-report.html")
    report.save(str(savename), open_browser=False, overwrite=True)
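A hypothetical driver loop for generate_report; dirs.bids is a placeholder attribute for wherever the per-subject folders live (only dirs.reports appears in the snippet itself).

for subj in sorted(dirs.bids.glob('sub-*')):
    if subj.is_dir():
        generate_report(subj)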
Example #7
matplotlib.rcParams.update({'figure.max_open_warning': 0})

# Ask for subject IDs to analyze
sub_list = get_sub_list(deriv_dir, allow_all=True)
for sub_string in sub_list:

    # SUBJECT INFORMATION DEFINITION
    # Define the Subject ID and paths
    deriv_path = deriv_dir / sub_string
    fig_path = deriv_path / 'figures'
    print(f'Generating report for task-{task} data from {sub_string}')

    # Initialize the report
    # Make the report object
    report = Report(subject=sub_string,
                    title=f'{sub_string}: task-{task} report',
                    image_format='png', verbose=True, projs=False,
                    subjects_dir=None)

    # Behavioral Section
    # Plot behavioral data
    behav_fig_file = fig_path / f'{sub_string}_task-{task}_beh_performance.png'
    report.add_images_to_section(behav_fig_file,
                                 captions='Behavior: Performance Summary',
                                 section='Behavior')

    # EEG Section
    # Load the Raw data
    raw_fif_file = deriv_path / \
        f'{sub_string}_task-{task}_ref-FCz_desc-resamp_raw.fif.gz'
    raw = mne.io.read_raw_fif(raw_fif_file, preload=True)
Example #8
def make_report(subject_id):
    subject = "sub%03d" % subject_id
    print("processing %s" % subject)

    meg_path = op.join(meg_dir, subject)
    ave_fname = op.join(meg_path, "%s-ave.fif" % subject)

    rep = Report(info_fname=ave_fname, subject=subject,
                 subjects_dir=subjects_dir)
    rep.parse_folder(meg_path)

    evokeds = mne.read_evokeds(op.join(meg_path, '%s-ave.fif' % subject))
    fam = evokeds[0]
    scramb = evokeds[1]
    unfam = evokeds[2]

    figs = list()
    captions = list()

    fig = fam.plot(spatial_colors=True, show=False, gfp=True)
    figs.append(fig)
    captions.append('Famous faces')

    fig = unfam.plot(spatial_colors=True, show=False, gfp=True)
    figs.append(fig)
    captions.append('Unfamiliar faces')

    fig = scramb.plot(spatial_colors=True, show=False, gfp=True)
    figs.append(fig)
    captions.append('Scrambled faces')

    if 'EEG070' in fam.ch_names:
        idx = fam.ch_names.index('EEG070')

        fig = mne.viz.plot_compare_evokeds({'Famous': fam, 'Unfamiliar': unfam,
                                            'Scrambled': scramb}, idx,
                                           show=False)
        figs.append(fig)

        captions.append('Famous, unfamiliar and scrambled faces on EEG070')

    fname_trans = op.join(study_path, 'ds117', subject, 'MEG',
                          '%s-trans.fif' % subject)
    mne.viz.plot_trans(fam.info, fname_trans, subject=subject,
                       subjects_dir=subjects_dir, meg_sensors=True,
                       eeg_sensors=True)
    fig = mlab.gcf()
    figs.append(fig)
    captions.append('Coregistration')

    rep.add_figs_to_section(figs, captions)
    for cond in ['faces', 'famous', 'unfamiliar', 'scrambled', 'contrast']:
        fname = op.join(meg_path, 'mne_dSPM_inverse-%s' % cond)
        stc = mne.read_source_estimate(fname, subject)
        brain = stc.plot(views=['ven'], hemi='both')

        brain.set_data_time_index(112)

        fig = mlab.gcf()
        rep.add_figs_to_section(fig, cond)

    rep.save(fname=op.join(meg_dir, 'report%s.html' % subject),
             open_browser=False, overwrite=True)
Example #9
        fname = op.join(meg_path, 'mne_dSPM_inverse-%s' % cond)
        stc = mne.read_source_estimate(fname, subject)
        brain = stc.plot(views=['ven'], hemi='both')

        brain.set_data_time_index(112)

        fig = mlab.gcf()
        rep.add_figs_to_section(fig, cond)

    rep.save(fname=op.join(meg_dir, 'report%s.html' % subject),
             open_browser=False, overwrite=True)


# Group report
faces_fname = op.join(meg_dir, 'eeg_faces-ave.fif')
rep = Report(info_fname=faces_fname, subject='fsaverage',
             subjects_dir=subjects_dir)
faces = mne.read_evokeds(faces_fname)[0]
rep.add_figs_to_section(faces.plot(spatial_colors=True, gfp=True, show=False),
                        'Average faces')

scrambled = mne.read_evokeds(op.join(meg_dir, 'eeg_scrambled-ave.fif'))[0]
rep.add_figs_to_section(scrambled.plot(spatial_colors=True, gfp=True,
                                       show=False), 'Average scrambled')

fname = op.join(meg_dir, 'contrast-average')
stc = mne.read_source_estimate(fname, subject='fsaverage')
brain = stc.plot(views=['ven'], hemi='both', subject='fsaverage',
                 subjects_dir=subjects_dir)
brain.set_data_time_index(165)

fig = mlab.gcf()
Example #10
# Ask for subject IDs to analyze
sub_list = get_sub_list(deriv_dir, allow_all=True)
for sub in sub_list:

    # SUBJECT INFORMATION DEFINITION
    # Define the Subject ID and paths
    deriv_path = deriv_dir / sub
    fig_path = deriv_path / 'figures'
    print(f'Generating report for task-{task} data from {sub}')

    # Initialize the report
    # Make the report object
    report = Report(subject=sub,
                    title=f'{sub}: task-{task} report',
                    image_format='png',
                    verbose=True,
                    projs=False,
                    subjects_dir=None)

    # Behavioral Section
    # Plot behavioral data
    behav_fig_file = fig_path / f'{sub}_task-{task}_beh_performance.png'
    report.add_images_to_section(behav_fig_file,
                                 captions='Behavior: Performance Summary',
                                 section='Behavior')

    # EEG Section
    # Load the Raw data
    raw_fif_file = deriv_path / \
        f'{sub}_task-{task}_ref-FCz_desc-resamp_raw.fif.gz'
    raw = mne.io.read_raw_fif(raw_fif_file, preload=True)
Example #11
def run_report_source(*,
                      cfg: SimpleNamespace,
                      subject: str,
                      session: Optional[str] = None,
                      report: mne.Report) -> mne.Report:
    import matplotlib.pyplot as plt

    msg = 'Generating source-space analysis report …'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))

    if report is None:
        report = _gen_empty_report(cfg=cfg, subject=subject, session=session)

    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=cfg.task,
                         acquisition=cfg.acq,
                         run=None,
                         recording=cfg.rec,
                         space=cfg.space,
                         extension='.fif',
                         datatype=cfg.datatype,
                         root=cfg.deriv_root,
                         check=False)

    # Use this as a source for the Info dictionary
    fname_info = bids_path.copy().update(processing='clean', suffix='epo')

    fname_trans = bids_path.copy().update(suffix='trans')
    if not fname_trans.fpath.exists():
        msg = 'No coregistration found, skipping source space report.'
        logger.info(
            **gen_log_kwargs(message=msg, subject=subject, session=session))
        return report

    ###########################################################################
    #
    # Visualize the coregistration & inverse solutions.
    #

    if cfg.conditions is None:
        conditions = []
    elif isinstance(cfg.conditions, dict):
        conditions = list(cfg.conditions.keys())
    else:
        conditions = cfg.conditions.copy()

    conditions.extend(cfg.contrasts)

    msg = 'Rendering MRI slices with BEM contours.'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))
    report.add_bem(subject=cfg.fs_subject,
                   subjects_dir=cfg.fs_subjects_dir,
                   title='BEM',
                   width=256,
                   decim=8)

    msg = 'Rendering sensor alignment (coregistration).'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))
    report.add_trans(
        trans=fname_trans,
        info=fname_info,
        title='Sensor alignment',
        subject=cfg.fs_subject,
        subjects_dir=cfg.fs_subjects_dir,
    )

    for condition in conditions:
        msg = f'Rendering inverse solution for {condition}'
        logger.info(
            **gen_log_kwargs(message=msg, subject=subject, session=session))

        if condition in cfg.conditions:
            title = f'Source: {config.sanitize_cond_name(condition)}'
        else:  # It's a contrast of two conditions.
            # XXX Will change once we process contrasts here too
            continue

        method = cfg.inverse_method
        cond_str = config.sanitize_cond_name(condition)
        inverse_str = method
        hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.

        fname_stc = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{hemi_str}', extension=None)

        tags = ('source-estimate', condition.lower().replace(' ', '-'))
        if Path(f'{fname_stc.fpath}-lh.stc').exists():
            report.add_stc(stc=fname_stc,
                           title=title,
                           subject=cfg.fs_subject,
                           subjects_dir=cfg.fs_subjects_dir,
                           tags=tags)

    plt.close('all')  # close all figures to save memory
    return report
Example #12
def run_report_sensor(*,
                      cfg: SimpleNamespace,
                      subject: str,
                      session: Optional[str] = None,
                      report: mne.Report) -> mne.Report:
    import matplotlib.pyplot as plt

    msg = 'Generating sensor-space analysis report …'
    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))

    if report is None:
        report = _gen_empty_report(cfg=cfg, subject=subject, session=session)

    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=cfg.task,
                         acquisition=cfg.acq,
                         run=None,
                         recording=cfg.rec,
                         space=cfg.space,
                         extension='.fif',
                         datatype=cfg.datatype,
                         root=cfg.deriv_root,
                         check=False)
    fname_epo_clean = bids_path.copy().update(processing='clean', suffix='epo')
    fname_ave = bids_path.copy().update(suffix='ave')
    fname_decoding = bids_path.copy().update(processing=None,
                                             suffix='decoding',
                                             extension='.mat')
    fname_tfr_pow = bids_path.copy().update(suffix='power+condition+tfr',
                                            extension='.h5')

    ###########################################################################
    #
    # Visualize evoked responses.
    #
    if cfg.conditions is None:
        conditions = []
    elif isinstance(cfg.conditions, dict):
        conditions = list(cfg.conditions.keys())
    else:
        conditions = cfg.conditions.copy()

    conditions.extend(cfg.contrasts)

    if conditions:
        evokeds = mne.read_evokeds(fname_ave)
    else:
        evokeds = []

    if evokeds:
        msg = (f'Adding {len(conditions)} evoked signals and contrasts to the '
               f'report.')
    else:
        msg = 'No evoked conditions or contrasts found.'

    logger.info(
        **gen_log_kwargs(message=msg, subject=subject, session=session))

    for condition, evoked in zip(conditions, evokeds):
        if cfg.analyze_channels:
            evoked.pick(cfg.analyze_channels)

        if condition in cfg.conditions:
            title = f'Condition: {condition}'
            tags = ('evoked', condition.lower().replace(' ', '-'))
        else:  # It's a contrast of two conditions.
            title = f'Contrast: {condition[0]} – {condition[1]}'
            tags = ('evoked', 'contrast',
                    f"{condition[0].lower().replace(' ', '-')}-"
                    f"{condition[1].lower().replace(' ', '-')}")

        report.add_evokeds(evokeds=evoked, titles=title, tags=tags)

    ###########################################################################
    #
    # Visualize decoding results.
    #
    if cfg.decode:
        msg = 'Adding time-by-time decoding results to the report.'
        logger.info(
            **gen_log_kwargs(message=msg, subject=subject, session=session))

        epochs = mne.read_epochs(fname_epo_clean)

        for contrast in cfg.contrasts:
            cond_1, cond_2 = contrast
            a_vs_b = f'{cond_1}+{cond_2}'.replace(op.sep, '')
            processing = f'{a_vs_b}+{cfg.decoding_metric}'
            processing = processing.replace('_', '-').replace('-', '')
            fname_decoding_ = (fname_decoding.copy().update(
                processing=processing))
            decoding_data = loadmat(fname_decoding_)
            del fname_decoding_, processing, a_vs_b

            fig = plot_decoding_scores(
                times=epochs.times,
                cross_val_scores=decoding_data['scores'],
                metric=cfg.decoding_metric)

            title = f'Time-by-time Decoding: {cond_1} ./. {cond_2}'
            caption = (f'{len(epochs[cond_1])} × {cond_1} ./. '
                       f'{len(epochs[cond_2])} × {cond_2}')
            tags = ('epochs', 'contrast',
                    f"{contrast[0].lower().replace(' ', '-')}-"
                    f"{contrast[1].lower().replace(' ', '-')}")

            report.add_figure(fig=fig, title=title, caption=caption, tags=tags)
            plt.close(fig)
            del decoding_data, cond_1, cond_2, title, caption

        del epochs

    ###########################################################################
    #
    # Visualize TFR as topography.
    #
    if cfg.time_frequency_conditions is None:
        conditions = []
    elif isinstance(cfg.time_frequency_conditions, dict):
        conditions = list(cfg.time_frequency_conditions.keys())
    else:
        conditions = cfg.time_frequency_conditions.copy()

    if conditions:
        msg = 'Adding TFR analysis results to the report.'
        logger.info(
            **gen_log_kwargs(message=msg, subject=subject, session=session))

    for condition in conditions:
        cond = config.sanitize_cond_name(condition)
        fname_tfr_pow_cond = str(fname_tfr_pow.copy()).replace(
            "+condition+", f"+{cond}+")
        power = mne.time_frequency.read_tfrs(fname_tfr_pow_cond)
        fig = power[0].plot_topo(show=False,
                                 fig_facecolor='w',
                                 font_color='k',
                                 border='k')
        report.add_figure(fig=fig,
                          title=f'TFR: {condition}',
                          caption=f'TFR Power: {condition}',
                          tags=('time-frequency',
                                condition.lower().replace(' ', '-')))
        plt.close(fig)

    return report
Example #13
def make_report(subject_id):
    subject = "sub%03d" % subject_id
    print("processing %s" % subject)

    meg_path = op.join(meg_dir, subject)
    ave_fname = op.join(meg_path,
                        "%s_highpass-%sHz-ave.fif" % (subject, l_freq))

    rep = Report(info_fname=ave_fname,
                 subject=subject,
                 subjects_dir=subjects_dir)
    rep.parse_folder(meg_path)

    evokeds = mne.read_evokeds(ave_fname)
    fam = evokeds[0]
    scramb = evokeds[1]
    unfam = evokeds[2]

    figs = list()
    captions = list()

    fig = fam.plot(spatial_colors=True, show=False, gfp=True)
    figs.append(fig)
    captions.append('Famous faces')

    fig = unfam.plot(spatial_colors=True, show=False, gfp=True)
    figs.append(fig)
    captions.append('Unfamiliar faces')

    fig = scramb.plot(spatial_colors=True, show=False, gfp=True)
    figs.append(fig)
    captions.append('Scrambled faces')

    if 'EEG070' in fam.ch_names:
        idx = fam.ch_names.index('EEG070')

        fig = mne.viz.plot_compare_evokeds(
            {
                'Famous': fam,
                'Unfamiliar': unfam,
                'Scrambled': scramb
            },
            idx,
            show=False)
        figs.append(fig)

        captions.append('Famous, unfamiliar and scrambled faces on EEG070')

    fname_trans = op.join(study_path, 'ds117', subject, 'MEG',
                          '%s-trans.fif' % subject)
    mne.viz.plot_trans(fam.info,
                       fname_trans,
                       subject=subject,
                       subjects_dir=subjects_dir,
                       meg_sensors=True,
                       eeg_sensors=True)
    fig = mlab.gcf()
    figs.append(fig)
    captions.append('Coregistration')

    rep.add_figs_to_section(figs, captions)
    for cond in ['faces', 'famous', 'unfamiliar', 'scrambled', 'contrast']:
        fname = op.join(meg_path,
                        'mne_dSPM_inverse_highpass-%sHz-%s' % (l_freq, cond))
        stc = mne.read_source_estimate(fname, subject)
        brain = stc.plot(views=['ven'], hemi='both')

        brain.set_data_time_index(112)

        fig = mlab.gcf()
        rep.add_figs_to_section(fig, cond)

    rep.save(fname=op.join(meg_dir, 'report%s.html' % subject),
             open_browser=False,
             overwrite=True)
Example #14
        brain = stc.plot(views=['ven'], hemi='both')

        brain.set_data_time_index(112)

        fig = mlab.gcf()
        rep.add_figs_to_section(fig, cond)

    rep.save(fname=op.join(meg_dir, 'report%s.html' % subject),
             open_browser=False,
             overwrite=True)


# Group report
faces_fname = op.join(meg_dir, 'eeg_faces_highpass-%sHz-ave.fif' % l_freq)
rep = Report(info_fname=faces_fname,
             subject='fsaverage',
             subjects_dir=subjects_dir)
faces = mne.read_evokeds(faces_fname)[0]
rep.add_figs_to_section(faces.plot(spatial_colors=True, gfp=True, show=False),
                        'Average faces')

scrambled = mne.read_evokeds(op.join(meg_dir, 'eeg_scrambled-ave.fif'))[0]
rep.add_figs_to_section(
    scrambled.plot(spatial_colors=True, gfp=True, show=False),
    'Average scrambled')

fname = op.join(meg_dir, 'contrast-average_highpass-%sHz' % l_freq)
stc = mne.read_source_estimate(fname, subject='fsaverage')
brain = stc.plot(views=['ven'],
                 hemi='both',
                 subject='fsaverage',
Example #15
# Ask for subject IDs to analyze
sub_list = get_sub_list(deriv_dir, allow_all=True)
for sub in sub_list:

    # SUBJECT INFORMATION DEFINITION
    # Define the Subject ID and paths
    deriv_path = deriv_dir / sub
    fig_path = deriv_path / 'figures'
    print(f'Generating report for task-{task} data from {sub}')

    # Initialize the report
    # Make the report object
    report = Report(subject=sub,
                    title=f'{sub}: task-{task} report',
                    image_format='png',
                    verbose=True,
                    projs=False,
                    subjects_dir=None)

    # Behavioral Section
    # Plot behavioral data
    behav_fig_file = fig_path / f'{sub}_task-{task}_beh_performance.png'
    report.add_images_to_section(behav_fig_file,
                                 captions='Behavior: Performance Summary',
                                 section='Behavior')

    # EEG Section
    # Load the Raw data
    raw_fif_file = deriv_path / \
        f'{sub}_task-{task}_ref-FCz_desc-resamp_raw.fif.gz'
    raw = mne.io.read_raw_fif(raw_fif_file, preload=True)