Example #1
def report_flm_fiac():  # pragma: no cover
    data = nistats_datasets.fetch_fiac_first_level()
    fmri_img = [data['func1'], data['func2']]

    from nilearn.image import mean_img
    mean_img_ = mean_img(fmri_img[0])

    design_files = [data['design_matrix1'], data['design_matrix2']]
    design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

    fmri_glm = FirstLevelModel(mask_img=data['mask'], minimize_memory=True)
    fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

    n_columns = design_matrices[0].shape[1]

    contrasts = {
        'SStSSp_minus_DStDSp': _pad_vector([1, 0, 0, -1], n_columns),
        'DStDSp_minus_SStSSp': _pad_vector([-1, 0, 0, 1], n_columns),
        'DSt_minus_SSt': _pad_vector([-1, -1, 1, 1], n_columns),
        'DSp_minus_SSp': _pad_vector([-1, 1, -1, 1], n_columns),
        'DSt_minus_SSt_for_DSp': _pad_vector([0, -1, 0, 1], n_columns),
        'DSp_minus_SSp_for_DSt': _pad_vector([0, 0, -1, 1], n_columns),
        'Deactivation': _pad_vector([-1, -1, -1, -1, 4], n_columns),
        'Effects_of_interest': np.eye(n_columns)[:5]
    }
    report = make_glm_report(
        fmri_glm,
        contrasts,
        bg_img=mean_img_,
        height_control='fdr',
    )
    output_filename = 'generated_report_flm_fiac.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
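# The helper `_pad_vector` is not part of this excerpt. A minimal sketch of
# what it presumably does (hypothetical reconstruction): right-pad a short
# contrast vector with zeros so it spans every design-matrix column.
import numpy as np

def _pad_vector(contrast_, n_columns):
    """Append zeros so the contrast covers all design-matrix columns."""
    return np.hstack((contrast_, np.zeros(n_columns - len(contrast_))))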
Example #2
def report_slm_oasis():  # pragma: no cover
    n_subjects = 5  # using more subjects requires more memory
    oasis_dataset = nilearn.datasets.fetch_oasis_vbm(n_subjects=n_subjects)
    # Resample the mask, since it has a different resolution from the data
    mask_img = resample_to_img(
        nilearn.datasets.fetch_icbm152_brain_gm_mask(),
        oasis_dataset.gray_matter_maps[0],
        interpolation='nearest',
    )
    design_matrix = _make_design_matrix_slm_oasis(oasis_dataset, n_subjects)
    second_level_model = SecondLevelModel(smoothing_fwhm=2.0, mask=mask_img)
    second_level_model.fit(oasis_dataset.gray_matter_maps,
                           design_matrix=design_matrix)

    contrast = [[1, 0, 0], [0, 1, 0]]
    report = make_glm_report(
        model=second_level_model,
        contrasts=contrast,
        bg_img=nilearn.datasets.fetch_icbm152_2009()['t1'],
        height_control=None,
    )
    output_filename = 'generated_report_slm_oasis.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
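# `_make_design_matrix_slm_oasis` is defined elsewhere in the script. A hedged
# sketch, assuming it assembles age, sex, and intercept columns from
# `oasis_dataset.ext_vars` (which would match the three-column contrasts passed
# above); the exact column coding is an assumption.
import numpy as np
import pandas as pd

def _make_design_matrix_slm_oasis(oasis_dataset, n_subjects):
    age = oasis_dataset.ext_vars['age'].astype(float)
    sex = (oasis_dataset.ext_vars['mf'] == b'F').astype(float)
    intercept = np.ones(n_subjects)
    return pd.DataFrame(np.vstack((age, sex, intercept)).T,
                        columns=['age', 'sex', 'intercept'])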
Example #3
def compute_group_z_map(second_level_input, n_sub, output_pathway):
    # Model the effect of conditions (sample 1 vs sample 2).
    condition_effect = np.hstack(([1] * n_sub, [- 1] * n_sub))

    # Model the subject effect:
    # each subject is observed in sample 1 and sample 2.
    subject_effect = np.vstack((np.eye(n_sub), np.eye(n_sub)))
    subjects = ['S%02d' % i for i in range(1, n_sub + 1)]

    # We then assemble those in a design matrix and...
    design_matrix = pd.DataFrame(
        np.hstack((condition_effect[:, np.newaxis], subject_effect)),
        columns=['Story vs. Math'] + subjects)

    # ... plot the design_matrix.
    plot_design_matrix(
        design_matrix,
        output_file=os.path.join(output_pathway,
                                 'design_matrix_story_math.png'))

    # Specify the analysis model and fit it
    second_level_model = SecondLevelModel().fit(second_level_input,
                                                design_matrix=design_matrix)

    # Estimate the contrast
    z_map = second_level_model.compute_contrast('Story vs. Math',
                                                output_type='z_score')

    # Report of the GLM
    report = make_glm_report(second_level_model,
                             contrasts='Story vs. Math',
                             title='Group-Level HCP900 Story vs. Math Report',
                             cluster_threshold=5,
                             height_control='fdr',
                             min_distance=8.,
                             plot_type='glass',
    )

    report.save_as_html(os.path.join(output_pathway, 'report.html'))

    # Save contrast nifti-file
    z_map.to_filename(os.path.join(output_pathway,
                                   'group_hcplang900_story_math.nii.gz'))

    # Plot contrast
    threshold = 3.1  # corresponds to p < .001, uncorrected
    display = plotting.plot_glass_brain(z_map, threshold=threshold,
                                        colorbar=True,
                                        plot_abs=False,
                                        title='Story vs. Math (unc p<0.001)',
                                        output_file=os.path.join(
                                        output_pathway,
                                        'group_hcplang900_story_math'))

    return z_map
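# Hypothetical usage sketch (paths and the subject count are placeholders):
# the paired design built above assumes `second_level_input` lists each
# subject's 'story' contrast map first, followed by the same subjects' 'math'
# maps in the same subject order.
from glob import glob

n_sub = 10
story_maps = sorted(glob('maps/sub-*_story_zmap.nii.gz'))[:n_sub]
math_maps = sorted(glob('maps/sub-*_math_zmap.nii.gz'))[:n_sub]
second_level_input = story_maps + math_maps
z_map = compute_group_z_map(second_level_input, n_sub, output_pathway='results')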
Example #4
def _gen_report():
    """ Generate an empty HTMLReport for testing """

    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
    mask, fmri_data, design_matrices = _write_fake_fmri_data(shapes, rk)
    flm = FirstLevelModel(mask_img=mask).fit(
            fmri_data, design_matrices=design_matrices)
    contrast = np.eye(3)[1]
    report = make_glm_report(flm, contrast, plot_type='glass',
                             height_control=None, min_distance=15,
                             alpha=0.001, threshold=2.78,
                             )
    return report
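# `_write_fake_fmri_data` comes from the library's test utilities and is not
# shown here. A rough, hypothetical stand-in: the original presumably writes
# the data to disk, but in-memory images and DataFrames are also accepted by
# FirstLevelModel.fit.
import numpy as np
import pandas as pd
import nibabel as nib

def _write_fake_fmri_data(shapes, rk):
    affine = np.eye(4)
    mask = nib.Nifti1Image(np.ones(shapes[0][:3], dtype=np.int8), affine)
    fmri_data, design_matrices = [], []
    for shape in shapes:
        fmri_data.append(nib.Nifti1Image(np.random.rand(*shape), affine))
        design_matrices.append(pd.DataFrame(
            np.random.rand(shape[3], rk),
            columns=['col_%d' % k for k in range(rk)]))
    return mask, fmri_data, design_matrices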
Example #5
def report_flm_bids_features():  # pragma: no cover
    data_dir = _fetch_bids_data()
    model, subject = _make_flm(data_dir)
    title = 'FLM Bids Features Stat maps'
    report = make_glm_report(
        model=model,
        contrasts='StopSuccess - Go',
        title=title,
        cluster_threshold=3,
    )
    output_filename = 'generated_report_flm_bids_features.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
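# `_fetch_bids_data` and `_make_flm` are helpers defined elsewhere. A plausible
# sketch of the model-fitting part, assuming a preprocessed BIDS derivatives
# layout; the task label and space label below are assumptions and may need
# adjusting for the actual dataset.
from nistats.first_level_model import first_level_models_from_bids

def _make_flm(data_dir):
    task_label = 'stopsignal'
    space_label = 'MNI152NLin2009cAsym'
    models, run_imgs, events, confounds = first_level_models_from_bids(
        data_dir, task_label, space_label)
    model = models[0]
    model.fit(run_imgs[0], events=events[0])
    return model, 'sub-%s' % model.subject_label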
Example #6
def report_flm_adhd_dmn():  # pragma: no cover
    t_r = 2.
    slice_time_ref = 0.
    n_scans = 176
    pcc_coords = (0, -53, 26)
    adhd_dataset = nilearn.datasets.fetch_adhd(n_subjects=1)
    seed_masker = NiftiSpheresMasker([pcc_coords],
                                     radius=10,
                                     detrend=True,
                                     standardize=True,
                                     low_pass=0.1,
                                     high_pass=0.01,
                                     t_r=2.,
                                     memory='nilearn_cache',
                                     memory_level=1,
                                     verbose=0)
    seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
    frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
    design_matrix = make_first_level_design_matrix(frametimes,
                                                   hrf_model='spm',
                                                   add_regs=seed_time_series,
                                                   add_reg_names=["pcc_seed"])
    dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
    contrasts = {'seed_based_glm': dmn_contrast}

    first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
    first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                              design_matrices=design_matrix)

    report = make_glm_report(
        first_level_model,
        contrasts=contrasts,
        title='ADHD DMN Report',
        cluster_threshold=15,
        height_control='bonferroni',
        min_distance=8.,
        plot_type='glass',
        report_dims=(1200, 'a'),
    )
    output_filename = 'generated_report_flm_adhd_dmn.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
Example #7
display = plotting.plot_stat_map(z_map, threshold=3.0, title='Seed based GLM',
                                 cut_coords=pcc_coords)
display.add_markers(marker_coords=[pcc_coords], marker_color='g',
                    marker_size=300)
display.savefig(filename)
print("Saved z-map to '{0}'.".format(filename))

###########################################################################
# Generating a report
# -------------------
# It can be useful to quickly generate a
# portable, ready-to-view report with most of the pertinent information.
# This is easy to do if you have a fitted model and the list of contrasts,
# which we do here.

from nistats.reporting import make_glm_report

report = make_glm_report(first_level_model,
                         contrasts=contrasts,
                         title='ADHD DMN Report',
                         cluster_threshold=15,
                         min_distance=8.,
                         plot_type='glass',
                         )

#########################################################################
# We have several ways to access the report:

# report  # This report can be viewed in a notebook
# report.save_as_html('report.html')
# report.open_in_browser()
Example #8
                                   output_type='z_score')
plotting.plot_stat_map(z_map,
                       bg_img=mean_img_,
                       threshold=3.0,
                       title='%s, fixed effects' % contrast_id)

plotting.show()

#########################################################################
# Not unexpectedly, the fixed-effects version displays higher peaks than the
# input sessions: computing fixed effects enhances the signal-to-noise ratio
# of the resulting brain maps.
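#########################################################################
# For reference, nistats also provides an explicit fixed-effects computation
# over per-run effect and variance maps; a hedged sketch, assuming
# `contrast_imgs` and `variance_imgs` hold each run's 'effect_size' and
# 'effect_variance' outputs for the same contrast.

from nistats.contrasts import compute_fixed_effects

fixed_fx_contrast, fixed_fx_variance, fixed_fx_stat = compute_fixed_effects(
    contrast_imgs, variance_imgs)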

#########################################################################
# Generating a report
# -------------------
# Since we have already computed the FirstLevelModel and
# have the contrasts, we can quickly create a summary report.
from nistats.reporting import make_glm_report

report = make_glm_report(
    fmri_glm,
    contrasts,
    bg_img=mean_img_,
)

#########################################################################
# We have several ways to access the report:

# report  # This report can be viewed in a notebook
# report.save_as_html('report.html')
# report.open_in_browser()
Example #9
                       colorbar=True,
                       title='sex effect on grey matter density (FDR = .05)')

###########################################################################
# Note that there does not seem to be any significant effect of sex on
# grey matter density on that dataset.

###########################################################################
# Generating a report
# -------------------
# It can be useful to quickly generate a
# portable, ready-to-view report with most of the pertinent information.
# This is easy to do if you have a fitted model and the list of contrasts,
# which we do here.

from nistats.reporting import make_glm_report

icbm152_2009 = datasets.fetch_icbm152_2009()
report = make_glm_report(
    model=second_level_model,
    contrasts=['age', 'sex'],
    bg_img=icbm152_2009['t1'],
)

#########################################################################
# We have several ways to access the report:

# report  # This report can be viewed in a notebook
# report.save_as_html('report.html')
# report.open_in_browser()
Example #10
                          threshold=norm.isf(0.001),
                          plot_abs=False,
                          display_mode='z',
                          figure=plt.figure(figsize=(4, 4)))
plt.show()

###############################################################################
# We can get a LaTeX table from a Pandas DataFrame for display and
# publication purposes.
from nistats.reporting import get_clusters_table
print(get_clusters_table(z_map, norm.isf(0.001), 10).to_latex())

#########################################################################
# Generating a report
# -------------------
# Using the computed FirstLevelModel and contrast information,
# we can quickly create a summary report.

from nistats.reporting import make_glm_report

report = make_glm_report(
    model=model,
    contrasts='StopSuccess - Go',
)

#########################################################################
# We have several ways to access the report:

# report  # This report can be viewed in a notebook
# report.save_as_html('report.html')
# report.open_in_browser()
Example #11
        z_maps.append(glm.compute_contrast(condition_))
        condition_idx.append(condition_)
        session_idx.append(session)

#########################################################################
# Generating a report
# -------------------
# Since we have already computed the FirstLevelModel
# and have the contrast, we can quickly create a summary report.
from nilearn.image import mean_img
from nistats.reporting import make_glm_report

mean_img_ = mean_img(func_filename)
report = make_glm_report(
    glm,
    contrasts=conditions,
    bg_img=mean_img_,
)

#############################################################################
# In a jupyter notebook, the report will be automatically inserted, as above.
# We have several other ways to access the report:

# report  # This report can be viewed in a notebook
# report.save_as_html('report.html')
# report.open_in_browser()

#############################################################################
# Transform the maps to an array of values
# ----------------------------------------
from nilearn.input_data import NiftiMasker
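# The excerpt stops at the import; a minimal sketch of the step announced by
# the heading, assuming a brain mask `mask_img` is available from earlier in
# the script.
masker = NiftiMasker(mask_img=mask_img, memory='nilearn_cache')
X = masker.fit_transform(z_maps)  # array of shape (n_maps, n_voxels)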
Example #12
def first_level(subject):
    subject_id = subject['subject_id']
    data_dir = subject['output_dir']
    subject_session_output_dir = os.path.join(data_dir, 'res_stats')
    if not os.path.exists(subject_session_output_dir):
        os.makedirs(subject_session_output_dir)

    design_matrices = []

    for e, i in enumerate(subject['func']):
        
        # Parameters
        tr = subject['TR']
        drift_model = None
        hrf_model = 'spm'  # hemodynamic response function
        hfcut = 128.
        fwhm = [5, 5, 5]
        n_scans = nibabel.load(subject['func'][e]).shape[3]
 
        # Preparation of paradigm
        events_file = subject['onset'][e]
        paradigm = paradigm_contrasts.localizer_paradigm(events_file)
        
        # Motion parameter
        motion_path = subject['realignment_parameters'][e]
        motion_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
        motion = np.loadtxt(motion_path)
        
        
        # Build design matrix
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        design_matrix = make_first_level_design_matrix(
                frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model,
                high_pass=hfcut, add_regs=motion,
                add_reg_names=motion_names)
        _, dmtx, names = check_design_matrix(design_matrix)
        design_matrices.append(design_matrix)
        #print(names)
    
    # Specify contrasts
    contrasts = paradigm_contrasts.localizer_contrasts(design_matrix)

    # GLM Analysis
    print('Fitting a GLM (this takes time)...')    
    
    # For mask_img, use either False or a mask derived from the T1 MNI
    # template; the mask computed by default from the fMRI data is not
    # always correct. For a specific mask, try this:
    # mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz")
    # mask = compute_epi_mask(fmri_f)
    # nibabel.save(mask, mask_path)
    # mask_images.append(compute_epi_mask(mask))
    
    fmri_glm = FirstLevelModel(mask_img=False, t_r=tr, smoothing_fwhm=fwhm)
    fmri_glm = fmri_glm.fit(subject['func'], design_matrices=design_matrices)

    # compute contrasts
    z_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)

        # store stat maps to disk
        for map_type in ['z_score', 'stat', 'effect_size', 'effect_variance']:
            stat_map = fmri_glm.compute_contrast(
                contrast_val, output_type=map_type)
            map_dir = os.path.join(
                subject_session_output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            stat_map.to_filename(map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z_score':
                z_maps[contrast_id] = map_path

    anat_img = glob.glob(os.path.join(data_dir, 'anat/wsub*T1w.nii.gz'))[0]
    stats_report_filename = os.path.join(
        subject_session_output_dir, 'report_stats.html')

    report = make_glm_report(fmri_glm,
                             contrasts,
                             threshold=3.0,
                             bg_img=anat_img,
                             cluster_threshold=15,
                             title="GLM for subject %s" % subject_id,
                             )
    report.save_as_html(stats_report_filename)
                
    return z_maps
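# A hypothetical minimal `subject` dictionary matching the keys read above;
# every path below is a placeholder. The function also expects a normalized
# anatomy under output_dir matching 'anat/wsub*T1w.nii.gz'.
subject = {
    'subject_id': 'sub-01',
    'output_dir': '/data/derivatives/sub-01',
    'func': ['/data/sub-01/func/sub-01_task-localizer_bold.nii.gz'],
    'onset': ['/data/sub-01/func/sub-01_task-localizer_events.tsv'],
    'realignment_parameters': ['/data/derivatives/sub-01/rp_sub-01.txt'],
    'TR': 2.4,
}
z_maps = first_level(subject)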
Example #13
def first_level(subject_dic,
                additional_regressors=None,
                compcorr=False,
                smooth=None,
                surface=False,
                mask_img=None):
    """ Run the first-level analysis (GLM fitting + statistical maps)
    in a given subject

    Parameters
    ----------
    subject_dic: dict,
                 exhaustive description of an individual acquisition
    additional_regressors: dict or None,
                 additional regressors provided as an already sampled
                 design_matrix
                 dictionary keys are session_ids
    compcorr: Bool, optional,
              whether confound estimation and removal should be done or not
    smooth: float or None, optional,
            how much the data should be spatially smoothed during masking
    surface: Bool, optional,
             whether to run the analysis on surface (GIfTI) data
    mask_img: Niimg-like or None, optional,
              mask image; if None, it is computed from the functional images
    """
    start_time = time.ctime()
    # experimental paradigm meta-params
    motion_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    hrf_model = subject_dic['hrf_model']
    high_pass = subject_dic['high_pass']
    drift_model = subject_dic['drift_model']
    tr = subject_dic['TR']

    if not surface and (mask_img is None):
        mask_img = masking(subject_dic['func'], subject_dic['output_dir'])

    if additional_regressors is None:
        additional_regressors = dict([
            (session_id, None) for session_id in subject_dic['session_id']
        ])

    for session_id, fmri_path, onset, motion_path in zip(
            subject_dic['session_id'], subject_dic['func'],
            subject_dic['onset'], subject_dic['realignment_parameters']):

        task_id = _session_id_to_task_id([session_id])[0]

        if surface:
            from nibabel.gifti import read
            n_scans = np.array(
                [darrays.data for darrays in read(fmri_path).darrays]).shape[0]
        else:
            n_scans = nib.load(fmri_path).shape[3]

        # motion parameters
        motion = np.loadtxt(motion_path)
        # define the time stamps for different images
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        if task_id == 'audio':
            mask = np.array([1, 0, 1, 1, 0, 1, 1, 0, 1, 1])
            n_cycles = 28
            cycle_duration = 20
            t_r = 2
            cycle = np.arange(0, cycle_duration, t_r)[mask > 0]
            frametimes = np.tile(cycle, n_cycles) +\
                np.repeat(np.arange(n_cycles) * cycle_duration, mask.sum())
            frametimes = frametimes[:-2]  # for some reason...

        if surface:
            compcorr = False  # XXX Fixme

        if compcorr:
            confounds = high_variance_confounds(fmri_path, mask_img=mask_img)
            confounds = np.hstack((confounds, motion))
            confound_names = ['conf_%d' % i for i in range(5)] + motion_names
        else:
            confounds = motion
            confound_names = motion_names

        if onset is None:
            warnings.warn('Onset file not provided. Trying to guess it')
            task = os.path.basename(fmri_path).split('task')[-1][4:]
            onset = os.path.join(
                os.path.split(os.path.dirname(fmri_path))[0], 'model001',
                'onsets', 'task' + task + '_run001', 'task%s.csv' % task)

        if not os.path.exists(onset):
            warnings.warn('non-existent onset file, proceeding without it')
            paradigm = None
        else:
            paradigm = make_paradigm(onset, task_id)

        # handle manually supplied regressors
        add_reg_names = []
        if additional_regressors[session_id] is None:
            add_regs = confounds
        else:
            df = read_csv(additional_regressors[session_id])
            add_regs = []
            for regressor in df:
                add_reg_names.append(regressor)
                add_regs.append(df[regressor])
            add_regs = np.array(add_regs).T
            add_regs = np.hstack((add_regs, confounds))

        add_reg_names += confound_names

        # create the design matrix
        design_matrix = make_first_level_design_matrix(
            frametimes,
            paradigm,
            hrf_model=hrf_model,
            drift_model=drift_model,
            high_pass=high_pass,
            add_regs=add_regs,
            add_reg_names=add_reg_names)
        _, dmtx, names = check_design_matrix(design_matrix)

        # create the relevant contrasts
        contrasts = make_contrasts(task_id, names)

        if surface:
            if 'fsaverage5' in fmri_path:
                # this is low-resolution data
                subject_session_output_dir = os.path.join(
                    subject_dic['output_dir'],
                    'res_fsaverage5_%s' % session_id)
            else:
                subject_session_output_dir = os.path.join(
                    subject_dic['output_dir'], 'res_surf_%s' % session_id)
        else:
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_stats_%s' % session_id)

        if not os.path.exists(subject_session_output_dir):
            os.makedirs(subject_session_output_dir)
        np.savez(os.path.join(subject_session_output_dir, 'design_matrix.npz'),
                 design_matrix=design_matrix)

        if surface:
            run_surface_glm(design_matrix, contrasts, fmri_path,
                            subject_session_output_dir)
        else:
            z_maps, fmri_glm = run_glm(design_matrix,
                                       contrasts,
                                       fmri_path,
                                       mask_img,
                                       subject_dic,
                                       subject_session_output_dir,
                                       tr=tr,
                                       smoothing_fwhm=smooth)

            # do stats report
            anat_img = nib.load(subject_dic['anat'])
            stats_report_filename = os.path.join(subject_session_output_dir,
                                                 'report_stats.html')

            report = make_glm_report(
                fmri_glm,
                contrasts,
                threshold=3.0,
                bg_img=anat_img,
                cluster_threshold=15,
                title="GLM for subject %s" % session_id,
            )
            report.save_as_html(stats_report_filename)

    if not surface:
        ProgressReport().finish_dir(subject_session_output_dir)
        print("Statistic report written to %s\r\n" % stats_report_filename)