Code example #1
def generate_normalization_thumbnails(
    normalized_files,
    output_dir,
    brain="EPI",
    execution_log_html_filename=None,
    results_gallery=None,
    ):
    """Generate thumbnails after spatial normalization or subject

    Parameters
    ----------
    normalized_files: list
        paths to normalized images (3Ds or 4Ds)

    output_dir: string
        dir to which all output will be written

    brain: string (optional)
        a short comment/tag like 'epi', or 'anat'

    results_gallery: ResultsGallery instance (optional)
        gallery to which thumbnails will be committed

    """

    if isinstance(normalized_files, basestring):
        normalized = normalized_files
    else:
        mean_normalized_img = io_utils.compute_mean_3D_image(normalized_files)
        normalized = mean_normalized_img

    return generate_registration_thumbnails(
        (T1_TEMPLATE, 'template'),
        (normalized, brain),
        "Normalization of %s" % brain,
        output_dir,
        execution_log_html_filename=execution_log_html_filename,
        results_gallery=results_gallery,
        )
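
A hedged usage sketch for the helper above; the file names and output directory are hypothetical placeholders, and the optional gallery/log arguments are left at their defaults:

# hypothetical inputs: warped EPI runs produced by spatial normalization
thumbs = generate_normalization_thumbnails(
    ["wrbold_run1.nii", "wrbold_run2.nii"],   # assumed normalized 4D files
    "/tmp/report",                            # assumed report output directory
    brain="EPI")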
Code example #2
contrasts['EV2>EV1'] = contrasts['EV2'] - contrasts['EV1']
contrasts['effects_of_interest'] = contrasts['EV1'] + contrasts['EV2']

"""fit GLM"""
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FMRILinearModel(fmri_files, design_matrix.matrix,
                           mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

"""save computed mask"""
mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")
print "Saving mask image %s" % mask_path
nibabel.save(fmri_glm.mask, mask_path)

# compute bg onto which activation will be projected
mean_fmri_files = compute_mean_3D_image(fmri_files)
anat_img = nibabel.load(anat_file)
anat = anat_img.get_data()
anat_affine = anat_img.get_affine()

print "Computing contrasts .."
z_maps = {}
for contrast_id, contrast_val in contrasts.iteritems():
    print "\tcontrast id: %s" % contrast_id
    z_map, t_map, eff_map, var_map = fmri_glm.contrast(
        contrasts[contrast_id],
        con_id=contrast_id,
        output_z=True,
        output_stat=True,
        output_effects=True,
        output_variance=True,
        )
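
The snippet above is cut off inside the contrast loop. As a purely assumed continuation (not taken from the original source), each returned map could be saved with nibabel within the same loop:

    # assumed continuation: save each returned map to disk and remember the
    # z-map path for later use
    for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                 [z_map, t_map, eff_map, var_map]):
        map_dir = os.path.join(subject_data.output_dir, '%s_maps' % map_type)
        if not os.path.exists(map_dir):
            os.makedirs(map_dir)
        map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
        nibabel.save(out_map, map_path)
        if map_type == 'z':
            z_maps[contrast_id] = map_path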
Code example #3
def plot_segmentation(img, gm_filename, wm_filename=None,
                      csf_filename=None,
                      output_filename=None, cut_coords=None,
                      slicer='ortho',
                      cmap=None,
                      title='GM + WM + CSF segmentation'):
    """
    Plot a contour mapping of the GM, WM, and CSF of a subject's anatomical.

    Parameters
    ----------
    img: string or image object
         path of file containing image data, or the image object itself

    gm_filename: string
                 path of file containing Grey Matter template

    wm_filename: string (optional)
                 path of file containing White Matter template

    csf_filename: string (optional)
                 path of file containing Cerebro-Spinal Fluid template


    """

    # sanity
    if cmap is None:
        cmap = pl.cm.gray

    if cut_coords is None:
        cut_coords = (-10, -28, 17)

    if slicer in ['x', 'y', 'z']:
        cut_coords = (cut_coords['xyz'.index(slicer)],)

    # plot img
    if hasattr(img, '__len__'):
        img = compute_mean_3D_image(img)
    # otherwise assume a nifti image object
    anat = img.get_data()
    anat_affine = img.get_affine()
    _slicer = viz.plot_anat(
        anat, anat_affine, cut_coords=cut_coords,
        slicer=slicer,
        cmap=cmap,
        # black_bg=True,
        )

    # draw a GM contour map
    gm = nibabel.load(gm_filename)
    gm_template = gm.get_data()
    gm_affine = gm.get_affine()
    _slicer.contour_map(gm_template, gm_affine, levels=[.51], colors=["r"])

    # draw a WM contour map
    if wm_filename is not None:
        wm = nibabel.load(wm_filename)
        wm_template = wm.get_data()
        wm_affine = wm.get_affine()
        _slicer.contour_map(wm_template, wm_affine, levels=[.51], colors=["g"])

    # draw a CSF contour map
    if csf_filename is not None:
        csf = nibabel.load(csf_filename)
        csf_template = csf.get_data()
        csf_affine = csf.get_affine()
        _slicer.contour_map(
            csf_template, csf_affine, levels=[.51], colors=['b'])

    # misc
    _slicer.title("%s (cmap: %s)" % (title, cmap.name), size=12, color='w',
                 alpha=0)
    # pl.legend(("WM", "CSF", "GM"), loc="lower left", ncol=len(cut_coords))

    if output_filename is not None:
        pl.savefig(output_filename, bbox_inches='tight', dpi=200,
                   facecolor="k",
                   edgecolor="k")
Code example #4
def plot_registration(reference_img, coregistered_img,
                      title="untitled coregistration!",
                      cut_coords=None,
                      slicer='ortho',
                      cmap=None,
                      output_filename=None):
    """Plots a coregistered source as bg/contrast for the reference image

    Parameters
    ----------
    reference_img: string
        path to the reference image (rendered as an edge-map overlay)

    coregistered_img: string
        path to the coregistered image (rendered as the background)

    slicer: string (optional, defaults to 'ortho')
        slicer param to pass to the nipy.labs.viz.plot_??? APIs

    cmap: matplotlib colormap object (optional, defaults to gray)
        colormap to use for plots

    output_filename: string (optional)
        path where plot will be stored

    """

    # sanity
    if cmap is None:
        cmap = pl.cm.gray  # registration QA always gray cmap!

    if cut_coords is None:
        cut_coords = (-10, -28, 17)

    if slicer in ['x', 'y', 'z']:
        cut_coords = (cut_coords['xyz'.index(slicer)],)

    # plot the coregistered image
    if hasattr(coregistered_img, '__len__'):
        coregistered_img = compute_mean_3D_image(coregistered_img)
    # otherwise assume a nifti image object
    coregistered_data = coregistered_img.get_data()
    coregistered_affine = coregistered_img.get_affine()
    _slicer = viz.plot_anat(
        anat=coregistered_data,
        anat_affine=coregistered_affine,
        cmap=cmap,
        cut_coords=cut_coords,
        slicer=slicer,
        # black_bg=True,
        )

    # overlap the reference image
    if hasattr(reference_img, '__len__'):
        reference_img = compute_mean_3D_image(reference_img)
    # otherwise assume a nifti image object
    reference_data = reference_img.get_data()
    reference_affine = reference_img.get_affine()
    _slicer.edge_map(reference_data, reference_affine)

    # misc
    _slicer.title("%s (cmap: %s)" % (title, cmap.name), size=12, color='w',
                  alpha=0)

    if output_filename is not None:
        try:
            pl.savefig(output_filename, dpi=200, bbox_inches='tight',
                       facecolor="k",
                       edgecolor="k")
        except AttributeError:
            # XXX TODO: handle this case properly
            pass
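
A hedged usage sketch; both file names below are hypothetical placeholders:

# hypothetical inputs: check a mean EPI coregistered to the subject's anatomical
plot_registration(
    "anat.nii",                       # assumed reference image (edge overlay)
    "mean_func.nii",                  # assumed coregistered image (background)
    title="Coregistration: mean EPI vs. anatomical",
    output_filename="/tmp/coregistration_qa.png")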
Code example #5
def generate_segmentation_thumbnails(
    normalized_files,
    output_dir,
    subject_gm_file=None,
    subject_wm_file=None,
    subject_csf_file=None,
    brain='EPI',
    execution_log_html_filename=None,
    cmap=None,
    results_gallery=None,
    ):
    """Generates thumbnails after indirect normalization
    (segmentation + normalization)

    Parameters
    ----------
    normalized_files: list
        paths to normalized images (3Ds or 4Ds)

    output_dir: string
        dir to which all output will be written

    subject_gm_file: string (optional)
        path to subject GM file

    subject_wm_file: string (optional)
        path to subject WM file

    subject_csf_file: string (optional)
        path to subject CSF file

    brain: string (optional)
        a short comment/tag like 'epi', or 'anat'

    cmap: optional
        cmap (color map) to use for plots

    results_gallery: base_reporter.ResultsGallery instance (optional)
        gallery to which thumbnails will be committed

    """

    if isinstance(normalized_files, basestring):
        normalized_file = normalized_files
    else:
        mean_normalized_file = os.path.join(output_dir,
                                            "%s.nii" % brain)

        io_utils.compute_mean_3D_image(
            normalized_files, output_filename=mean_normalized_file)
        normalized_file = mean_normalized_file

    output = {}

    # prepare for smart caching
    qa_cache_dir = os.path.join(output_dir, "QA")
    if not os.path.exists(qa_cache_dir):
        os.makedirs(qa_cache_dir)
    qa_mem = joblib.Memory(cachedir=qa_cache_dir, verbose=5)

    thumb_desc = "Segmentation of %s " % brain
    if execution_log_html_filename:
        thumb_desc += " (<a href=%s>see execution log</a>)" % os.path.basename(
            execution_log_html_filename)

    # plot contours of template compartments on subject's brain
    template_compartments_contours = os.path.join(
        output_dir,
        "template_tmps_contours_on_%s.png" % brain)
    template_compartments_contours_axial = os.path.join(
        output_dir,
        "template_compartments_contours_on_%s_axial.png" % brain)

    qa_mem.cache(check_preprocessing.plot_segmentation)(
        normalized_file,
        GM_TEMPLATE,
        wm_filename=WM_TEMPLATE,
        csf_filename=CSF_TEMPLATE,
        output_filename=template_compartments_contours_axial,
        slicer='z',
        cmap=cmap,
        title="template TPMs")

    qa_mem.cache(check_preprocessing.plot_segmentation)(
        normalized_file,
        gm_filename=GM_TEMPLATE,
        wm_filename=WM_TEMPLATE,
        csf_filename=CSF_TEMPLATE,
        output_filename=template_compartments_contours,
        cmap=cmap,
        title=("Template GM, WM, and CSF contours on "
               "subject's %s") % brain)

    # create thumbnail
    if results_gallery:
        thumbnail = base_reporter.Thumbnail()
        thumbnail.a = base_reporter.a(
            href=os.path.basename(template_compartments_contours))
        thumbnail.img = base_reporter.img(
            src=os.path.basename(template_compartments_contours),
            height="250px")
        thumbnail.description = thumb_desc

        results_gallery.commit_thumbnails(thumbnail)

    # plot contours of subject's compartments on subject's brain
    if subject_gm_file:
        subject_compartments_contours = os.path.join(
            output_dir,
            "subject_tmps_contours_on_subject_%s.png" % brain)
        subject_compartments_contours_axial = os.path.join(
            output_dir,
            "subject_tmps_contours_on_subject_%s_axial.png" % brain)

        qa_mem.cache(check_preprocessing.plot_segmentation)(
            normalized_file,
            subject_gm_file,
            wm_filename=subject_wm_file,
            csf_filename=subject_csf_file,
            output_filename=subject_compartments_contours_axial,
            slicer='z',
            cmap=cmap,
            title="subject TPMs")

        title_prefix = "Subject's GM"
        if subject_wm_file:
            title_prefix += ", WM"
        if subject_csf_file:
            title_prefix += ", and CSF"
        qa_mem.cache(check_preprocessing.plot_segmentation)(
            normalized_file,
            subject_gm_file,
            wm_filename=subject_wm_file,
            csf_filename=subject_csf_file,
            output_filename=subject_compartments_contours,
            cmap=cmap,
            title=("%s contours on "
               "subject's %s") % (title_prefix, brain))

        # create thumbnail
        if results_gallery:
            thumbnail = base_reporter.Thumbnail()
            thumbnail.a = base_reporter.a(
                href=os.path.basename(subject_compartments_contours))
            thumbnail.img = base_reporter.img(
                src=os.path.basename(subject_compartments_contours),
                height="250px")
            thumbnail.description = thumb_desc

            results_gallery.commit_thumbnails(thumbnail)

    output['axials'] = {}
    output['axial'] = template_compartments_contours_axial

    return output
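
A hedged usage sketch for the function above; every path is a hypothetical placeholder:

# hypothetical inputs: normalized EPI plus the subject's warped tissue segments
output = generate_segmentation_thumbnails(
    ["wrbold_run1.nii"],              # assumed normalized EPI file(s)
    "/tmp/report",                    # assumed report output directory
    subject_gm_file="wc1anat.nii",    # assumed warped GM segment
    subject_wm_file="wc2anat.nii",    # assumed warped WM segment
    subject_csf_file="wc3anat.nii",   # assumed warped CSF segment
    brain="EPI")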
Code example #6
"""more interesting contrasts"""
contrasts['active-rest'] = contrasts['active'] - contrasts['rest']

"""fit GLM"""
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FMRILinearModel(fmri_files, design_matrix.matrix,
                           mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

"""save computed mask"""
mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")
print "Saving mask image %s" % mask_path
nibabel.save(fmri_glm.mask, mask_path)

# compute bg onto which activation will be projected
anat_img = compute_mean_3D_image(fmri_files)
anat = anat_img.get_data()
anat_affine = anat_img.get_affine()

print "Computing contrasts .."
z_maps = {}
for contrast_id, contrast_val in contrasts.iteritems():
    print "\tcontrast id: %s" % contrast_id
    z_map, t_map, eff_map, var_map = fmri_glm.contrast(
        contrasts[contrast_id],
        con_id=contrast_id,
        output_z=True,
        output_stat=True,
        output_effects=True,
        output_variance=True,
        )
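
As an assumed follow-up (not part of the original snippet), the last computed z-map could be projected onto the mean-EPI background using nipy's viz.plot_map, the same viz module used in the plotting examples above; the threshold is purely illustrative:

# assumed follow-up: overlay the last z-map on the mean EPI background
# (pl is pylab and viz is nipy.labs.viz, assumed imported as in the
# plotting examples above)
viz.plot_map(z_map.get_data(), z_map.get_affine(),
             anat=anat, anat_affine=anat_affine,
             threshold=3.,                       # illustrative threshold
             title="%s (|Z| > 3)" % contrast_id)
pl.savefig(os.path.join(subject_data.output_dir,
                        "%s_z_map.png" % contrast_id))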