def plot_pmaps():
    for gr in groups:
        filename = '_'.join(['pmap', 'voxel',
                             'norm', '_'.join(gr), 'baseline',
                             'adni' ]) + '.nii'
        nii_img = os.path.join( NII_DIR, filename)
        for ext in ['.png', '.pdf', '.svg']:
            try:
                print(np.max(nib.load(nii_img).get_data()))
                vm = 8
                plot_img(nii_img, bg_img=MNI_TEMPLATE,
                         colorbar=True, cmap=cmap.hot,
                         cut_coords=(0, -45, 32),
                         output_file=os.path.join('figures', 'release',
                                                  filename.split('.')[0])+ext,
                         black_bg=True, threshold=5, vmin=0, vmax=vm,
                         title='/'.join(gr))
            except ValueError:
                plot_img(nii_img, bg_img=MNI_TEMPLATE,
                         colorbar=True, cmap=cmap.hot,
                         cut_coords=(0, -45, 32),
                         output_file=os.path.join('figures', 'release',
                                                  filename.split('.')[0])+ext,
                         black_bg=True, threshold=2, vmin=0, vmax=vm,
                         title='/'.join(gr))
def plot_segmentation(
        img, gm_filename, wm_filename=None, csf_filename=None,
        output_filename=None, cut_coords=None, display_mode='ortho',
        cmap=None, title='GM + WM + CSF segmentation', close=False):
    """
    Plot a contour mapping of the GM, WM, and CSF of a subject's anatomical.

    Parameters
    ----------
    img: string or image object
         path of the file containing the image data, or the image object itself

    gm_filename: string
                 path of file containing Grey Matter template

    wm_filename: string (optional)
                 path of file containing White Matter template

    csf_filename: string (optional)
                 path of file containing Cerebro-Spinal Fluid template


    """
    # misc
    if cmap is None:
        cmap = plt.cm.gray
    if cut_coords is None:
        cut_coords = (-10, -28, 17)
    if display_mode in ['x', 'y', 'z']:
        cut_coords = (cut_coords['xyz'.index(display_mode)],)

    # plot img
    img = mean_img(img)
    img = reorder_img(img, resample="continuous")
    _slicer = plot_img(img, cut_coords=cut_coords, display_mode=display_mode,
                       cmap=cmap, black_bg=True)

    # add TPM contours
    gm = nibabel.load(gm_filename)
    _slicer.add_contours(gm, levels=[.51], colors=["r"])
    if wm_filename is not None:
        _slicer.add_contours(wm_filename, levels=[.51], colors=["g"])
    if csf_filename is not None:
        _slicer.add_contours(csf_filename, levels=[.51], colors=['b'])

    # misc
    _slicer.title(title, size=12, color='w', alpha=0)
    if output_filename is not None:
        plt.savefig(output_filename, bbox_inches='tight', dpi=200,
                    facecolor="k",
                    edgecolor="k")
        if close:
            plt.close()
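# Hedged usage sketch for plot_segmentation above: the file names below are
# hypothetical placeholders (SPM-style c1/c2/c3 tissue probability maps), not
# taken from the original code, and the module-level imports the function
# relies on (nibabel, matplotlib.pyplot, nilearn's plot_img/mean_img/reorder_img)
# are assumed to be present.
plot_segmentation(
    'sub01_T1w.nii',                    # subject anatomical (path or image object)
    gm_filename='c1sub01_T1w.nii',      # grey-matter probability map
    wm_filename='c2sub01_T1w.nii',      # white-matter probability map
    csf_filename='c3sub01_T1w.nii',     # CSF probability map
    output_filename='sub01_segmentation.png',
    close=True)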
Example No. 3
def preprocess_fmri_fullbrain(experiment_data):
    # prepare the full-brain fMRI activation plots
    # http://nilearn.github.io/plotting/index.html
    fmri_data = nibabel.load(experiment_data['nifti_path'])
    fmri_data_slices = nibabel.four_to_three(fmri_data)
    for i in range(MAXIMUM_EPI_PLOTS):
        output_file_cond = os.path.join(
            os.path.dirname(__file__), '..', '..', 'temp', 'fmri',
            experiment_data['participant'] + '_fMRIfull_' + str(i) + '.png')

        # TODO make the cut slice configurable directly from the UI
        if OVERWRITE_EPI_PLOTS or not os.path.exists(output_file_cond):
            plotting.plot_img(fmri_data_slices[i],
                              output_file=output_file_cond,
                              title='Scan ' + str(i),
                              cut_coords=(config.FMRI_CUT_SLICE_X,
                                          config.FMRI_CUT_SLICE_Y,
                                          config.FMRI_CUT_SLICE_Z),
                              annotate=True,
                              draw_cross=True,
                              black_bg=True,
                              cmap=plt.cm.nipy_spectral)
def plot_tmaps():
    for gr in groups:
        filename = '_'.join(['tmap', 'regions',
                             '_'.join(gr) ]) + '.nii.gz'
        nii_img = os.path.join( NII_DIR, filename)
        for ext in ['.png', '.pdf', '.svg']:
            try:
                print(np.max(nib.load(nii_img).get_data()))
                vm = 7
                plot_img(nii_img, bg_img=MNI_TEMPLATE, cmap=cm.cold_hot,
                         black_bg=True, threshold=4,
                         vmin = -vm, vmax=vm, cut_coords=(0, 0, 36),
                         output_file=os.path.join('figures', 'release',
                                                  filename.split('.')[0])+ext,
                         title='/'.join(gr), colorbar=True)
            except ValueError:
                plot_img(nii_img, bg_img=MNI_TEMPLATE, cmap=cm.cold_hot,
                         black_bg=True, threshold=2.3, cut_coords=(0, 0, 36),
                         output_file=os.path.join('figures', 'release',
                                                  filename.split('.')[0])+ext,
                         vmin = -vm, vmax=vm,
                         title='/'.join(gr), colorbar=True)
Example No. 5
def create_maps(masker,
                distribution,
                output_path,
                vmax=None,
                not_glass_brain=False,
                logger=None,
                distribution_max=None,
                distribution_min=None):
    """ Create the maps from the distribution.
    Arguments:
        - masker: NifitMasker
        - distribution: np.array (1D)
        - output_path: str
        - vmax: float
        - not_glass_brain: bool
    """
    logger.info("Transforming array to .nii image...")
    if distribution_min is not None:
        distribution[np.where(
            distribution < distribution_min)] = np.nan  # remove outliers
    if distribution_max is not None:
        distribution[np.where(
            distribution > distribution_max)] = np.nan  # remove outliers
    img = masker.inverse_transform(distribution)
    logger.validate()
    logger.info("Saving image...")
    nib.save(img, output_path + '.nii.gz')
    logger.validate()

    plt.hist(distribution[~np.isnan(distribution)], bins=50)
    plt.savefig(output_path + '_hist.png')
    plt.close()

    logger.info("Saving glass brain...")
    if not_glass_brain:
        display = plot_img(img,
                           colorbar=True,
                           black_bg=True,
                           cut_coords=(-48, 24, -10))
        display.savefig(output_path + '.png')
        display.close()
    else:
        display = plot_glass_brain(img,
                                   display_mode='lzry',
                                   colorbar=True,
                                   black_bg=True,
                                   vmax=vmax,
                                   plot_abs=False)
        display.savefig(output_path + '.png')
        display.close()
    logger.validate()
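# Hedged usage sketch for create_maps above. The mask path and the score array
# are hypothetical, and the stub logger only mimics the project-specific logger
# (which must expose info() and validate()); module-level imports of np, nib,
# plt, plot_img and plot_glass_brain are assumed, as in the function body.
from nilearn.input_data import NiftiMasker

class _StubLogger:
    def info(self, msg):
        print(msg)
    def validate(self):
        print("done")

masker = NiftiMasker(mask_img='mask.nii.gz').fit()        # hypothetical mask file
n_voxels = int((masker.mask_img_.get_fdata() > 0).sum())  # voxels inside the mask
scores = np.random.rand(n_voxels)                         # fake 1D distribution
create_maps(masker, scores, 'r2_map', vmax=0.3,
            logger=_StubLogger(), distribution_min=0.0)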
Example No. 6
 def cor_brain(self, numproc=1, plot=True, mask=True):
     """Method to generate an brain of correlations between all brain volumes in the 4th dimension and their labels.
     
     :param numproc: {int} number of parallel processes applied to calculate the covbrain.
     :param plot: {bool} whether the generated brains should be visualized
     :param mask: {bool} whether the mask in the attribute :py:attr:`mask` should be applied to the image before.
     :return: the correlation brain as 3D Nifti1Image
     """
     print("Computing correlation brain...")
     if mask:
         if self.data is not None:
             vectorized = self.data
         else:
             vectorized = apply_mask(self.img, self.mask)
     else:
         vectorized = apply_mask(
             self.img,
             Nifti1Image(np.ones(self.img.shape[:3]), self.img.affine))
     with warnings.catch_warnings():  # ignore RuntimeWarning
         warnings.simplefilter("ignore")
         try:
             p = Pool(numproc)  # number of parallel processes
             corval = np.array(
                 p.map(partial(_cor_comp, self.targets), vectorized.T))
         finally:
             p.close()
             p.join()
     corval = np.nan_to_num(corval)  # replace nan with 0
     if mask:
         self.cor = corval
     else:
         self.cor = Nifti1Image(corval.reshape(self.img.shape[:3]),
                                self.img.affine)
         self.cor.uncache()
     if plot and not mask:
         plot_img(self.cor, title="Correlation brain")
     del vectorized
     print("\tCorrelation brain computed!")
Example No. 7
    def std_brain(self, plot=True, mask=False):
        """Method to generate an brain of standard deviations from all brain volumes in the 4th dimension.

        :param plot: {bool} whether the generated brains should be visualized
        :param mask: {bool} whether the mask in the attribute :py:attr:`mask` should be applied to the image before.
        :return: depending on the given options an averaged brain and / or standard-deviation-brain as numpy.array
        """
        print("Computing std brain...")
        if mask:
            if self.data is not None:
                vectorized = self.data
            else:
                vectorized = apply_mask(self.img, self.mask)
        else:
            vectorized = apply_mask(self.img, Nifti1Image(np.ones(self.img.shape[:3]), self.img.affine))
        
        self.std = Nifti1Image(np.std(vectorized, axis=0).reshape(self.img.shape[:3]),
                               self.img.affine)
        if plot:
            plot_img(self.std, title='Standard Deviation Brain')
        del vectorized
        self.std.uncache()
        print("\tStd brain computed!")
Example No. 8
def test_plot_img_with_resampling(binary_img, testdata_3d):  # noqa:F811
    """Tests for plot_img with resampling of the data image."""
    img = _testdata_3d_for_resampling(testdata_3d['img'], binary_img)
    if binary_img:
        assert _is_binary_niimg(img)
    else:
        assert not _is_binary_niimg(img)
    display = plot_img(img)
    display.add_overlay(img)
    display.add_contours(img,
                         contours=2,
                         linewidth=4,
                         colors=['limegreen', 'yellow'])
    display.add_edges(img, color='c')
    plt.close()
Example No. 9
def get_projections(grad_file, hcp_img, componant):
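    # Project left/right cortical gradient values onto the ROI labels of an HCP
    # parcellation volume, mark subcortical labels with a constant value, then
    # save and plot the result (snakemake input/output paths are assumed).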

    grads = pd.read_csv(grad_file)

    R_gradient = grads['R_grad_' + componant]
    L_gradient = grads['L_grad_' + componant]

    print(componant)

    img = load_img(hcp_img)
    data = (img.get_data())
    x = np.shape(data)[0]
    y = np.shape(data)[1]
    z = np.shape(data)[2]
    R_rois = np.loadtxt("cfg/R_coords.txt")
    L_rois = np.loadtxt("cfg/L_coords.txt")
    #rois = coords.values
    R_vals = np.column_stack((R_gradient, R_rois))
    L_vals = np.column_stack((L_gradient, L_rois))

    #np.place(data, data==1, 1)

    subcortical_ROIs = np.linspace(0, 255, 256)

    for i, val in enumerate(R_rois):
        np.place(data, data == val, (R_gradient[i] * 1000) + 5000)

    for i, val in enumerate(L_rois):
        np.place(data, data == val, (L_gradient[i] * 1000) + 5000)

    for i, val in enumerate(subcortical_ROIs):
        np.place(data, data == val, 4600)

    final_img = nib.Nifti1Image(data, img.affine, img.header)
    nifty_name = "final_image.nii.gz"
    nib.save(final_img, snakemake.output.projected_image)

    display = plotting.plot_img(snakemake.output.projected_image,
                                cut_coords=(14, 10, 0),
                                threshold=4601,
                                title="Diffusion Gradient",
                                cmap='gist_rainbow',
                                colorbar=True)

    display.savefig(snakemake.output.projected_plot)
Example No. 10
def viz_all_imgs(path, count):
    output_dir = './imgs_visualization'
    if not os.path.exists(output_dir):
        os.mkdir('./imgs_visualization')
    for f in os.listdir(path):
        if f.endswith('mgz'):
            print(f'{count[0]}: {path}/{f}')

            # further reprocessing
            img = nib.load(os.path.join(path, 'brainmask.mgz'))
            img = np.swapaxes(img.get_data(), 1, 2)
            img = np.flip(img, 1)
            img = np.flip(img, 2)
            img = resize(img, (sp_size, sp_size, sp_size), mode='constant')
            img = torch.from_numpy(img).float().view(1, sp_size, sp_size,
                                                     sp_size)
            img = img * 2 - 1

            featmask = np.squeeze((0.5 * img + 0.5).data.cpu().numpy())
            featmask = nib.Nifti1Image(featmask, affine=np.eye(4))
            disp = plotting.plot_img(featmask,
                                     cut_coords=arr1,
                                     draw_cross=False,
                                     annotate=False,
                                     black_bg=True,
                                     display_mode='x')
            plotting.show()
            disp = plotting.plot_img(featmask,
                                     cut_coords=arr2,
                                     draw_cross=False,
                                     annotate=False,
                                     black_bg=True,
                                     display_mode='x')
            plotting.show()
            subject = path.split('/')[-2]
            plotting.plot_img(featmask,
                              title=f'subject: {subject}_index: {count}')
            plotting.plot_img(featmask, title=f'subject: {subject}_index: {count}',
                              output_file=f'{output_dir}/subject_{subject}_index_{count[0]}.png')
            plotting.show()
            count[0] += 1
        elif os.path.isdir(f'{path}/{f}'):
            viz_all_imgs(f'{path}/{f}', count)
Example No. 11
def _carpet(
    data,
    seg,
    order,
    cmap,
    tr=None,
    detrend=True,
    subplot=None,
    legend=False,
    title=None,
    output_file=None,
    epinii=None,
    segnii=None,
    nslices=None,
):
    """Common carpetplot building code for volumetric / CIFTI plots"""
    notr = False
    if tr is None:
        notr = True
        tr = 1.0

    # Detrend data
    v = (None, None)
    if detrend:
        data = clean(data.T, t_r=tr).T
        v = (-2, 2)

    # If subplot is not defined
    if subplot is None:
        subplot = mgs.GridSpec(1, 1)[0]

    # Define nested GridSpec
    wratios = [1, 100, 20]
    gs = mgs.GridSpecFromSubplotSpec(
        1,
        2 + int(legend),
        subplot_spec=subplot,
        width_ratios=wratios[: 2 + int(legend)],
        wspace=0.0,
    )

    # Segmentation colorbar
    ax0 = plt.subplot(gs[0])
    ax0.set_yticks([])
    ax0.set_xticks([])
    ax0.imshow(seg[order, np.newaxis], interpolation="none", aspect="auto", cmap=cmap)

    ax0.grid(False)
    ax0.spines["left"].set_visible(False)
    ax0.spines["bottom"].set_color("none")
    ax0.spines["bottom"].set_visible(False)

    # Carpet plot
    ax1 = plt.subplot(gs[1])
    ax1.imshow(
        data[order],
        interpolation="nearest",
        aspect="auto",
        cmap="gray",
        vmin=v[0],
        vmax=v[1],
    )

    ax1.grid(False)
    ax1.set_yticks([])
    ax1.set_yticklabels([])

    # Set 10 frame markers in X axis
    interval = max((int(data.shape[-1] + 1) // 10, int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    ax1.set_xticks(xticks)
    ax1.set_xlabel("time (frame #)" if notr else "time (s)")
    labels = tr * (np.array(xticks))
    ax1.set_xticklabels(["%.02f" % t for t in labels.tolist()], fontsize=5)

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        ax0.spines[side].set_color("none")
        ax0.spines[side].set_visible(False)
        ax1.spines[side].set_color("none")
        ax1.spines[side].set_visible(False)

    ax1.yaxis.set_ticks_position("left")
    ax1.xaxis.set_ticks_position("bottom")
    ax1.spines["bottom"].set_visible(False)
    ax1.spines["left"].set_color("none")
    ax1.spines["left"].set_visible(False)

    ax2 = None
    if legend:
        gslegend = mgs.GridSpecFromSubplotSpec(
            5, 1, subplot_spec=gs[2], wspace=0.0, hspace=0.0
        )
        coords = np.linspace(int(0.10 * nslices), int(0.95 * nslices), 5).astype(
            np.uint8
        )
        for i, c in enumerate(coords.tolist()):
            ax2 = plt.subplot(gslegend[i])
            plot_img(
                segnii,
                bg_img=epinii,
                axes=ax2,
                display_mode="z",
                annotate=False,
                cut_coords=[c],
                threshold=0.1,
                cmap=cmap,
                interpolation="nearest",
            )

    if output_file is not None:
        figure = plt.gcf()
        figure.savefig(output_file, bbox_inches="tight")
        plt.close(figure)
        figure = None
        return output_file

    return (ax0, ax1, ax2), gs
# -------------------
#
# .. note:: In this tutorial, we load the data using a data downloading
#           function. To input your own data, you will need to provide
#           a list of paths to your own files in the ``subject_data`` variable.
#           These should abide by the Brain Imaging Data Structure (BIDS)
#           organization.
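###############################################################################
# As a hedged sketch (the pattern below is hypothetical, not part of this
# tutorial), such a list of your own functional images could be built with glob:
import glob
my_func_files = sorted(glob.glob('sub-01/func/sub-01_task-*_bold.nii.gz'))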

from nilearn.datasets import fetch_spm_auditory
subject_data = fetch_spm_auditory()
print(subject_data.func)  # print the list of names of functional images

###############################################################################
# We can display the first functional image and the subject's anatomy:
from nilearn.plotting import plot_stat_map, plot_anat, plot_img, show
plot_img(subject_data.func[0])
plot_anat(subject_data.anat)

###############################################################################
# Next, we concatenate all the 3D EPI images into a single 4D image,
# then we average them in order to create a background
# image that will be used to display the activations:

from nilearn.image import concat_imgs, mean_img
fmri_img = concat_imgs(subject_data.func)
mean_img = mean_img(fmri_img)

###############################################################################
# Specifying the experimental paradigm
# ------------------------------------
#
Example No. 13
    def _reporting(self):
        """
        Returns
        -------
        displays : list
            A list of all displays to be rendered.

        """
        try:
            from nilearn import plotting
        except ImportError:
            with warnings.catch_warnings():
                mpl_unavail_msg = ('Matplotlib is not imported! '
                                   'No reports will be generated.')
                warnings.filterwarnings('always', message=mpl_unavail_msg)
                warnings.warn(category=ImportWarning, message=mpl_unavail_msg)
                return [None]

        img = self._reporting_data['images']
        mask = self._reporting_data['mask']
        if img is not None:
            dim = image.load_img(img).shape
            if len(dim) == 4:
                # compute middle image from 4D series for plotting
                img = image.index_img(img, dim[-1] // 2)
        else:  # images were not provided to fit
            msg = ("No image provided to fit in NiftiMasker. "
                   "Setting image to mask for reporting.")
            warnings.warn(msg)
            self._warning_message = msg
            img = mask

        # create display of retained input mask, image
        # for visual comparison
        init_display = plotting.plot_img(img, black_bg=False, cmap='CMRmap_r')
        init_display.add_contours(mask,
                                  levels=[.5],
                                  colors='g',
                                  linewidths=2.5)

        if 'transform' not in self._reporting_data:
            return [init_display]

        else:  # if resampling was performed
            self._report_description = (self._report_description +
                                        self._overlay_text)

            # create display of resampled NiftiImage and mask
            # assuming that resampl_img has same dim as img
            resampl_img, resampl_mask = self._reporting_data['transform']
            if resampl_img is not None:
                if len(dim) == 4:
                    # compute middle image from 4D series for plotting
                    resampl_img = image.index_img(resampl_img, dim[-1] // 2)
            else:  # images were not provided to fit
                resampl_img = resampl_mask

            final_display = plotting.plot_img(resampl_img,
                                              black_bg=False,
                                              cmap='CMRmap_r')
            final_display.add_contours(resampl_mask,
                                       levels=[.5],
                                       colors='g',
                                       linewidths=2.5)

        return [init_display, final_display]
Example No. 14
    anat_list = glob.glob(os.path.join(anat_path, '[E-n]*.hdr'))
    
    if len(fmri_list) == 291 and len(anat_list) == 1 and subject_id != 'S14659':
        print(subject_id, len(anat_list), len(fmri_list))

        subject_data = SubjectData(func=[fmri_list[2:]],
                                   anat=anat_list[0],
                                   output_dir=os.path.join(OUPUT_DIR,
                                   subject_id))
                                   
        
        anat_name = '_'.join([subject_id, 'anat'])
        r_img_anat = image.reorder_img(anat_list[0], resample=True)
        
        plotting.plot_img(r_img_anat,
                          output_file=os.path.join(OUPUT_DIR, 'figs', anat_name),
                          title=anat_name,
                          black_bg=True)
        #############################################
        do_subject_preproc(
            subject_data, #subject data class
            deleteorient=False, #
        
            slice_timing=True,
            slice_order="ascending",
            interleaved=True,
            refslice=1,
            TR=2.4,
            TA=2.3,
            slice_timing_software="spm",
        
            realign=True,
Example No. 15
basic nilearn functionalities.
"""

# Let us use a Nifti file that is shipped with nilearn
from nilearn.datasets import MNI152_FILE_PATH

# Note that the variable MNI152_FILE_PATH is just a path to a Nifti file
print('Path to MNI152 template: %r' % MNI152_FILE_PATH)

#########################################################################
# A first step: looking at our data
# ----------------------------------
#
# Let's quickly plot this file:
from nilearn import plotting
plotting.plot_img(MNI152_FILE_PATH)

#########################################################################
# This is not a very pretty plot. We just used the simplest possible
# code. There is a whole :ref:`section of the documentation <plotting>`
# on making prettier code.
#
# **Exercise**: Try plotting one of your own files. In the above,
# MNI152_FILE_PATH is nothing more than a string with a path pointing to
# a nifti image. You can replace it with a string pointing to a file on
# your disk. Note that it should be a 3D volume, and not a 4D volume.
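###############################################################################
# A hedged sketch of that exercise (the path below is a hypothetical file on
# your own disk); if your file is 4D, reduce it to 3D first, for instance with
# ``index_img``:
from nilearn import image
my_img = 'my_scan.nii.gz'                    # replace with a path on your disk
if len(image.load_img(my_img).shape) == 4:   # a 4D series: keep a single volume
    my_img = image.index_img(my_img, 0)
plotting.plot_img(my_img)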

#########################################################################
# Simple image manipulation: smoothing
# -------------------------------------
#
Example No. 16
    def _reporting(self):
        """
        Returns
        -------
        displays : list
            A list of all displays to be rendered.

        """
        from nilearn.reporting.html_report import _embed_img
        from nilearn import plotting
        if self._reporting_data is not None:
            maps_image = self._reporting_data['maps_image']
        else:
            maps_image = None

        if maps_image is not None:
            n_maps = image.get_data(maps_image).shape[-1]
            maps_to_be_displayed = range(n_maps)
            if isinstance(self.displayed_maps, int):
                if n_maps < self.displayed_maps:
                    msg = ("`generate_report()` received "
                           f"{self.displayed_maps} to be displayed. "
                           f"But masker only has {n_maps} maps."
                           f"Setting number of displayed maps to {n_maps}.")
                    warnings.warn(category=UserWarning, message=msg)
                    self.displayed_maps = n_maps
                maps_to_be_displayed = range(self.displayed_maps)
            elif isinstance(self.displayed_maps, (list, np.ndarray)):
                if max(self.displayed_maps) > n_maps:
                    raise ValueError("Report cannot display the "
                                     "following maps "
                                     f"{self.displayed_maps} because "
                                     f"masker only has {n_maps} maps.")
                maps_to_be_displayed = self.displayed_maps
            self._report_content['report_id'] = self.report_id
            self._report_content['number_of_maps'] = n_maps
            self._report_content['displayed_maps'] = list(maps_to_be_displayed)
            img = self._reporting_data['img']
            embeded_images = []
            if img is not None:
                dim = image.load_img(img).shape
                if len(dim) == 4:
                    # compute middle image from 4D series for plotting
                    img = image.index_img(img, dim[-1] // 2)
                # Find the cut coordinates
                cut_coords = [
                    plotting.find_xyz_cut_coords(image.index_img(
                        maps_image, i)) for i in maps_to_be_displayed
                ]
                for idx, component in enumerate(maps_to_be_displayed):
                    display = plotting.plot_img(img,
                                                cut_coords=cut_coords[idx],
                                                black_bg=False,
                                                cmap='CMRmap_r')
                    display.add_overlay(image.index_img(maps_image, idx),
                                        cmap=plotting.cm.black_blue)
                    embeded_images.append(_embed_img(display))
                    display.close()
                return embeded_images
            else:
                msg = ("No image provided to fit in NiftiMapsMasker. "
                       "Plotting only spatial maps for reporting.")
                warnings.warn(msg)
                self._report_content['warning_message'] = msg
                for component in maps_to_be_displayed:
                    display = plotting.plot_stat_map(
                        image.index_img(maps_image, component))
                    embeded_images.append(_embed_img(display))
                    display.close()
                return embeded_images
        else:
            return [None]
# ---------------------
# Next, we use the vessel filter to estimate the vasculature from the QSM data
vessel_result = nighres.filtering.multiscale_vessel_filter(
    input_image=skullstripping_results['t1w_masked'],
    scales=2,
    save_data=True,
    file_name="sub001_sess1",
    output_dir=out_dir)

############################################################################
# Now we look at the topology-constrained segmentation MGDM created
if not skip_plots:
    plotting.plot_img(vessel_result['pv'],
                      vmin=0,
                      vmax=1,
                      cmap='cubehelix',
                      colorbar=True,
                      annotate=False,
                      draw_cross=False)
    plotting.plot_img(vessel_result['diameter'],
                      vmin=0,
                      vmax=4,
                      cmap='cubehelix',
                      colorbar=True,
                      annotate=False,
                      draw_cross=False)

############################################################################

#############################################################################
# If the example is not run in a jupyter notebook, render the plots:
Example No. 18
#!/usr/bin/env python

from nilearn import plotting
import numpy as np
import os
import nibabel as nib
import matplotlib.pyplot as plt

### View basic images
# point to dataset
aud = "./Data/audio_contrast.nii"
plotting.plot_img(aud)

vis = "./Data/visual_contrast.nii"
plotting.plot_img(vis)

### Smoothing
from nilearn import image
smooth_aud_img = image.smooth_img(aud, fwhm=10)  # in-memory object
print(smooth_aud_img)
plotting.plot_img(smooth_aud_img)
plotting.show()  #required outside of ipython

smooth_aud_img.to_filename('smooth_aud_img.nii.gz')

### Visualizing a 3D file
bg = "./Data/TT_N27.nii"
plotting.plot_stat_map(aud, bg)  #no threshold
plotting.plot_stat_map(aud, bg, threshold=3)
plotting.plot_stat_map(vis, bg, threshold=3)
Example No. 19
    output_dir=out_dir,
    overwrite=False)

############################################################################
# Now we look at the topology-constrained segmentation MGDM created
if not skip_plots:
    plotting.plot_roi(massp['max_label'],
                      dataset['qr1'],
                      annotate=False,
                      black_bg=False,
                      draw_cross=False,
                      cmap='cubehelix')
    plotting.plot_img(massp['max_proba'],
                      vmin=0,
                      vmax=1,
                      cmap='gray',
                      colorbar=True,
                      annotate=False,
                      draw_cross=False)

############################################################################

#############################################################################
# If the example is not run in a jupyter notebook, render the plots:
if not skip_plots:
    plotting.show()

#############################################################################
# References
# -----------
# .. [1] Caan et al. (2018) MP2RAGEME: T1, T2*, and QSM mapping in one sequence
Example No. 20
def plot_carpet(img, atlaslabels, detrend=True, nskip=0, size=(950, 800),
                subplot=None, title=None, output_file=None, legend=False,
                lut=None, tr=None):
    """
    Plot an image representation of voxel intensities across time, also known
    as the "carpet plot" or "Power plot". See Jonathan Power, Neuroimage
    2017 Jul 1; 154:150-158.

    Parameters
    ----------

        img : Niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
            4D input image
        atlaslabels: ndarray
            A 3D array of integer labels from an atlas, resampled into ``img`` space.
        detrend : boolean, optional
            Detrend and standardize the data prior to plotting.
        nskip : int
            Number of volumes at the beginning of the scan marked as nonsteady state.
        long_cutoff : int
            Number of TRs to consider img too long (and decimate the time direction
            to save memory)
        axes : matplotlib axes, optional
            The axes used to display the plot. If None, the complete
            figure is used.
        title : string, optional
            The title displayed on the figure.
        output_file : string, or None, optional
            The name of an image file to export the plot to. Valid extensions
            are .png, .pdf, .svg. If output_file is not None, the plot
            is saved to a file, and the display is closed.
        legend : bool
            Whether to render the average functional series with ``atlaslabels`` as
            overlay.
        tr : float , optional
            Specify the TR, if specified it uses this value. If left as None,
            # Frames is plotted instead of time.
    """

    # Define TR and number of frames
    notr = False
    if tr is None:
        notr = True
        tr = 1.

    img_nii = check_niimg_4d(img, dtype='auto',)
    func_data = _safe_get_data(img_nii, ensure_finite=True)
    ntsteps = func_data.shape[-1]

    data = func_data[atlaslabels > 0].reshape(-1, ntsteps)
    seg = atlaslabels[atlaslabels > 0].reshape(-1)

    # Map segmentation
    if lut is None:
        lut = np.zeros((256, ), dtype='int')
        lut[1:11] = 1
        lut[255] = 2
        lut[30:99] = 3
        lut[100:201] = 4

    # Apply lookup table
    newsegm = lut[seg.astype(int)]

    p_dec = 1 + data.shape[0] // size[0]
    if p_dec:
        data = data[::p_dec, :]
        newsegm = newsegm[::p_dec]

    t_dec = 1 + data.shape[1] // size[1]
    if t_dec:
        data = data[:, ::t_dec]

    # Detrend data
    v = (None, None)
    if detrend:
        data = clean(data.T, t_r=tr).T
        v = (-2, 2)

    # Order following segmentation labels
    order = np.argsort(newsegm)[::-1]

    # If subplot is not defined
    if subplot is None:
        subplot = mgs.GridSpec(1, 1)[0]

    # Define nested GridSpec
    wratios = [1, 100, 20]
    gs = mgs.GridSpecFromSubplotSpec(1, 2 + int(legend), subplot_spec=subplot,
                                     width_ratios=wratios[:2 + int(legend)],
                                     wspace=0.0)

    mycolors = ListedColormap(cm.get_cmap('tab10').colors[:4][::-1])

    # Segmentation colorbar
    ax0 = plt.subplot(gs[0])
    ax0.set_yticks([])
    ax0.set_xticks([])
    ax0.imshow(newsegm[order, np.newaxis], interpolation='none', aspect='auto',
               cmap=mycolors, vmin=1, vmax=4)
    ax0.grid(False)
    ax0.spines["left"].set_visible(False)
    ax0.spines["bottom"].set_color('none')
    ax0.spines["bottom"].set_visible(False)

    # Carpet plot
    ax1 = plt.subplot(gs[1])
    ax1.imshow(data[order, ...], interpolation='nearest', aspect='auto', cmap='gray',
               vmin=v[0], vmax=v[1])

    ax1.grid(False)
    ax1.set_yticks([])
    ax1.set_yticklabels([])

    # Set 10 frame markers in X axis
    interval = max((int(data.shape[-1] + 1) // 10, int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    ax1.set_xticks(xticks)
    if notr:
        ax1.set_xlabel('time (frame #)')
    else:
        ax1.set_xlabel('time (s)')
    labels = tr * (np.array(xticks)) * t_dec
    ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=5)

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        ax0.spines[side].set_color('none')
        ax0.spines[side].set_visible(False)
        ax1.spines[side].set_color('none')
        ax1.spines[side].set_visible(False)

    ax1.yaxis.set_ticks_position('left')
    ax1.xaxis.set_ticks_position('bottom')
    ax1.spines["bottom"].set_visible(False)
    ax1.spines["left"].set_color('none')
    ax1.spines["left"].set_visible(False)

    if legend:
        gslegend = mgs.GridSpecFromSubplotSpec(
            5, 1, subplot_spec=gs[2], wspace=0.0, hspace=0.0)
        epiavg = func_data.mean(3)
        epinii = nb.Nifti1Image(epiavg, img_nii.affine, img_nii.header)
        segnii = nb.Nifti1Image(lut[atlaslabels.astype(int)], epinii.affine, epinii.header)
        segnii.set_data_dtype('uint8')

        nslices = epiavg.shape[-1]
        coords = np.linspace(int(0.10 * nslices), int(0.95 * nslices), 5).astype(np.uint8)
        for i, c in enumerate(coords.tolist()):
            ax2 = plt.subplot(gslegend[i])
            plot_img(segnii, bg_img=epinii, axes=ax2, display_mode='z',
                     annotate=False, cut_coords=[c], threshold=0.1, cmap=mycolors,
                     interpolation='nearest')

    if output_file is not None:
        figure = plt.gcf()
        figure.savefig(output_file, bbox_inches='tight')
        plt.close(figure)
        figure = None
        return output_file

    return [ax0, ax1], gs
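# Hedged usage sketch for plot_carpet above (the file names are hypothetical):
# it expects a 4D BOLD series plus an integer atlas resampled into the same
# voxel grid (with the default lut, label values should stay below 256), and it
# relies on the module-level imports used inside the function.
import nibabel as nb
bold_file = 'sub-01_task-rest_bold.nii.gz'                # 4D functional image
atlas_img = nb.load('atlas_labels_in_bold_space.nii.gz')  # 3D integer label volume
plot_carpet(bold_file, atlas_img.get_fdata().astype(int),
            tr=2.0, legend=True, output_file='sub-01_carpet.svg')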
f_values, p_values = f_classif(fmri_masked, y)
p_values = -np.log10(p_values)
p_values[p_values > 10] = 10
p_unmasked = nifti_masker.inverse_transform(p_values).get_data()

#########################################################################
# Visualization

# Use the fmri mean image as a surrogate of anatomical data
from nilearn import image
mean_fmri = image.mean_img(fmri_img)

from nilearn.plotting import plot_stat_map, plot_img, show
searchlight_img = new_img_like(mean_fmri, searchlight.scores_)

# Because scores are not a zero-center test statistics, we cannot use
# plot_stat_map
plot_img(searchlight_img, bg_img=mean_fmri,
         title="Searchlight", display_mode="z", cut_coords=[-9],
         vmin=.42, cmap='hot', threshold=.2, black_bg=True)

# F_score results
p_ma = np.ma.array(p_unmasked, mask=np.logical_not(process_mask))
f_score_img = new_img_like(mean_fmri, p_ma)
plot_stat_map(f_score_img, mean_fmri,
              title="F-scores", display_mode="z",
              cut_coords=[-9],
              colorbar=False)

show()
Example No. 22
 # MiniBatch Kmeans
 ##############################################################################
 
 mbk = MiniBatchKMeans(init='k-means++', n_clusters=N_CLUSTERS,
                       n_init=10, max_no_improvement=10, verbose=0)
 
 pet_loc_data = np.concatenate((pet_data_masked.T, loc.T), axis=1)
 
 mbk.fit(pet_loc_data)
 mbk_means_labels = mbk.labels_
 mbk_means_cluster_centers = mbk.cluster_centers_
 mbk_means_labels_unique = np.unique(mbk_means_labels)
 
 mbk_data = masker.inverse_transform(mbk_means_labels)
 
 plot_img(mbk_data)
 
 
 ##############################################################################
 # Generate cluster matrix
 ##############################################################################
 
 if USE_CENTROIDS:
     x = mbk_means_cluster_centers[:, :96].T
 else:
     x = np.zeros((len(data), N_CLUSTERS))
     for idx in np.arange(len(data)):
         for val in mbk_means_labels_unique:
             ind = (mbk_means_labels == val)
             x[idx, val] = np.mean(pet_data_masked[idx, ind])
 
Example No. 23
def plot_carpet(img,
                atlaslabels,
                detrend=True,
                nskip=0,
                size=(950, 800),
                subplot=None,
                title=None,
                output_file=None,
                legend=False,
                lut=None,
                tr=None):
    """
    Plot an image representation of voxel intensities across time, also known
    as the "carpet plot" or "Power plot". See Jonathan Power, Neuroimage
    2017 Jul 1; 154:150-158.

    Parameters
    ----------

        img : Niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
            4D input image
        atlaslabels: ndarray
            A 3D array of integer labels from an atlas, resampled into ``img`` space.
        detrend : boolean, optional
            Detrend and standardize the data prior to plotting.
        nskip : int
            Number of volumes at the beginning of the scan marked as nonsteady state.
        long_cutoff : int
            Number of TRs to consider img too long (and decimate the time direction
            to save memory)
        axes : matplotlib axes, optional
            The axes used to display the plot. If None, the complete
            figure is used.
        title : string, optional
            The title displayed on the figure.
        output_file : string, or None, optional
            The name of an image file to export the plot to. Valid extensions
            are .png, .pdf, .svg. If output_file is not None, the plot
            is saved to a file, and the display is closed.
        legend : bool
            Whether to render the average functional series with ``atlaslabels`` as
            overlay.
        tr : float , optional
            Specify the TR, if specified it uses this value. If left as None,
            # Frames is plotted instead of time.
    """

    # Define TR and number of frames
    notr = False
    if tr is None:
        notr = True
        tr = 1.

    img_nii = check_niimg_4d(
        img,
        dtype='auto',
    )
    func_data = _safe_get_data(img_nii, ensure_finite=True)
    ntsteps = func_data.shape[-1]

    data = func_data[atlaslabels > 0].reshape(-1, ntsteps)
    seg = atlaslabels[atlaslabels > 0].reshape(-1)

    # Map segmentation
    if lut is None:
        lut = np.zeros((256, ), dtype='int')
        lut[1:11] = 1
        lut[255] = 2
        lut[30:99] = 3
        lut[100:201] = 4

    # Apply lookup table
    newsegm = lut[seg.astype(int)]

    p_dec = 1 + data.shape[0] // size[0]
    if p_dec:
        data = data[::p_dec, :]
        newsegm = newsegm[::p_dec]

    t_dec = 1 + data.shape[1] // size[1]
    if t_dec:
        data = data[:, ::t_dec]

    # Detrend data
    v = (None, None)
    if detrend:
        data = clean(data.T, t_r=tr).T
        v = (-2, 2)

    # Order following segmentation labels
    order = np.argsort(newsegm)[::-1]

    # If subplot is not defined
    if subplot is None:
        subplot = mgs.GridSpec(1, 1)[0]

    # Define nested GridSpec
    wratios = [1, 100, 20]
    gs = mgs.GridSpecFromSubplotSpec(1,
                                     2 + int(legend),
                                     subplot_spec=subplot,
                                     width_ratios=wratios[:2 + int(legend)],
                                     wspace=0.0)

    mycolors = ListedColormap(cm.get_cmap('tab10').colors[:4][::-1])

    # Segmentation colorbar
    ax0 = plt.subplot(gs[0])
    ax0.set_yticks([])
    ax0.set_xticks([])
    ax0.imshow(newsegm[order, np.newaxis],
               interpolation='none',
               aspect='auto',
               cmap=mycolors,
               vmin=1,
               vmax=4)
    ax0.grid(False)
    ax0.spines["left"].set_visible(False)
    ax0.spines["bottom"].set_color('none')
    ax0.spines["bottom"].set_visible(False)

    # Carpet plot
    ax1 = plt.subplot(gs[1])
    ax1.imshow(data[order, ...],
               interpolation='nearest',
               aspect='auto',
               cmap='gray',
               vmin=v[0],
               vmax=v[1])

    ax1.grid(False)
    ax1.set_yticks([])
    ax1.set_yticklabels([])

    # Set 10 frame markers in X axis
    interval = max(
        (int(data.shape[-1] + 1) // 10, int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    ax1.set_xticks(xticks)
    if notr:
        ax1.set_xlabel('time (frame #)')
    else:
        ax1.set_xlabel('time (s)')
    labels = tr * (np.array(xticks)) * t_dec
    ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=5)

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        ax0.spines[side].set_color('none')
        ax0.spines[side].set_visible(False)
        ax1.spines[side].set_color('none')
        ax1.spines[side].set_visible(False)

    ax1.yaxis.set_ticks_position('left')
    ax1.xaxis.set_ticks_position('bottom')
    ax1.spines["bottom"].set_visible(False)
    ax1.spines["left"].set_color('none')
    ax1.spines["left"].set_visible(False)

    if legend:
        gslegend = mgs.GridSpecFromSubplotSpec(5,
                                               1,
                                               subplot_spec=gs[2],
                                               wspace=0.0,
                                               hspace=0.0)
        epiavg = func_data.mean(3)
        epinii = nb.Nifti1Image(epiavg, img_nii.affine, img_nii.header)
        segnii = nb.Nifti1Image(lut[atlaslabels.astype(int)], epinii.affine,
                                epinii.header)
        segnii.set_data_dtype('uint8')

        nslices = epiavg.shape[-1]
        coords = np.linspace(int(0.10 * nslices), int(0.95 * nslices),
                             5).astype(np.uint8)
        for i, c in enumerate(coords.tolist()):
            ax2 = plt.subplot(gslegend[i])
            plot_img(segnii,
                     bg_img=epinii,
                     axes=ax2,
                     display_mode='z',
                     annotate=False,
                     cut_coords=[c],
                     threshold=0.1,
                     cmap=mycolors,
                     interpolation='nearest')

    if output_file is not None:
        figure = plt.gcf()
        figure.savefig(output_file, bbox_inches='tight')
        plt.close(figure)
        figure = None
        return output_file

    return [ax0, ax1], gs
Example No. 24
import numpy as np
import matplotlib.pyplot as plt
from nilearn import plotting
from nilearn import image
import os

# folder path
direccion = os.getcwd()
# Plotting the structural image
rutaT1 = '/alejandra-parra/restilb-alejandra-parra/T1/T1.nii.gz'  # T1 image path
T1 = direccion + rutaT1
plotting.plot_img(T1)  # plot the T1 image
plt.title("Imagen estructural")
plt.show()  # show the image

# smooth the image with a Gaussian filter
smoth = image.smooth_img(T1, fwhm=3)
plotting.plot_img(smoth)
plt.title("Imagen estructural suavizada")
plotting.show()

# Plotting the functional image
rutafunc = '/alejandra-parra/restilb-alejandra-parra/func/func.nii.gz'
funcional = direccion + rutafunc  # functional image
# functional image shape; the last dimension is time
print(image.load_img(funcional).shape)
# pick a specific timepoint to plot
tiempo = 80
primerfunc = image.index_img(funcional, tiempo)
# bg_img: background image (here the T1); threshold: lower bound; dim: image contrast (+ darker, - brighter)
plotting.plot_stat_map(primerfunc,
================
Rescaling demo
================

This example compares a volume before and after T1 correction.
"""
# Create a memory context
from nipype.caching import Memory
mem = Memory('/tmp')

# Give the path to the 4D ASL image
raw_asl_file = '/tmp/func.nii'

# Rescale
from procasl import preprocessing
rescale = mem.cache(preprocessing.Rescale)
out_rescale = rescale(
    in_file=raw_asl_file, ss_tr=35.4, t_i_1=800., t_i_2=1800.)

# Plot the first volume before and after rescaling
from nilearn import plotting
import matplotlib.pylab as plt
for filename, title in zip(
        [raw_asl_file, out_rescale.outputs.rescaled_file],
        ['raw', 'rescaled']):
    figure = plt.figure(figsize=(5, 4))
    volume_file = preprocessing.save_first_scan(filename)
    plotting.plot_img(volume_file, figure=figure, display_mode='z',
                      cut_coords=(65,), title=title, colorbar=True)
plt.show()
Example No. 26
# -*- coding: utf-8 -*-
"""
A script that uses nilearn to resample a segmentation image
"""

import os, glob, time
import nibabel as nib
import matplotlib.pyplot as plt
from nilearn import plotting, image



input_filename = ''.join(['/disk4t/mehdi/data/pet_fdg_baseline_processed_ADNI/',
                          'I218153/ADNI_011_S_0183_MR_MAPER_segmentation',
                          ',_masked_Br_20110218145719200_S12000_I218153.nii'])
                 

original_img = nib.load(input_filename)
ord_img = image.reorder_img(original_img, resample=True)

plt.close('all')
plotting.plot_img(ord_img)
Example No. 27
from procasl import datasets

heroes = datasets.load_heroes_dataset(
    subjects=(0,),
    subjects_parent_directory=os.path.join(os.path.expanduser("~/procasl_data"), "heroes"),
    paths_patterns={"raw ASL": "fMRI/acquisition1/vismot1_rawASL*.nii"},
)
raw_asl_file = heroes["raw ASL"][0]

# Create a memory context
from nipype.caching import Memory

cache_directory = "/tmp"
mem = Memory("/tmp")
os.chdir(cache_directory)
# Rescale
from procasl import preprocessing

rescale = mem.cache(preprocessing.Rescale)
out_rescale = rescale(in_file=raw_asl_file, ss_tr=35.4, t_i_1=800.0, t_i_2=1800.0)

# Plot the first volume before and after rescaling
from nilearn import plotting
import matplotlib.pylab as plt

for filename, title in zip([raw_asl_file, out_rescale.outputs.rescaled_file], ["raw", "rescaled"]):
    figure = plt.figure(figsize=(5, 4))
    first_scan_file = preprocessing.save_first_scan(filename)
    plotting.plot_img(first_scan_file, figure=figure, display_mode="z", cut_coords=(65,), title=title, colorbar=True)
plt.show()
Example No. 28
def plot_mgz(mgz_file):
    sns.set(color_codes=True)
    plotting.plot_img(mgz_file)
    img = nib.load(mgz_file)
    p = np.array(img.dataobj).flatten()
    sns.distplot(p[p != 0])
Example No. 29
        
    tmap = masker.inverse_transform(t_masked)
    pmap = masker.inverse_transform(p_masked)
    
    tscore = masker.inverse_transform(t_scores[0])
    pscore = masker.inverse_transform(neg_log_pvals[0])    

    t_path = os.path.join('figures',
                          'tmap_voxel_norm_'+gr[0]+'_'+gr[1]+'_baseline_adni')
    p_path = os.path.join('figures',
                          'pmap_voxel_norm_'+gr[0]+'_'+gr[1]+'_baseline_adni')
    
    plot_stat_map(tmap, tmap, output_file=t_path,
                  black_bg=True, title='/'.join(gr), cut_coords=(1,-21,11))
    plot_img(pmap, output_file=p_path, cmap=cm.hot, colorbar=True, vmin=0,
                  black_bg=True, title='/'.join(gr), cut_coords=(1,-21,11))
    tmap.to_filename(t_path+'.nii')
    pmap.to_filename(p_path+'.nii')
                  
    t_path = os.path.join('figures',
                          'tmap_perm_voxel_norm_'+gr[0]+'_'+gr[1]+'_baseline_adni')
    p_path = os.path.join('figures',
                          'pmap_perm_voxel_norm_'+gr[0]+'_'+gr[1]+'_baseline_adni')                  
                  
    """
    plot_stat_map(tscore, tscore, output_file=t_path,
                  black_bg=True, title='/'.join(gr), cut_coords=(1,-21,11),
                  cmap=cm.hot, colorbar=True)
    plot_stat_map(pscore, img, output_file=p_path,
                  black_bg=True, title='/'.join(gr), cut_coords=(1,-21,11),
                  cmap=cm.hot, colorbar=True)
Example No. 30
        print(output_fig_name)
        try:
            image.reorder_img(img)
            '''
            plotting.plot_img(img,
                  output_file=os.path.join('figs', output_fig_name),
                  title=str(img.shape),
                  black_bg=True)            
            '''
        except ValueError as exc:
                        
            ord_img = image.reorder_img(img, resample='nearest')
            resampled_image = image.resample_img(ord_img)
            affine = np.eye(4)
            plotting.plot_img(nib.Nifti1Image(img.get_data(), affine),
                  output_file=os.path.join('figures/visualization', output_fig_name+'x'),
                  title=str(img.shape),
                  black_bg=True)
            plotting.plot_img(ord_img,
                  output_file=os.path.join('figures/visualization', output_fig_name),
                  title=str(img.shape),
                  black_bg=True)
            ord_img.to_filename(os.path.join('figures/visualization', output_fig_name+'.nii'))
            error_counter += 1
            print(str(filename[0]))
            

print('{}/{} errors'.format(error_counter, counter))


'''
# -------------------
#
# .. note:: In this tutorial, we load the data using a data downloading
#           function. To input your own data, you will need to provide
#           a list of paths to your own files in the ``subject_data`` variable.
#           These should abide by the Brain Imaging Data Structure (BIDS)
#           organization.

from nistats.datasets import fetch_spm_auditory
subject_data = fetch_spm_auditory()
print(subject_data.func)  # print the list of names of functional images

###############################################################################
# We can display the first functional image and the subject's anatomy:
from nilearn.plotting import plot_stat_map, plot_anat, plot_img, show
plot_img(subject_data.func[0])
plot_anat(subject_data.anat)

###############################################################################
# Next, we concatenate all the 3D EPI images into a single 4D image,
# then we average them in order to create a background
# image that will be used to display the activations:

from nilearn.image import concat_imgs, mean_img
fmri_img = concat_imgs(subject_data.func)
mean_img = mean_img(fmri_img)

###############################################################################
# Specifying the experimental paradigm
# ------------------------------------
#
src.plot(subjects_dir=subjects_dir)

n = sum(src[i]['nuse'] for i in range(len(src)))
print('the src space contains %d spaces and %d points' % (len(src), n))

# We could write the mixed source space with::
#
#    >>> write_source_spaces(fname_mixed_src, src, overwrite=True)
#

###############################################################################
# Export source positions to a NIfTI file:
nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)
src.export_volume(nii_fname, mri_resolution=True)

plotting.plot_img(nii_fname, cmap=plt.cm.spectral)
plt.show()

# Compute the fwd matrix
fwd = make_forward_solution(fname_evoked, fname_trans, src, fname_bem,
                            mindist=5.0,  # ignore sources<=5mm from innerskull
                            meg=True, eeg=False, n_jobs=1)

leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)

src_fwd = fwd['src']
n = sum(src_fwd[i]['nuse'] for i in range(len(src_fwd)))
print('the fwd src space contains %d spaces and %d points' % (len(src_fwd), n))

# Load data
Example No. 33
        loss3.backward(retain_graph=True)
        cd_optimizer.step()

    ###############################################
    # Visualization
    ###############################################

    if iteration % 10 == 0:
        print('[{}/{}]'.format(iteration,TOTAL_ITER),
              'D: {:<8.3}'.format(loss2.data[0].cpu().numpy()),
              'En_Ge: {:<8.3}'.format(loss1.data[0].cpu().numpy()),
              'Code: {:<8.3}'.format(loss3.data[0].cpu().numpy()),
              )
        feat = np.squeeze((0.5*real_images[0]+0.5).data.cpu().numpy())
        feat = nib.Nifti1Image(feat,affine = np.eye(4))
        plotting.plot_img(feat,title="X_Real")
        plotting.show()

        feat = np.squeeze((0.5*x_hat[0]+0.5).data.cpu().numpy())
        feat = nib.Nifti1Image(feat,affine = np.eye(4))
        plotting.plot_img(feat,title="X_DEC")
        plotting.show()

        feat = np.squeeze((0.5*x_rand[0]+0.5).data.cpu().numpy())
        feat = nib.Nifti1Image(feat,affine = np.eye(4))
        plotting.plot_img(feat,title="X_rand")
        plotting.show()

    ###############################################
    # Model Save
    ###############################################
Example No. 34
"""Rescaling step """
# Create a memory context
from nipype.caching import Memory
mem = Memory('/tmp/no_workflow')

# Give the path to the 4D ASL image
raw_asl_file = '/tmp/func.nii'

# Rescale
from procasl import preprocessing
rescale = mem.cache(preprocessing.Rescale)
out_rescale = rescale(
    in_file=raw_asl_file, ss_tr=35.4, t_i_1=800., t_i_2=1800.)

# Plot the first volume before and after rescaling
from nilearn import plotting
import matplotlib.pylab as plt
figure, (axes1, axes2) = plt.subplots(2, 1, figsize=(7, 5))
for filename, title, axes in zip(
        [raw_asl_file, out_rescale.outputs.rescaled_file],
        ['raw', 'rescaled'], [axes1, axes2]):
    volume_file = preprocessing.save_first_scan(filename)
    plotting.plot_img(volume_file, axes=axes, display_mode='z',
                      cut_coords=(65, 75), title=title, colorbar=True)
plt.show()
Example No. 35
CACHE_DIR = '/home/mr234268/data'

dataset = datasets.fetch_adni_rs_fmri()
func_files = dataset['func']
dx_group = dataset['dx_group']

n_sample = 140
idx = np.random.randint(len(func_files), size=n_sample)
func_files_sample = np.array(func_files)[idx]

multi_masker = MultiNiftiMasker(mask_strategy='epi',
                                memory=CACHE_DIR,
                                n_jobs=1,
                                memory_level=2)
multi_masker.fit(func_files_sample)
plot_img(multi_masker.mask_img_)

n_components = 40
canica = CanICA(mask=multi_masker,
                n_components=n_components,
                smoothing_fwhm=6.,
                memory=CACHE_DIR,
                memory_level=5,
                threshold=3.,
                verbose=10,
                random_state=0)
canica.fit(func_files_sample)

# Retrieve the independent components in brain space
components_img = canica.masker_.inverse_transform(canica.components_)
# components_img is a Nifti Image object, and can be saved to a file with
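# ``to_filename``; for example (the output filename is an assumption):
components_img.to_filename('canica_resting_state.nii.gz')
# A single component can then be displayed with plot_img:
from nilearn import image
plot_img(image.index_img(components_img, 0), colorbar=True,
         title='First CanICA component')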
Exemplo n.º 36
0
    for val in np.unique(seg_data):
        if val > 0:
            t_data[(seg_data == val)] = t[idx]
            p_data[(seg_data == val)] = -np.log10(p[idx])
            idx += 1
            
    t_img = nib.Nifti1Image(t_data, seg_img.get_affine())
    p_img = nib.Nifti1Image(p_data, seg_img.get_affine())
    
    print np.max(t_data), np.min(t_data)
    print np.max(p_data), np.min(p_data)
    
    t_path = os.path.join('figures', 'tmap_'+gr[0]+'_'+gr[1]+'_baseline_adni_region')
    p_path = os.path.join('figures', 'pmap_'+gr[0]+'_'+gr[1]+'_baseline_adni_region')
    plotting.plot_img(t_img, black_bg=True, cmap=cm.bwr, title='/'.join(gr),
                      output_file=t_path, cut_coords=[0, 36, 0],
                      colorbar=True, vmin=-6.4, vmax=6.4)
    plotting.plot_stat_map(p_img, black_bg=True, title='/'.join(gr),
                           output_file=p_path, cut_coords=[0, 36, 0])

    t_nii_filename = '_'.join(['tmap', 'regions'])
    t_nii_filename += '_' + '_'.join(gr)
    t_nii_filename += '.nii'
    
    pval_nii_filename = '_'.join(['pvalmap', 'regions'])
    pval_nii_filename += '_' + '_'.join(gr)
    pval_nii_filename += '.nii'

    t_img.to_filename(os.path.join('figures', 'nii', t_nii_filename))
    p_img.to_filename(os.path.join('figures', 'nii', pval_nii_filename))
                      
#    docstring by typing ``nighres.brain.mp2rage_extract_brain_region?`` or
#    list them with ``cortex.keys()``
#
# To check if the extraction worked well we plot the GM and WM probabilities.
# You can also open the images stored in ``out_dir`` in
# your favourite interactive viewer and scroll through the volume.
#
# Like Nilearn, we use Nibabel SpatialImage objects to pass data internally.
# Therefore, we can directly plot the outputs using `Nilearn plotting functions
# <http://nilearn.github.io/plotting/index.html#different-plotting-functions>`_.
if not skip_plots:
    plotting.plot_img(cortex['region_proba'],
                      vmin=0,
                      vmax=1,
                      cmap='autumn',
                      colorbar=True,
                      annotate=False,
                      draw_cross=False)
    plotting.plot_img(cortex['inside_proba'],
                      vmin=0,
                      vmax=1,
                      cmap='autumn',
                      colorbar=True,
                      annotate=False,
                      draw_cross=False)
############################################################################
# .. image:: ../_static/cortical_extraction1.png
############################################################################

############################################################################
Exemplo n.º 38
0
@author: nakagawa mariana
"""


import os
import numpy as np
import nibabel as nib
import matplotlib 
import matplotlib.pyplot as plt 
from skimage import io
from skimage import filters
from nilearn import datasets

img = nib.load(r'\Users\Escritorio\data\sub-01\anat\sub-01_T1w.nii.gz')

print(img)

affine = img.affine
print(affine)

pixdim = img.header['pixdim']
print(pixdim)

print(img.get_data_dtype())

#plt.imshow(img)
from nilearn import plotting
plotting.plot_img(img, title="Prueba1")

plotting.show()
Exemplo n.º 39
0
args = parser.parse_args()

im1 = nib.load(args.image1)
print(f'im1 size: {im1.shape}')
data1 = im1.get_fdata()

im2 = nib.load(args.image2)
print(f'im2 size: {im2.shape}')
data2 = im2.get_fdata()

rmse = np.sqrt(np.mean((data1 - data2)**2, dtype=np.float64), dtype=np.float64)
mae = np.mean(np.abs(data1 - data2, dtype=np.float64), dtype=np.float64)

print(f'RMSE: {rmse}')
print(f'MAE: {mae}')

# Note: voxels where data1 == 0 produce inf/nan in this relative difference;
# mask them or add a small epsilon to the denominator if that is a concern.
rel_diff = nib.Nifti1Image((data1 - data2) / data1, im1.affine)
diff_name = args.reldiff
nib.save(rel_diff, diff_name + '.nii.gz')
print(f'Relative differences saved in {diff_name}.nii.gz')

plt = nilp.plot_img(rel_diff,
                    cmap=cm.seismic,
                    cut_coords=(-35, 12, 9),
                    vmin=-1,
                    vmax=1,
                    colorbar=True)
plt.savefig(diff_name + '.png')
print(f'PNG snapshot saved in {diff_name}.png')
Exemplo n.º 40
0
    def _reporting(self):
        """
        Returns
        -------
        displays : list
            A list of all displays to be rendered.

        """
        try:
            import matplotlib.pyplot as plt
            from nilearn import plotting
        except ImportError:
            with warnings.catch_warnings():
                mpl_unavail_msg = ('Matplotlib is not imported! '
                                   'No reports will be generated.')
                warnings.filterwarnings('always', message=mpl_unavail_msg)
                warnings.warn(category=ImportWarning, message=mpl_unavail_msg)
                return [None]

        if self._reporting_data is not None:
            labels_image = self._reporting_data['labels_image']
        else:
            labels_image = None

        if labels_image is not None:
            # Remove warning message in case where the masker was
            # previously fitted with no func image and is re-fitted
            if 'warning_message' in self._report_content:
                self._report_content['warning_message'] = None
            labels_image = image.load_img(labels_image, dtype=int)
            labels_image_data = image.get_data(labels_image)
            labels_image_affine = labels_image.affine
            # Number of regions excluding the background
            number_of_regions = np.sum(
                np.unique(labels_image_data) != self.background_label)
            # Basic safety check to ensure we have as many labels as we
            # have regions (plus background).
            if (self.labels is not None
                    and len(self.labels) != number_of_regions + 1):
                raise ValueError(("Mismatch between the number of provided "
                                  "labels ({0}) and the number of regions "
                                  "in provided label image ({1})").format(
                                      len(self.labels), number_of_regions + 1))
            self._report_content['number_of_regions'] = number_of_regions

            label_values = np.unique(labels_image_data)
            label_values = label_values[label_values != self.background_label]
            columns = [
                'label value', 'region name', 'size (in mm^3)',
                'relative size (in %)'
            ]
            if self.labels is None:
                columns.remove('region name')
            regions_summary = {c: [] for c in columns}
            for label in label_values:
                regions_summary['label value'].append(label)
                if self.labels is not None:
                    regions_summary['region name'].append(self.labels[label])
                size = len(labels_image_data[labels_image_data == label])
                voxel_volume = np.abs(
                    np.linalg.det(labels_image_affine[:3, :3]))
                regions_summary['size (in mm^3)'].append(
                    round(size * voxel_volume))
                regions_summary['relative size (in %)'].append(
                    round(
                        size / len(labels_image_data[labels_image_data != 0]) *
                        100, 2))
            self._report_content['summary'] = regions_summary

            img = self._reporting_data['img']
            # If we have a func image to show in the report, use it
            if img is not None:
                dim = image.load_img(img).shape
                if len(dim) == 4:
                    # compute middle image from 4D series for plotting
                    img = image.index_img(img, dim[-1] // 2)
                display = plotting.plot_img(img,
                                            black_bg=False,
                                            cmap='CMRmap_r')
                plt.close()
                display.add_contours(labels_image, filled=False, linewidths=3)

            # Otherwise, simply plot the ROI of the label image
            # and give a warning to the user
            else:
                msg = ("No image provided to fit in NiftiLabelsMasker. "
                       "Plotting ROIs of label image on the "
                       "MNI152Template for reporting.")
                warnings.warn(msg)
                self._report_content['warning_message'] = msg
                display = plotting.plot_roi(labels_image)
                plt.close()

            # If we have a mask, show its contours
            if self._reporting_data['mask'] is not None:
                display.add_contours(self._reporting_data['mask'],
                                     filled=False,
                                     colors="g",
                                     linewidths=3)
        else:
            self._report_content['summary'] = None
            display = None

        return [display]
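# Minimal usage sketch (not part of the snippet above): _reporting() is called
# when a fitted masker's HTML report is generated, e.g. with nilearn's
# NiftiLabelsMasker and a fetched atlas:
#
#     from nilearn import datasets
#     from nilearn.maskers import NiftiLabelsMasker
#
#     atlas = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
#     masker = NiftiLabelsMasker(labels_img=atlas.maps).fit()
#     report = masker.generate_report()  # renders the display from _reporting()
#     report.save_as_html('labels_masker_report.html')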
Exemplo n.º 41
0
def plot_registration(reference_img, coregistered_img,
                      title="untitled coregistration!",
                      cut_coords=None,
                      display_mode='ortho',
                      cmap=None, close=False,
                      output_filename=None):
    """Plots a coregistered source as bg/contrast for the reference image

    Parameters
    ----------
    reference_img: string
        path to reference (background) image

    coregistered_img: string
        path to other image (to be compared with reference)

    display_mode: string (optional, defaults to 'ortho')
        display_mode param

    cmap: matplotlib colormap object (optional, defaults to gray)
        colormap to use for plots

    output_filename: string (optional)
        path where plot will be stored

    """
    # sanity
    if cmap is None:
        cmap = plt.cm.gray  # registration QA always gray cmap!

    reference_img = load_vols(reference_img)[0]
    coregistered_img = load_vols(coregistered_img)[0]

    if cut_coords is None:
        cut_coords = (-10, -28, 17)

    if display_mode in ['x', 'y', 'z']:
        cut_coords = (cut_coords['xyz'.index(display_mode)],)

    # XXX nilearn complains about rotations in affine, etc.
    coregistered_img = reorder_img(coregistered_img, resample="continuous")

    _slicer = plot_img(coregistered_img, cmap=cmap, cut_coords=cut_coords,
                       display_mode=display_mode, black_bg=True)

    # XXX nilearn complains about rotations in affine, etc.
    reference_img = reorder_img(reference_img, resample="continuous")

    _slicer.add_edges(reference_img)

    # misc
    _slicer.title(title, size=12, color='w', alpha=0)

    if not output_filename is None:
        try:
            plt.savefig(output_filename, dpi=200, bbox_inches='tight',
                        facecolor="k", edgecolor="k")
            if close:
                plt.close()
        except AttributeError:
            # XXX TODO: handle this case!!
            pass
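# Minimal usage sketch (the file names below are placeholders, not part of the
# snippet above):
#
#     plot_registration('reference_anat.nii', 'coregistered_func.nii',
#                       title='coregistration QA',
#                       output_filename='coreg_qa.png', close=True)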
Exemplo n.º 42
0
                new_filename = filename.replace(s,'s005')
                os.chdir(filedir)
                os.rename(filename, new_filename)
                print new_filename
            
    anat_path = os.path.join(BASE_DIR, subject, 'MRI', 'T1MRI')
    anat_files = glob.glob(os.path.join(anat_path, '*.*'))
    for f in anat_files:
        filedir, filename = os.path.split(f)
        filename = f.split('/')[-1]
        if 'nobias' in filename:
            new_filename = filename.replace('nobias_Exam','S')
            print new_filename
            os.chdir(filedir)
            os.rename(filename, new_filename)
        elif 'Exam' in filename:
            new_filename = filename.replace('Exam','S')
            print new_filename
            os.chdir(filedir)
            os.rename(filename, new_filename)            
            
    hdr_files = glob.glob(os.path.join(anat_path, '*.hdr'))
    if len(hdr_files) > 0:
        img = nib.load(hdr_files[0])
        new_img = image.reorder_img(img, resample='continuous')
        ax = plotting.plot_img(new_img)
        ax.title(subject)


    
    
Exemplo n.º 43
0
# Visualization

# Use the fmri mean image as a surrogate of anatomical data
from nilearn import image
mean_fmri = image.mean_img(fmri_img)

from nilearn.plotting import plot_stat_map, plot_img, show
searchlight_img = new_img_like(mean_fmri, searchlight.scores_)

# Because scores are not a zero-center test statistics, we cannot use
# plot_stat_map
plot_img(searchlight_img,
         bg_img=mean_fmri,
         title="Searchlight",
         display_mode="z",
         cut_coords=[-9],
         vmin=.42,
         cmap='hot',
         threshold=.2,
         black_bg=True)

# F_score results
p_ma = np.ma.array(p_unmasked, mask=np.logical_not(process_mask))
f_score_img = new_img_like(mean_fmri, p_ma)
plot_stat_map(f_score_img,
              mean_fmri,
              title="F-scores",
              display_mode="z",
              cut_coords=[-9],
              colorbar=False)
Exemplo n.º 44
0
import matplotlib.pyplot as plt

# Inputs
templates_paths = [
    'mni_icbm152_t1_tal_nlin_sym_09a.nii.gz',
    'mni_icbm152_gm_tal_nlin_sym_09a.nii.gz',
    'mni_icbm152_wm_tal_nlin_sym_09a.nii.gz'
]

brain_mask = load_img('mni_icbm152_t1_tal_nlin_sym_09a_mask.nii.gz')

for template_path in templates_paths:
    # Load template
    template = load_img(template_path)
    plot_img(template, colorbar=True)

    # Remove skull of whole-brain template
    if template_path == 'mni_icbm152_t1_tal_nlin_sym_09a.nii.gz':
        niimg = unmask(apply_mask(template, brain_mask), brain_mask)
        # plot_img(niimg, colorbar=True)
    else:
        niimg = template

    # Re-scale
    new_data = get_data(niimg)
    new_data /= np.max(new_data)
    new_data *= 255
    new = new_img_like(template, new_data)
    # plot_img(new, colorbar=True)
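    # Sketch of a final step (the output name below is an assumption): keep
    # the rescaled template on disk next to the original.
    new.to_filename(template_path.replace('.nii.gz', '_rescaled.nii.gz'))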
Exemplo n.º 45
0
# %%
# View the source space
# ---------------------

src.plot(subjects_dir=subjects_dir)

# %%
# We could write the mixed source space with::
#
#    >>> write_source_spaces(fname_mixed_src, src, overwrite=True)
#
# We can also export source positions to NIfTI file and visualize it again:

nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)
src.export_volume(nii_fname, mri_resolution=True, overwrite=True)
plotting.plot_img(nii_fname, cmap='nipy_spectral')

# %%
# Compute the fwd matrix
# ----------------------
fwd = mne.make_forward_solution(
    fname_evoked,
    fname_trans,
    src,
    fname_bem,
    mindist=5.0,  # ignore sources<=5mm from innerskull
    meg=True,
    eeg=False,
    n_jobs=None)
del src  # save memory
Exemplo n.º 46
0
# You can change the axis (x, y, z) by changing display_mode to 'x', 'y' or 'z'
#%%
Show_color = False

noise = Variable(torch.randn((1, 1000)).cuda())
fake_image = G(noise)
featmask = np.squeeze(fake_image[0].data.cpu().numpy())
featmask = nib.Nifti1Image(featmask, affine=np.eye(4))

arr1 = [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32]
arr2 = [34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60]
if Show_color:
    disp = plotting.plot_img(featmask,
                             cut_coords=arr1,
                             draw_cross=False,
                             annotate=False,
                             black_bg=True,
                             display_mode='x')
    # disp.annotate(size=25,left_right=False,positions=True)
    plotting.show()
    disp = plotting.plot_img(featmask,
                             cut_coords=arr2,
                             draw_cross=False,
                             annotate=False,
                             black_bg=True,
                             display_mode='x')
    # disp.annotate(size=25,left_right=False)
    plotting.show()
else:
    disp = plotting.plot_anat(featmask,
                              cut_coords=arr1,
                              # (remaining arguments were cut off in this
                              #  snippet; assumed to mirror the branch above)
                              draw_cross=False,
                              annotate=False,
                              black_bg=True,
                              display_mode='x')
    plotting.show()
Exemplo n.º 47
0
src.plot(subjects_dir=subjects_dir)

n = sum(src[i]['nuse'] for i in range(len(src)))
print('the src space contains %d spaces and %d points' % (len(src), n))

###############################################################################
# We could write the mixed source space with::
#
#    >>> write_source_spaces(fname_mixed_src, src, overwrite=True)
#
# We can also export source positions to a NIfTI file and visualize it again:

nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)
src.export_volume(nii_fname, mri_resolution=True)

plotting.plot_img(nii_fname, cmap='nipy_spectral')

# Compute the fwd matrix
fwd = mne.make_forward_solution(
    fname_evoked, fname_trans, src, fname_bem,
    mindist=5.0,  # ignore sources<=5mm from innerskull
    meg=True, eeg=False, n_jobs=1)

leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)

src_fwd = fwd['src']
n = sum(src_fwd[i]['nuse'] for i in range(len(src_fwd)))
print('the fwd src space contains %d spaces and %d points' % (len(src_fwd), n))

# Load data
Exemplo n.º 48
0
kirby = datasets.fetch_kirby(subjects=[4])
raw_asl_file = kirby.asl[0]

# Create a memory context
from nipype.caching import Memory
cache_directory = '/tmp'
mem = Memory('/tmp')
os.chdir(cache_directory)
# Rescale
from procasl import preprocessing
rescale = mem.cache(preprocessing.Rescale)
out_rescale = rescale(in_file=raw_asl_file,
                      ss_tr=35.4,
                      t_i_1=800.,
                      t_i_2=1800.)

# Plot the first volume before and after rescaling
from nilearn import plotting
import matplotlib.pylab as plt
for filename, title in zip([raw_asl_file, out_rescale.outputs.rescaled_file],
                           ['raw', 'rescaled']):
    figure = plt.figure(figsize=(5, 4))
    first_scan_file = preprocessing.save_first_scan(filename)
    plotting.plot_img(first_scan_file,
                      figure=figure,
                      display_mode='z',
                      cut_coords=(65, ),
                      title=title,
                      colorbar=True)
plt.show()
Exemplo n.º 49
0
basic nilearn functionalities.
"""

# Let us use a Nifti file that is shipped with nilearn
from nilearn.datasets import MNI152_FILE_PATH

# Note that the variable MNI152_FILE_PATH is just a path to a Nifti file
print('Path to MNI152 template: %r' % MNI152_FILE_PATH)

#########################################################################
# A first step: looking at our data
# ----------------------------------
#
# Let's quickly plot this file:
from nilearn import plotting
plotting.plot_img(MNI152_FILE_PATH)

#########################################################################
# This is not a very pretty plot. We just used the simplest possible
# code. There is a whole :ref:`section of the documentation <plotting>`
# on making prettier code.
#
# **Exercise**: Try plotting one of your own files. In the above,
# MNI152_FILE_PATH is nothing more than a string with a path pointing to
# a nifti image. You can replace it with a string pointing to a file on
# your disk. Note that it should be a 3D volume, and not a 4D volume.
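# For instance (the path below is only a placeholder for a local file):
#
#     plotting.plot_img('/path/to/your_3d_image.nii.gz')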

#########################################################################
# Simple image manipulation: smoothing
# -------------------------------------
#
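# A minimal sketch of the smoothing step this section introduces (the fwhm
# value is an assumption):
from nilearn import image
smoothed_img = image.smooth_img(MNI152_FILE_PATH, fwhm=3)
plotting.plot_img(smoothed_img, title='Smoothed MNI152 template')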
def plot_tmaps():
    for gr in groups:
        for n_clusters in [100, 200, 500, 1000, 2000]:
            filename = '_'.join(['tmap', 'ward', str(n_clusters), '_'.join(gr) ])+'.nii'
            nii_img = os.path.join( NII_DIR, filename)
            for ext in ['.png', '.pdf', '.svg']:
                try:
                    vm = 7
                    plot_img(nii_img, bg_img=MNI_TEMPLATE, cmap=cm.cold_hot,
                             black_bg=True, threshold=4,
                             vmin=-vm, vmax=vm,
                             output_file=os.path.join('figures', 'release',
                                                      filename.split('.')[0])+ext,
                             title='/'.join(gr), colorbar=True)
                except ValueError:
                    plot_img(nii_img, bg_img=MNI_TEMPLATE, cmap=cm.cold_hot,
                             black_bg=True, threshold='auto',
                             vmin=-vm, vmax=vm,
                             output_file=os.path.join('figures', 'release',
                                                      filename.split('.')[0])+ext,
                             title='/'.join(gr), colorbar=True)