def createTFCEfMRIOverlayImages(folder, suffix, title='', vmax=8, display_mode='z',
                                slices=range(-20, 50, 10), threshold=0.94999,
                                plotToAxis=False, f=[], axes=[], colorbar=True,
                                tight_layout=False, draw_cross=False, annotate=False):


    TFCEposImg,posImg,TFCEnegImg,negImg=getFileNamesfromFolder(folder,suffix)

    bg_img='./Templates/MNI152_.5mm_masked_edged.nii.gz'
    # threshold=0.949
    pos=image.math_img("np.multiply(img1,img2)",
                         img1=image.threshold_img(TFCEposImg,threshold=threshold),img2=posImg)
    neg=image.math_img("np.multiply(img1,img2)",
                         img1=image.threshold_img(TFCEnegImg,threshold=threshold),img2=negImg)
    fw=image.math_img("img1-img2",img1=pos,img2=neg)

    if plotToAxis:
        display=plotting.plot_stat_map(fw,display_mode=display_mode,threshold=0,
                                       cut_coords=slices,vmax=vmax,colorbar=colorbar,
                                       bg_img=bg_img,black_bg=False,title=title,dim=0,
                                       figure=f,axes=axes,draw_cross=draw_cross,
                                       annotate=annotate)
    else:
        display=plotting.plot_stat_map(fw,display_mode=display_mode,threshold=0,
        cut_coords=slices,vmax=vmax,colorbar=colorbar,bg_img=bg_img,
        black_bg=False,title=title,dim=0,annotate=annotate)

    if tight_layout:
        display.tight_layout()

    return display
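A minimal usage sketch for the overlay helper above; the results folder, suffix, and output file are hypothetical, and it assumes getFileNamesfromFolder and the bundled MNI template are available:

import matplotlib.pyplot as plt

# Hypothetical TFCE results folder and contrast suffix.
fig, ax = plt.subplots(figsize=(10, 3))
display = createTFCEfMRIOverlayImages('./results/contrast1', suffix='cope1',
                                      title='contrast 1', plotToAxis=True,
                                      f=fig, axes=ax)
fig.savefig('contrast1_overlay.png', dpi=300)
display.close()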
Example #2
def test_isnan_threshold_img_data():
    shape = (10, 10, 10)
    maps, _ = testing.generate_maps(shape, n_regions=2)
    data = maps.get_data()
    data[:, :, 0] = np.nan

    maps_img = nibabel.Nifti1Image(data, np.eye(4))
    # Test that threshold_img converges properly when the input image has NaNs.
    threshold_img(maps_img, threshold=0.8)
Example #3
def test_isnan_threshold_img_data():
    shape = (10, 10, 10)
    maps, _ = data_gen.generate_maps(shape, n_regions=2)
    data = maps.get_data()
    data[:, :, 0] = np.nan

    maps_img = nibabel.Nifti1Image(data, np.eye(4))
    # Test that threshold_img converges properly when the input image has NaNs.
    threshold_img(maps_img, threshold=0.8)
Example #4
def test_isnan_threshold_img_data():
    """Check that threshold_img converges properly when input image has nans.
    """
    shape = (10, 10, 10)
    maps, _ = data_gen.generate_maps(shape, n_regions=2)
    data = get_data(maps)
    data[:, :, 0] = np.nan

    maps_img = nibabel.Nifti1Image(data, np.eye(4))
    threshold_img(maps_img, threshold=0.8)
Example #5
def make_pysurfer_images_lh_rh(folder, suffix='cope1', hemi='lh', threshold=0.9499,
                               coords=(), surface='inflated', fwhm=0, filename='',
                               saveFolder=[], vmax=5.0, bsize=5):
    from surfer import Brain, io
    TFCEposImg,posImg,TFCEnegImg,negImg=getFileNamesfromFolder(folder,suffix)

    pos=image.math_img("np.multiply(img1,img2)",
                         img1=image.threshold_img(TFCEposImg,threshold=threshold),img2=posImg)
    neg=image.math_img("np.multiply(img1,img2)",
                         img1=image.threshold_img(TFCEnegImg,threshold=threshold),img2=negImg)
    fw=image.math_img("img1-img2",img1=pos,img2=neg)

    if fwhm==0:
        smin=np.min(np.abs(fw.get_data()[fw.get_data()!=0]))
    else:
        smin=2

    mri_file = "%s/thresholded_posneg.nii.gz" % folder
    fw.to_filename(mri_file)

    """Bring up the visualization"""
    brain = Brain("fsaverage",hemi,surface, offscreen=True , background="white")

    """Project the volume file and return as an array"""

    reg_file = os.path.join("/opt/freesurfer","average/mni152.register.dat")
    surf_data = io.project_volume_data(mri_file, hemi, reg_file,smooth_fwhm=fwhm)
    #  surf_data_rh = io.project_volume_data(mri_file, "rh", reg_file,smooth_fwhm=fwhm)


    """
    You can pass this array to the add_overlay method for a typical activation
    overlay (with thresholding, etc.).
    """
    brain.add_overlay(surf_data, min=smin, max=vmax, name="activation", hemi=hemi)
    # brain.overlays["activation"]
    # brain.add_overlay(surf_data_rh, min=smin, max=5, name="ang_corr_rh", hemi='rh')

    if len(coords)>0:
        if coords[0]>0:
            hemi2='rh'
        else:
            hemi2='lh'
        brain.add_foci(coords, map_surface="pial", color="gold",hemi=hemi2)

    if len(saveFolder)>0:
        folder=saveFolder
        image_out=brain.save_montage('%s/%s-%s.png' % (folder,hemi,filename),order=['l','m'],orientation='h',border_size=bsize,colorbar=None)

    else:
        image_out=brain.save_image('%s/surfaceplot.jpg' % folder)
    brain.close()
    return image_out
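A hedged call sketch for the PySurfer helper above; the folder and save locations are hypothetical, and a working PySurfer/FreeSurfer setup (including /opt/freesurfer) is assumed:

# Hypothetical folder containing the TFCE outputs expected by getFileNamesfromFolder.
montage_png = make_pysurfer_images_lh_rh('./results/contrast1', suffix='cope1',
                                         hemi='lh', fwhm=5,
                                         filename='contrast1',
                                         saveFolder='./figures')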
Example #6
def test_threshold_img():
    # check that threshold_img passes with valid threshold inputs
    shape = (10, 20, 30)
    maps, _ = testing.generate_maps(shape, n_regions=4)
    affine = np.eye(4)
    mask_img = nibabel.Nifti1Image(np.ones((shape), dtype=np.int8), affine)

    for img in iter_img(maps):
        # when threshold is a float value
        thr_maps_img = threshold_img(img, threshold=0.8)
        # when we provide mask image
        thr_maps_percent = threshold_img(img, threshold=1, mask_img=mask_img)
        # when threshold is a percentile
        thr_maps_percent2 = threshold_img(img, threshold='2%')
Example #7
def test_threshold_img():
    # check that threshold_img passes with valid threshold inputs
    shape = (10, 20, 30)
    maps, _ = data_gen.generate_maps(shape, n_regions=4)
    affine = np.eye(4)
    mask_img = nibabel.Nifti1Image(np.ones((shape), dtype=np.int8), affine)

    for img in iter_img(maps):
        # when threshold is a float value
        thr_maps_img = threshold_img(img, threshold=0.8)
        # when we provide mask image
        thr_maps_percent = threshold_img(img, threshold=1, mask_img=mask_img)
        # when threshold is a percentile
        thr_maps_percent2 = threshold_img(img, threshold='2%')
Example #8
    def _generate_report(self):
        """Generate a reportlet."""
        NIWORKFLOWS_LOG.info('Generating visual report')

        refnii = load_img(self.inputs.reference)
        fmapnii = load_img(self.inputs.fieldmap)
        contour_nii = load_img(self.inputs.mask) if isdefined(
            self.inputs.mask) else None
        mask_nii = threshold_img(refnii, 1e-3)
        cuts = cuts_from_bbox(contour_nii or mask_nii, cuts=self._n_cuts)
        fmapdata = fmapnii.get_fdata()
        vmax = max(fmapdata.max(), abs(fmapdata.min()))

        # Call composer
        compose_view(plot_registration(refnii,
                                       'fixed-image',
                                       estimate_brightness=True,
                                       cuts=cuts,
                                       label='reference',
                                       contour=contour_nii,
                                       compress=False),
                     plot_registration(fmapnii,
                                       'moving-image',
                                       estimate_brightness=True,
                                       cuts=cuts,
                                       label='fieldmap (Hz)',
                                       contour=contour_nii,
                                       compress=False,
                                       plot_params={
                                           'cmap': coolwarm_transparent(),
                                           'vmax': vmax,
                                           'vmin': -vmax
                                       }),
                     out_file=self._out_report)
Example #9
    def _generate_report(self):
        """Generates the visual report."""
        from niworkflows.viz.utils import plot_registration

        NIWORKFLOWS_LOG.info("Generating visual report")

        anat = load_img(self._anat_file)
        contour_nii = load_img(
            self._contour) if self._contour is not None else None

        if self._mask_file:
            anat = unmask(apply_mask(anat, self._mask_file), self._mask_file)
            mask_nii = load_img(self._mask_file)
        else:
            mask_nii = threshold_img(anat, 1e-3)

        n_cuts = 7
        if not self._mask_file and contour_nii:
            cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
        else:
            cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)

        # Call composer
        compose_view(
            plot_registration(
                anat,
                "fixed-image",
                estimate_brightness=True,
                cuts=cuts,
                contour=contour_nii,
                compress=self.inputs.compress_report,
            ),
            [],
            out_file=self._out_report,
        )
Example #10
    def _generate_report(self):
        """ Generates the visual report """
        from niworkflows.viz.utils import plot_registration
        NIWORKFLOWS_LOG.info('Generating visual report')

        anat = load_img(self._anat_file)
        contour_nii = load_img(self._contour) if self._contour is not None else None

        if self._mask_file:
            anat = unmask(apply_mask(anat, self._mask_file), self._mask_file)
            mask_nii = load_img(self._mask_file)
        else:
            mask_nii = threshold_img(anat, 1e-3)

        n_cuts = 7
        if not self._mask_file and contour_nii:
            cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
        else:
            cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)

        # Call composer
        compose_view(
            plot_registration(anat, 'fixed-image',
                              estimate_brightness=True,
                              cuts=cuts,
                              contour=contour_nii,
                              compress=self.inputs.compress_report),
            [],
            out_file=self._out_report
        )
Example #11
def process_img(stat_img, cluster_extent, voxel_thresh=1.96):
    """
    Parameters
    ----------
    stat_img : Niimg_like object
        Thresholded statistical map image
    cluster_extent : int
        Minimum number of voxels required to consider a cluster
    voxel_thresh : float, optional
        Threshold to apply to `stat_img`. If a negative number is provided, a
        percentile threshold is used instead, where the percentile is
        determined by `100 + voxel_thresh` (e.g., -5 keeps the top 5%). Default: 1.96

    Returns
    -------
    cluster_img : Nifti1Image
        4D image of brain regions, where each volume is a distinct cluster
    """
    # get input data image
    stat_img = image.index_img(check_niimg(stat_img, atleast_4d=True), 0)

    # threshold image
    if voxel_thresh < 0:
        voxel_thresh = '{}%'.format(100 + voxel_thresh)
    else:
        # ensure that threshold is not greater than most extreme value in image
        if voxel_thresh > np.abs(stat_img.get_data()).max():
            empty = np.zeros(stat_img.shape + (1,))
            return image.new_img_like(stat_img, empty)
    thresh_img = image.threshold_img(stat_img, threshold=voxel_thresh)

    # extract clusters
    min_region_size = cluster_extent * np.prod(thresh_img.header.get_zooms())
    clusters = []
    for sign in ['pos', 'neg']:
        # keep only data of given sign
        data = thresh_img.get_data().copy()
        data[(data < 0) if sign == 'pos' else (data > 0)] = 0

        # Do nothing if data array contains only zeros
        if np.any(data):
            try:
                clusters += [connected_regions(
                    image.new_img_like(thresh_img, data),
                    min_region_size=min_region_size,
                    extract_type='connected_components')[0]]
            except TypeError:  # for no clusters
                pass

    # Return empty image if no clusters were found
    if len(clusters) == 0:
        return image.new_img_like(thresh_img, np.zeros(data.shape + (1,)))

    # Reorder clusters by their size
    clust_img = image.concat_imgs(clusters)
    cluster_size = (clust_img.get_data() != 0).sum(axis=(0, 1, 2))
    new_order = np.argsort(cluster_size)[::-1]
    clust_img_ordered = image.index_img(clust_img, new_order)

    return clust_img_ordered
Example #12
def process_img(stat_img, voxel_thresh=1.96, cluster_extent=20):
    """
    Parameters
    ----------
    stat_img : Niimg_like object
        Thresholded statistical map image
    voxel_thresh : float, optional
        Threshold to apply to `stat_img`. If a negative number is provided, a
        percentile threshold is used instead, where the percentile is
        determined by `100 + voxel_thresh` (e.g., -5 keeps the top 5%). Default: 1.96
    cluster_extent : int, optional
        Minimum number of voxels required to consider a cluster. Default: 20

    Returns
    -------
    cluster_img : Nifti1Image
        4D image of brain regions, where each volume is a distinct cluster
    """
    # get input data image
    stat_img = image.index_img(check_niimg(stat_img, atleast_4d=True), 0)

    # threshold image
    if voxel_thresh < 0:
        voxel_thresh = '{}%'.format(100 + voxel_thresh)
    thresh_img = image.threshold_img(stat_img, threshold=voxel_thresh)

    # extract clusters
    cluster_img = clusterize_img(thresh_img, cluster_extent=cluster_extent)

    return cluster_img
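A small sketch of the negative-threshold convention described in the docstrings above, showing how it maps onto threshold_img's percentile strings (the statistical map file is hypothetical):

from nilearn import image

stat_map = 'zstat1.nii.gz'   # hypothetical 3D statistical map
voxel_thresh = -5            # negative value -> percentile threshold
if voxel_thresh < 0:
    # -5 becomes '95%', i.e. keep only the top 5% of absolute values
    voxel_thresh = '{}%'.format(100 + voxel_thresh)
thresh_img = image.threshold_img(stat_map, threshold=voxel_thresh)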
Example #13
File: plot.py Project: jerdra/niviz
def plot_orthogonal_views(data,
                          bbox_nii=None,
                          auto_brightness=False,
                          display_modes=['x', 'y', 'z'],
                          n_cuts=10,
                          plot_func=nplot.plot_anat,
                          figure_title=""):

    if bbox_nii is None:
        bbox_nii = nimg.threshold_img(data, 1e-3)

    cuts = cuts_from_bbox(bbox_nii, cuts=n_cuts)
    if auto_brightness:
        robust_params = robust_set_limits(bbox_nii.get_fdata().reshape(-1), {})
    else:
        robust_params = {}

    svgs = []
    for d in display_modes:
        plot_params = {
            "display_mode": d,
            "cut_coords": cuts[d],
            **robust_params
        }
        display = plot_func(data, **plot_params)
        svg = extract_svg(display)
        svg = svg.replace("figure_1", f"{figure_title}-{d}")
        svgs.append(fromstring(svg))
        display.close()
    return svgs
Example #14
def plotstatsimg(cbf,
                 ref_vol,
                 plot_params=None,
                 order=('z', 'x', 'y'),
                 estimate_brightness=False,
                 label=None,
                 compress='auto'):
    """
    plot zsts
    """
    plot_params = {} if plot_params is None else plot_params

    image_nii = _3d_in_file(cbf)
    data = image_nii.get_fdata()
    from nilearn.image import threshold_img
    bbox_nii = threshold_img(nb.load(cbf), 1)

    cuts = cuts_from_bbox(bbox_nii, cuts=7)

    out_files = []
    if estimate_brightness:
        plot_params = robust_set_limits(data, plot_params)

    # Plot each cut axis
    for i, mode in enumerate(list(order)):
        plot_params['display_mode'] = mode
        plot_params['cut_coords'] = cuts[mode]
        plot_params['draw_cross'] = False
        plot_params['symmetric_cbar'] = False
        plot_params['threshold'] = 0.05
        plot_params['vmax'] = 90
        plot_params['colorbar'] = False
        plot_params['cmap'] = 'gray'
        if i == 0:
            plot_params['title'] = label
            plot_params['colorbar'] = False
        else:
            plot_params['title'] = None

        # Generate nilearn figure
        from nilearn.plotting import plot_stat_map
        display = plot_stat_map(stat_map_img=cbf,
                                bg_img=ref_vol,
                                **plot_params)
        svg = extract_svg(display, compress=compress)
        display.close()
        from lxml import etree
        from .. import NIWORKFLOWS_LOG
        # Find and replace the figure_1 id.
        try:
            xml_data = etree.fromstring(svg)
        except etree.XMLSyntaxError as e:
            NIWORKFLOWS_LOG.info(e)
            return

        svg_fig = SVGFigure()
        svg_fig.root = xml_data
        out_files.append(svg_fig)

    return out_files
Example #15
def test_validity_threshold_value_in_threshold_img():
    shape = (6, 8, 10)
    maps, _ = data_gen.generate_maps(shape, n_regions=2)

    # testing to raise same error when threshold=None case
    with pytest.raises(ValueError,
                       match="The input parameter 'threshold' is empty. "):
        threshold_img(maps, threshold=None)

    invalid_threshold_values = ['90t%', 's%', 't', '0.1']
    name = 'threshold'
    for thr in invalid_threshold_values:
        with pytest.raises(ValueError,
                           match='{0}.+should be a number followed by '
                                 'the percent sign'.format(name)):
            threshold_img(maps, threshold=thr)
Example #16
def reference_model(X_train, X_test, y_train, y_test, outputpath, estimator):

    #create the scores array using Ridge and save the images for further comparison
    estimator.fit(X_train, y_train)
    predictions = estimator.predict(X_test)
    scores = r2_score(y_test, predictions, multioutput='raw_values')
    scores[scores < 0] = 0

    score_map_img = masker.inverse_transform(scores)
    final_img = threshold_img(score_map_img, threshold=1e-6)

    plotting.plot_stat_map(final_img,
                           bg_img=meanepi,
                           cut_coords=5,
                           display_mode='z',
                           aspect=1.25,
                           threshold=1e-6,
                           title="Results for the reference (no segmentation)")
    #We save the images
    img4_name = 'ref_img.png'
    img4_path = os.path.join(outputpath, img4_name)
    plt.savefig(img4_path)
    print("Images saved successfully")
    plt.close()

    return scores
Example #17
def _region_extractor_cache(maps_img, mask_img=None, min_region_size=2500,
                            threshold=1., thresholding_strategy='ratio_n_voxels',
                            extractor='local_regions',
                            memory=Memory(cachedir=None), memory_level=0):
    """Region Extraction with caching built upon helper functions

    See nilearn.regions.RegionExtractor for documentation and related details.
    """

    if thresholding_strategy == 'ratio_n_voxels':
        print("[Thresholding] using maps ratio with threshold={0}"
              " ".format(threshold))
        threshold_maps = _threshold_maps_ratio(maps_img, threshold)
    else:
        if thresholding_strategy == 'percentile':
            threshold = "{0}%".format(threshold)
            print("[Thresholding] using threshold_img with threshold={0}"
                  " ".format(threshold))
        threshold_maps = threshold_img(maps_img, mask_img=mask_img,
                                       threshold=threshold)
    print("Done thresholding")
    print("[Region Extraction] with {0}".format(extractor))
    regions_img, _ = cache(connected_regions, memory,
                           memory_level=memory_level,
                           func_memory_level=1)(threshold_maps,
                                                min_region_size=min_region_size,
                                                extract_type=extractor)
    return regions_img
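The cached helper above mirrors the public nilearn.regions.RegionExtractor estimator that its docstring points to; a minimal sketch of that estimator, with a hypothetical 4D maps image:

from nilearn.regions import RegionExtractor

maps_img = 'ica_components.nii.gz'   # hypothetical 4D image of component maps
extractor = RegionExtractor(maps_img, threshold=1.0,
                            thresholding_strategy='ratio_n_voxels',
                            extractor='local_regions',
                            min_region_size=2500)
extractor.fit()
regions_img = extractor.regions_img_   # 4D image, one extracted region per volume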
Example #18
def plot_segs(image_nii,
              seg_niis,
              mask_nii,
              out_file,
              masked=False,
              title=None,
              compress='auto',
              **plot_params):
    """ plot segmentation as contours over the image (e.g. anatomical).
    seg_niis should be a list of files. mask_nii helps determine the cut
    coordinates. plot_params will be passed on to nilearn plot_* functions. If
    seg_niis is a list of size one, it behaves as if it was plotting the mask.
    """
    def _plot_anat_with_contours(image, segs=None, **plot_params):
        assert segs is not None
        assert len(segs) <= 3
        plot_params = {} if plot_params is None else plot_params

        # anatomical
        svg = plot_anat(image, **plot_params)

        # segment contours
        for seg, color in zip(segs, ['r', 'b']):
            plot_params['colors'] = color
            plot_params['levels'] = [
                0.5
            ] if 'levels' not in plot_params else plot_params['levels']
            plot_params['alpha'] = 1
            plot_params['linewidths'] = 0.7
            svg.add_contours(seg, **plot_params)

        svgs_list.append(extract_svg(svg, compress=compress))
        svg.close()

    plot_params = {} if plot_params is None else plot_params

    image_nii = _3d_in_file(image_nii)
    data = image_nii.get_data()

    plot_params = robust_set_limits(data, plot_params)

    seg_niis = filemanip.filename_to_list(seg_niis)
    mask_nii = nb.load(mask_nii) if masked else nlimage.threshold_img(
        mask_nii, 1e-3)

    cuts = cuts_from_bbox(mask_nii, cuts=7)

    svgs_list = []
    plot_xyz(image_nii,
             _plot_anat_with_contours,
             cuts,
             segs=seg_niis,
             **plot_params)

    save_html(template='segmentation.tpl',
              report_file_name=out_file,
              unique_string='seg' + str(uuid4()),
              base_image='<br />'.join(svgs_list),
              title=title)
Example #19
    def _generate_report(self):
        """Generate the visual report."""
        from nilearn.image import threshold_img, load_img
        from nilearn.masking import apply_mask, unmask
        from niworkflows.viz.utils import plot_registration

        NIWORKFLOWS_LOG.info("Generating visual report")

        fixed_image_nii = load_img(self._fixed_image)
        moving_image_nii = load_img(self._moving_image)
        contour_nii = load_img(
            self._contour) if self._contour is not None else None

        if self._fixed_image_mask:
            fixed_image_nii = unmask(
                apply_mask(fixed_image_nii, self._fixed_image_mask),
                self._fixed_image_mask,
            )
            # since the moving image is already in the fixed image space we
            # should apply the same mask
            moving_image_nii = unmask(
                apply_mask(moving_image_nii, self._fixed_image_mask),
                self._fixed_image_mask,
            )
            mask_nii = load_img(self._fixed_image_mask)
        else:
            mask_nii = threshold_img(fixed_image_nii, 1e-3)

        n_cuts = 7
        if not self._fixed_image_mask and contour_nii:
            cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
        else:
            cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)

        # Call composer
        compose_view(
            plot_registration(
                fixed_image_nii,
                "fixed-image",
                estimate_brightness=True,
                cuts=cuts,
                label=self._fixed_image_label,
                contour=contour_nii,
                compress=self.inputs.compress_report,
                dismiss_affine=self._dismiss_affine,
            ),
            plot_registration(
                moving_image_nii,
                "moving-image",
                estimate_brightness=True,
                cuts=cuts,
                label=self._moving_image_label,
                contour=contour_nii,
                compress=self.inputs.compress_report,
                dismiss_affine=self._dismiss_affine,
            ),
            out_file=self._out_report,
        )
Example #20
    def _generate_report(self):
        """Generate a reportlet."""
        LOGGER.info('Generating denoising visual report')

        input_dwi, denoised_nii, _ = self._get_plotting_images()

        # find an image to use as the background
        image_data = input_dwi.get_fdata()
        image_intensities = np.array([img.mean() for img in image_data.T])
        lowb_index = int(np.argmax(image_intensities))
        highb_index = int(np.argmin(image_intensities))

        # Original images
        orig_lowb_nii = input_dwi.slicer[..., lowb_index]
        orig_highb_nii = input_dwi.slicer[..., highb_index]

        # Denoised images
        denoised_lowb_nii = denoised_nii.slicer[..., lowb_index]
        denoised_highb_nii = denoised_nii.slicer[..., highb_index]

        # Find spatial extent of the image
        contour_nii = mask_nii = None
        if isdefined(self.inputs.mask):
            contour_nii = load_img(self.inputs.mask)
        else:
            mask_nii = threshold_img(denoised_lowb_nii, 50)
        cuts = cuts_from_bbox(contour_nii or mask_nii, cuts=self._n_cuts)

        diff_lowb_nii = nb.Nifti1Image(orig_lowb_nii.get_fdata() -
                                       denoised_lowb_nii.get_fdata(),
                                       affine=denoised_lowb_nii.affine)
        diff_highb_nii = nb.Nifti1Image(orig_highb_nii.get_fdata() -
                                        denoised_highb_nii.get_fdata(),
                                        affine=denoised_highb_nii.affine)

        # Call composer
        compose_view(plot_denoise(denoised_lowb_nii,
                                  denoised_highb_nii,
                                  'moving-image',
                                  estimate_brightness=True,
                                  cuts=cuts,
                                  label='De-Gibbs',
                                  lowb_contour=None,
                                  highb_contour=None,
                                  compress=False),
                     plot_denoise(diff_lowb_nii,
                                  diff_highb_nii,
                                  'fixed-image',
                                  estimate_brightness=True,
                                  cuts=cuts,
                                  label="Estimated Ringing",
                                  lowb_contour=None,
                                  highb_contour=None,
                                  compress=False),
                     out_file=self._out_report)

        self._calculate_nmse(input_dwi, denoised_nii)
Example #21
def test_threshold_img_copy():
    """Test the behavior of threshold_img's copy parameter."""
    img_ones = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4))

    # Check that copy does not mutate. It returns modified copy.
    thresholded = threshold_img(img_ones, 2)  # threshold 2 > 1

    # Original img_ones should have all ones.
    assert_array_equal(get_data(img_ones), np.ones((10, 10, 10, 10)))
    # Thresholded should have all zeros.
    assert_array_equal(get_data(thresholded), np.zeros((10, 10, 10, 10)))

    # Check that not copying does mutate.
    img_to_mutate = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4))
    thresholded = threshold_img(img_to_mutate, 2, copy=False)
    # Check that original mutates
    assert_array_equal(get_data(img_to_mutate), np.zeros((10, 10, 10, 10)))
    # And that returned value is also thresholded.
    assert_array_equal(get_data(img_to_mutate), get_data(thresholded))
Example #22
def test_threshold_img_copy():

    img_zeros = Nifti1Image(np.zeros((10, 10, 10, 10)), np.eye(4))
    img_ones = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4))

    # Check that copy does not mutate. It returns modified copy.
    thresholded = threshold_img(img_ones, 2)  # threshold 2 > 1

    # Original img_ones should have all ones.
    assert_array_equal(img_ones.get_data(), np.ones((10, 10, 10, 10)))
    # Thresholded should have all zeros.
    assert_array_equal(thresholded.get_data(), np.zeros((10, 10, 10, 10)))

    # Check that not copying does mutate.
    img_to_mutate = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4))
    thresholded = threshold_img(img_to_mutate, 2, copy=False)
    # Check that original mutates
    assert_array_equal(img_to_mutate.get_data(), np.zeros((10, 10, 10, 10)))
    # And that returned value is also thresholded.
    assert_array_equal(img_to_mutate.get_data(), thresholded.get_data())
Example #23
def test_validity_threshold_value_in_threshold_img():
    """Check that invalid values to threshold_img's threshold parameter raise
    Exceptions.
    """
    shape = (6, 8, 10)
    maps, _ = data_gen.generate_maps(shape, n_regions=2)

    # testing to raise same error when threshold=None case
    with pytest.raises(
            TypeError,
            match="threshold should be either a number or a string",
    ):
        threshold_img(maps, threshold=None)

    invalid_threshold_values = ['90t%', 's%', 't', '0.1']
    name = 'threshold'
    for thr in invalid_threshold_values:
        with pytest.raises(ValueError,
                           match='{0}.+should be a number followed by '
                           'the percent sign'.format(name)):
            threshold_img(maps, threshold=thr)
Example #24
def plot_segs(image_nii,
              seg_niis,
              out_file,
              bbox_nii=None,
              masked=False,
              colors=None,
              compress="auto",
              **plot_params):
    """
    Generate a static mosaic with ROIs represented by their delimiting contour.

    Plot segmentation as contours over the image (e.g. anatomical).
    seg_niis should be a list of files. bbox_nii helps determine the cut
    coordinates. plot_params will be passed on to nilearn plot_* functions. If
    seg_niis is a list of size one, it behaves as if it was plotting the mask.
    """
    from svgutils.transform import fromstring
    from nilearn import image as nlimage

    plot_params = {} if plot_params is None else plot_params

    image_nii = _3d_in_file(image_nii)
    canonical_r = rotation2canonical(image_nii)
    image_nii = rotate_affine(image_nii, rot=canonical_r)
    seg_niis = [
        rotate_affine(_3d_in_file(f), rot=canonical_r) for f in seg_niis
    ]
    data = image_nii.get_fdata()

    plot_params = robust_set_limits(data, plot_params)

    bbox_nii = (image_nii if bbox_nii is None else rotate_affine(
        _3d_in_file(bbox_nii), rot=canonical_r))

    if masked:
        bbox_nii = nlimage.threshold_img(bbox_nii, 1e-3)

    cuts = cuts_from_bbox(bbox_nii, cuts=7)
    plot_params["colors"] = colors or plot_params.get("colors", None)
    out_files = []
    for d in plot_params.pop("dimensions", ("z", "x", "y")):
        plot_params["display_mode"] = d
        plot_params["cut_coords"] = cuts[d]
        svg = _plot_anat_with_contours(image_nii,
                                       segs=seg_niis,
                                       compress=compress,
                                       **plot_params)
        # Find and replace the figure_1 id.
        svg = svg.replace("figure_1", "segmentation-%s-%s" % (d, uuid4()), 1)
        out_files.append(fromstring(svg))

    return out_files
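The _plot_anat_with_contours step used by these plot_segs variants reduces to nilearn's add_contours on an anatomical display; a rough stand-alone equivalent with hypothetical file names:

from nilearn import plotting

display = plotting.plot_anat('T1w.nii.gz', display_mode='z', cut_coords=7)
# Draw each segmentation as a contour at the 0.5 level.
for seg, color in zip(['gm_seg.nii.gz', 'wm_seg.nii.gz'], ['r', 'b']):
    display.add_contours(seg, colors=color, levels=[0.5], linewidths=0.7)
display.savefig('segmentation_contours.png')
display.close()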
Example #25
def second_processing(masker,filename_irm,loaded_stimuli,main_path,meanepi,alpha,nbc,num_clust,best_run=None):
   
    loaded_stimuli = np.load(filename_stimuli)
    fmri_data = masker.transform(filename_irm)
    fmri_ready = fmri_data[17:-(fmri_data.shape[0]-17-loaded_stimuli.shape[0])]
    X_train,y_train,X_test,y_test = retrieve_data(loaded_stimuli,fmri_ready)

    #getting the scores 
    if base_model == True:
        if optimized == True: 
            print('Useless argument -o provided. Using the sklearn MLP nonetheless...') 
        else: 
            pass
        estimator = MLPRegressor(hidden_layer_sizes=(500),verbose=True,batch_size=50,alpha=0.1,learning_rate_init=0.0001)
        estimator.fit(X_train,y_train)
        predictions = estimator.predict(X_test)

    else: 
        
        if optimized == True: 
            predictions = optimized_model(best_run,X_train,y_train,X_test,y_test)
        #Use the basic keras model
        else:
            predictions = keras_equivalent(X_train,y_train,X_test,y_test) 
    
    #we process the scores 
    scores = r2_score(y_test, predictions, multioutput='raw_values')
    scores[scores < 0] = 0

    #we display the important results 
    Y_dim = y_train.shape
    unique, counts = np.unique(scores, return_counts=True)
    maxi = max(unique)
    occurence = max(counts)
    print('\nscores max: ', maxi)
    print('scores equals to 0: ',occurence)
    print('scores above 0: ',Y_dim[1]-occurence,'\n')
    plt.figure()

    #plotting the results
    mean_score_map_img = masker.inverse_transform(scores)
    mean_img = threshold_img(mean_score_map_img, threshold=1e-6)

    #Plot the mean image of all images obtained by the different segmentation  
    plotting.plot_stat_map(mean_img, bg_img=meanepi, cut_coords=5, display_mode='z', aspect=1.25, threshold=1e-6,
        title="Mean image")

    #We save the image 
    img2_name = 'mean_img_label_'+str(nbc)+'.png'
    img2_path = os.path.join(main_path,img2_name)
    plt.savefig(img2_path)
    plt.close()
Example #26
    def _generate_report(self):
        """Generate a reportlet."""
        NIWORKFLOWS_LOG.info('Generating visual report')

        movnii = refnii = load_img(self.inputs.reference)
        fmapnii = load_img(self.inputs.fieldmap)

        if isdefined(self.inputs.moving):
            movnii = load_img(self.inputs.moving)

        contour_nii = mask_nii = None
        if isdefined(self.inputs.mask):
            contour_nii = load_img(self.inputs.mask)
            maskdata = contour_nii.get_fdata() > 0
        else:
            mask_nii = threshold_img(refnii, 1e-3)
            maskdata = mask_nii.get_fdata() > 0
        cuts = cuts_from_bbox(contour_nii or mask_nii, cuts=self._n_cuts)
        fmapdata = fmapnii.get_fdata()
        vmax = max(abs(np.percentile(fmapdata[maskdata], 99.8)),
                   abs(np.percentile(fmapdata[maskdata], 0.2)))

        fmap_overlay = [{
            'overlay': fmapnii,
            'overlay_params': {
                'cmap': coolwarm_transparent(max_alpha=self.inputs.max_alpha),
                'vmax': vmax,
                'vmin': -vmax,
            }
        }] * 2

        if self.inputs.show != 'both':
            fmap_overlay[not self.inputs.show] = {}

        # Call composer
        compose_view(plot_registration(movnii,
                                       'moving-image',
                                       estimate_brightness=True,
                                       cuts=cuts,
                                       label=self.inputs.moving_label,
                                       contour=contour_nii,
                                       compress=False,
                                       **fmap_overlay[1]),
                     plot_registration(refnii,
                                       'fixed-image',
                                       estimate_brightness=True,
                                       cuts=cuts,
                                       label=self.inputs.reference_label,
                                       contour=contour_nii,
                                       compress=False,
                                       **fmap_overlay[0]),
                     out_file=self._out_report)
Example #27
def reference_model(X_train,
                    X_test,
                    y_train,
                    y_test,
                    masker,
                    outputpath,
                    estimator,
                    meanepi,
                    layer=0,
                    subject=0,
                    worth_saving=False):

    estimator.fit(X_train, y_train)
    predictions = estimator.predict(X_test)
    scores = r2_score(y_test, predictions, multioutput='raw_values')
    scores[scores < 0] = 0
    maxref = np.max(scores)

    if worth_saving == True:
        savingpath = os.path.join(outputpath, 'layer_' + str(layer))
        create_saving_folder(savingpath)

        score_map_img = masker.inverse_transform(scores)
        final_img = threshold_img(score_map_img, threshold=1e-6)
        plotting.plot_stat_map(
            final_img,
            bg_img=meanepi,
            cut_coords=5,
            display_mode='z',
            aspect=1.25,
            threshold=1e-6,
            title="Results for the reference (no segmentation)")

        #We save the images
        img1_name = 'ref_img_layer' + str(layer) + '_sub_' + str(
            subject) + '.png'
        img1_path = os.path.join(savingpath, img1_name)
        plt.savefig(img1_path)

        #we save the data of the images
        img2_name = 'ref_img_layer' + str(layer) + '_sub_' + str(
            subject) + '.nii.gz'
        img2_path = os.path.join(savingpath, img2_name)
        final_img.to_filename(img2_path)

        print("Images saved successfully\n")
        plt.close()

        return None

    return maxref
Example #28
def simple_mixture(data,
                   index=None,
                   agg='mean',
                   fwhm=None,
                   threshold=None,
                   **kwargs):
    # extract array
    X = data
    if hasattr(X, 'X'):
        X = data.X
    if hasattr(X, 'data'):
        X = X.data
    if hasattr(X, 'values'):
        X = X.values

    # extract rows based on index
    mm_X = X[list(index), :].copy()
    mm_pos_X = np.copy(mm_X)
    mm_neg_X = np.copy(mm_X)

    # zero out pos, neg in opposite array
    mm_pos_X[(mm_X < 0.0)] = 0.0
    mm_neg_X[(mm_X > 0.0)] = 0.0

    # means
    # TODO: use StandardScaler().fit_transform(...)
    mean_pos_X = mm_pos_X.mean(axis=0) / (mm_pos_X.std(axis=0) +
                                          1) * len(index)
    mean_neg_X = mm_neg_X.mean(axis=0) / (mm_neg_X.std(axis=0) +
                                          1) * len(index)
    stack_pos_neg_X = np.stack([mean_pos_X, mean_neg_X])

    # agg over stack
    np_agg = agg if callable(agg) else getattr(np, agg)
    pos_neg_X = np_agg(stack_pos_neg_X, axis=0, keepdims=True)
    mm_X = mm_X.mean(axis=0, keepdims=True)  #pos_neg_X.astype(np.float32)

    # unmask
    #mm_X = scipy.stats.zscore(mm_X, axis=-1)
    mm_img = data.masker.inverse_transform(mm_X)

    # clean
    #mm_img = image.clean_img(mm_img, standardize=True)

    # smooth, threshold
    if fwhm is not None:
        mm_img = image.smooth_img(mm_img, fwhm=fwhm)
    if threshold is not None:
        mm_img = image.threshold_img(mm_img, threshold)
    return mm_img
Example #29
    def _generate_report(self):
        '''Make a composite for co-registration of surface and volume images'''

        import trimesh

        l_surf = nib.load(self._surf_l)
        r_surf = nib.load(self._surf_r)
        vol_img = nib.load(self._bg_nii)

        if vol_img.ndim == 4:
            vol_img = vol_img.slicer[:, :, :, 0]

        verts, trigs, offset = niviz.surface.gifti_get_full_brain_mesh(
            l_surf, r_surf)

        mesh = trimesh.Trimesh(vertices=verts, faces=trigs)
        mask_nii = nimg.threshold_img(vol_img, 1e-3)
        cuts = cuts_from_bbox(mask_nii, cuts=self._ncuts)

        sections = mesh.section_multiplane(plane_normal=[0, 0, 1],
                                           plane_origin=[0, 0, 0],
                                           heights=cuts['z'])

        zh = nplot.plot_anat(vol_img, display_mode='z', cut_coords=cuts['z'])

        for z, s in zip(cuts['z'], sections):
            ax = zh.axes[z].ax
            if s:
                for segs in s.discrete:
                    ax.plot(*segs.T, color='r', linewidth=0.5)

        if self._fg_nii:
            fg_img = nib.load(self._fg_nii).slicer[:, :, :, 0]
            fg_img = nimg.resample_to_img(fg_img,
                                          vol_img,
                                          interpolation="linear")
            # Custom colormap with transparencies
            ncolors = 256
            basecmap = 'viridis_r'
            color_array = plt.get_cmap(basecmap)(range(ncolors))
            color_array[:, -1] = np.linspace(1.0, 0.0, ncolors)

            # Set background intensity=0 to transparent
            color_array[0, :] = 0
            cmapviridis = mcolors.LinearSegmentedColormap.from_list(
                basecmap, colors=color_array)

            zh.add_overlay(fg_img, cmap=cmapviridis)

        zh.savefig(self._out_report)
Example #30
def plot_segs(image_nii,
              seg_niis,
              out_file,
              bbox_nii=None,
              masked=False,
              colors=None,
              compress='auto',
              **plot_params):
    """ plot segmentation as contours over the image (e.g. anatomical).
    seg_niis should be a list of files. bbox_nii helps determine the cut
    coordinates. plot_params will be passed on to nilearn plot_* functions. If
    seg_niis is a list of size one, it behaves as if it was plotting the mask.
    """
    plot_params = {} if plot_params is None else plot_params

    image_nii = _3d_in_file(image_nii)
    data = image_nii.get_data()

    plot_params = robust_set_limits(data, plot_params)

    bbox_nii = nb.load(image_nii if bbox_nii is None else bbox_nii)
    if masked:
        bbox_nii = nlimage.threshold_img(bbox_nii, 1e-3)

    cuts = cuts_from_bbox(bbox_nii, cuts=7)
    plot_params['colors'] = colors or plot_params.get('colors', None)
    out_files = []
    for d in plot_params.pop('dimensions', ('z', 'x', 'y')):
        plot_params['display_mode'] = d
        plot_params['cut_coords'] = cuts[d]
        svg = _plot_anat_with_contours(image_nii,
                                       segs=seg_niis,
                                       compress=compress,
                                       **plot_params)

        # Find and replace the figure_1 id.
        try:
            xml_data = etree.fromstring(svg)
        except etree.XMLSyntaxError as e:
            NIWORKFLOWS_LOG.info(e)
            return
        find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS)
        find_text(xml_data)[0].set('id', 'segmentation-%s-%s' % (d, uuid4()))

        svg_fig = SVGFigure()
        svg_fig.root = xml_data
        out_files.append(svg_fig)

    return out_files
Example #31
def plot_diff_avg(scores, threshold=0.01, coords=None, cross=False, **kwargs):
    '''plots subject scoremap using nilearn and returns display object'''
    mask = spenc_dir+'temporal_lobe_mask_grp_7T_test.nii.gz'
    background_img = '/home/data/psyinf/forrest_gump/anondata/templates/' + \
            'grpbold7Tp1/brain.nii.gz'
    scores = scores.copy()
    scores[np.abs(scores)<threshold] = 0.0
    unmasked = unmask(scores, mask)
    unmasked = threshold_img(unmasked, 0.001)
    display = plot_stat_map(
                    unmasked, cut_coords=coords, bg_img=background_img,
                    title='metric per voxel', dim=-1, aspect=1.25,
                    threshold=0.001, draw_cross=cross, **kwargs)
    fig = plt.gcf()
    fig.set_size_inches(12, 4)
    return display
Example #32
def plot_subj_ko(subj, scores, threshold=0.01, coords=None):
    '''plots subject scoremap using nilearn and returns display object'''
    subj_mask = './masks/template_mask_thick.nii.gz'
    background_img = os.path.join(DATA_DIR, 'templates', 'grpbold7Tp1', 'brain.nii.gz')
    scores = scores.copy()
    scores[scores<threshold] = 0.0
    unmasked = unmask(scores, subj_mask)
    unmasked = threshold_img(unmasked, 0.001)
    display = plot_stat_map(
                    unmasked, cut_coords=coords, bg_img=background_img,
                    symmetric_cbar=False,
                    title='metric per voxel', dim=-1, aspect=1.25,
                    threshold=0.001, draw_cross=False)
    fig = plt.gcf()
    fig.set_size_inches(12, 4)
    return display
Example #33
def plot_subj(subj, scores, threshold=0.01, coords=None):
    '''plots subject scoremap using nilearn and returns display object'''
    subj_mask = spenc_dir+'temporal_lobe_mask_brain_subj{0:02}bold.nii.gz'.format(subj)
    background_img = '/home/data/psyinf/forrest_gump/anondata/sub{0:03}/'.format(subj)+\
            'templates/bold7Tp1/brain.nii.gz'
    scores = scores.copy()
    scores[scores<threshold] = 0.0
    unmasked = unmask(scores, subj_mask)
    unmasked = threshold_img(unmasked, 0.001)
    display = plot_stat_map(
                    unmasked, cut_coords=coords, bg_img=background_img,
                    symmetric_cbar=False,
                    title='metric per voxel', dim=-1, aspect=1.25,
                    threshold=0.001, draw_cross=False)
    fig = plt.gcf()
    fig.set_size_inches(12, 4)
    return display
Example #34
    def _generate_report(self):
        """ Generates the visual report """
        from niworkflows.viz.utils import plot_registration
        NIWORKFLOWS_LOG.info('Generating visual report')

        fixed_image_nii = load_img(self._fixed_image)
        moving_image_nii = load_img(self._moving_image)
        contour_nii = load_img(self._contour) if self._contour is not None else None

        if self._fixed_image_mask:
            fixed_image_nii = unmask(apply_mask(fixed_image_nii,
                                                self._fixed_image_mask),
                                     self._fixed_image_mask)
            # since the moving image is already in the fixed image space we
            # should apply the same mask
            moving_image_nii = unmask(apply_mask(moving_image_nii,
                                                 self._fixed_image_mask),
                                      self._fixed_image_mask)
            mask_nii = load_img(self._fixed_image_mask)
        else:
            mask_nii = threshold_img(fixed_image_nii, 1e-3)

        n_cuts = 7
        if not self._fixed_image_mask and contour_nii:
            cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
        else:
            cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)

        # Call composer
        compose_view(
            plot_registration(fixed_image_nii, 'fixed-image',
                              estimate_brightness=True,
                              cuts=cuts,
                              label=self._fixed_image_label,
                              contour=contour_nii,
                              compress=self.inputs.compress_report),
            plot_registration(moving_image_nii, 'moving-image',
                              estimate_brightness=True,
                              cuts=cuts,
                              label=self._moving_image_label,
                              contour=contour_nii,
                              compress=self.inputs.compress_report),
            out_file=self._out_report
        )
Example #35
def get_masked_epi(fmri_instances, masker=None, smooth_factor=10, threshold="80%"):
	if(masker == None):
		if(isinstance(fmri_instances, list)):
			img_epi = image.mean_img(fmri_instances)
			img_epi = image.smooth_img(img_epi, smooth_factor)
			img_epi = image.threshold_img(img_epi, threshold=threshold)

			masker = NiftiMasker()
			masker.fit(img_epi)

			masked_instances = []

			for instance in fmri_instances:
				masked_instances += [apply_mask(instance, masker.mask_img_)]

			return masked_instances, masker
		else:
			masker = compute_epi_mask(fmri_instances)

	return apply_mask(fmri_instances, masker), masker
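A hedged usage sketch for get_masked_epi above; the run file names are hypothetical, and the list branch returns both the masked data and the fitted NiftiMasker for reuse:

runs = ['run-01_bold.nii.gz', 'run-02_bold.nii.gz']   # hypothetical EPI runs
masked_runs, masker = get_masked_epi(runs, smooth_factor=10, threshold="80%")
# masker.mask_img_ can then be reused to mask further images from the same session.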
Example #36
def reference_model_kmeans(X_train,X_test,y_train,y_test,outputpath,estimator,masker,subject):

    estimator.fit(X_train,y_train)
    predictions = estimator.predict(X_test)
    scores = r2_score(y_test, predictions, multioutput='raw_values')
    scores[scores < 0] = 0

    score_map_img = masker.inverse_transform(scores)
    final_img = threshold_img(score_map_img, threshold=1e-6)

    plotting.plot_stat_map(final_img, bg_img=meanepi,cut_coords=5,display_mode='z', aspect=1.25,threshold=1e-6, 
                    title="Results for the reference (no segmentation)")
    #We save the images 
    img4_name = 'ref_img_sub_'+str(subject)+'.nii.gz'
    img4_path = os.path.join(outputpath,img4_name)
    final_img.to_filename(img4_path)
    print("Images saved successfully")
    plt.close()

    return scores
Example #37
    def refvol(self): #create reference bold, bold mask, and mask the ref

        refs = [nib.load( os.path.join(self.subj.prep_dir,folder[0],file) )
                        for folder in os.walk(self.subj.prep_dir)
                        for file in folder[2]
                        if '%s_boldref.nii.gz'%(self.space) in file]
        ref = mean_img(refs)
        nib.save(ref,self.subj.refvol)
        
        masks = [nib.load( os.path.join(self.subj.prep_dir,folder[0],file) )
                        for folder in os.walk(self.subj.prep_dir)
                        for file in folder[2]
                        if '%s_desc-brain_mask.nii.gz'%(self.space) in file]
        mask = mean_img(masks)
        mask = threshold_img(mask,.1)
        nib.save(mask,self.subj.refvol_mask)

        os.system('fslmaths %s -bin %s'%(self.subj.refvol_mask,self.subj.refvol_mask))

        os.system('fslmaths %s -mas %s %s'%(self.subj.refvol,self.subj.refvol_mask,self.subj.refvol_brain))
Example #38
def plot_segs(image_nii, seg_niis, out_file, bbox_nii=None, masked=False,
              colors=None, compress='auto', **plot_params):
    """ plot segmentation as contours over the image (e.g. anatomical).
    seg_niis should be a list of files. bbox_nii helps determine the cut
    coordinates. plot_params will be passed on to nilearn plot_* functions. If
    seg_niis is a list of size one, it behaves as if it was plotting the mask.
    """
    plot_params = {} if plot_params is None else plot_params

    image_nii = _3d_in_file(image_nii)
    data = image_nii.get_data()

    plot_params = robust_set_limits(data, plot_params)

    bbox_nii = nb.load(image_nii if bbox_nii is None else bbox_nii)
    if masked:
        bbox_nii = nlimage.threshold_img(bbox_nii, 1e-3)

    cuts = cuts_from_bbox(bbox_nii, cuts=7)
    plot_params['colors'] = colors or plot_params.get('colors', None)
    out_files = []
    for d in plot_params.pop('dimensions', ('z', 'x', 'y')):
        plot_params['display_mode'] = d
        plot_params['cut_coords'] = cuts[d]
        svg = _plot_anat_with_contours(image_nii, segs=seg_niis, compress=compress,
                                       **plot_params)

        # Find and replace the figure_1 id.
        try:
            xml_data = etree.fromstring(svg)
        except etree.XMLSyntaxError as e:
            NIWORKFLOWS_LOG.info(e)
            return
        find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS)
        find_text(xml_data)[0].set('id', 'segmentation-%s-%s' % (d, uuid4()))

        svg_fig = SVGFigure()
        svg_fig.root = xml_data
        out_files.append(svg_fig)

    return out_files
Example #39
def mp2rage_masked(filename_uni, filename_inv2, filename_output=None, threshold="70%"):
    """ This is an alternativ approach to get rid of the unusual noise
    distribution in the background of mp2rage images. It was inspired by
    a suggestion on GitHub.com and works as follows:
    1. thresholding the INV2 image 
    2. creating a background mask using nilearn's 'compute_background_mask'
    3. applying that mask to clear the background from noise
    
    Parameters
    ----------
    filename_uni : string
        path to the uniform T1-image (UNI)
    filename_inv2 : string
        path to the second inversion image (INV2)
    filename_output : string  (optional)
        path to output image 
    threshold : float or string (optional)
        absolute value (float) or percentage (string, e.g. '50%')
    """
    # load data
    image_uni  = nib.load(filename_uni)
    image_inv2 = nib.load(filename_inv2)

    image_uni_fdata = image_uni.get_fdata()
    image_inv2_fdata  = image_inv2.get_fdata()
    
    # scale UNI image values 
    if (np.amin(image_uni_fdata) >= 0) and (np.amax(image_uni_fdata) >= 0.51):
        scale = lambda x: (x - np.amax(image_uni_fdata)/2) / np.amax(image_uni_fdata)
        image_uni_fdata = scale(image_uni_fdata)

    image_mask = masking.compute_background_mask(image.threshold_img(image_inv2,threshold))

    image_uni_fdata[image_mask.get_data()==0] = -.5

    image_output = nib.Nifti1Image(image_uni_fdata, image_uni.affine, nib.Nifti1Header())

    if filename_output:
        nib.save(image_output, filename_output)
    else:
        return image_output
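A short usage sketch for mp2rage_masked, with hypothetical MP2RAGE file names:

# Write the background-masked UNI image to disk...
mp2rage_masked('sub-01_UNI.nii.gz', 'sub-01_inv-2_MP2RAGE.nii.gz',
               filename_output='sub-01_UNI_masked.nii.gz', threshold='70%')
# ...or keep it in memory for further processing.
masked_uni = mp2rage_masked('sub-01_UNI.nii.gz', 'sub-01_inv-2_MP2RAGE.nii.gz')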
Example #40
File: plot.py Project: jerdra/niviz
def plot_montage(data,
                 orientation,
                 bbox_nii=None,
                 n_cuts=15,
                 n_cols=5,
                 auto_brightness=False,
                 plot_func=nplot.plot_anat,
                 figure_title="figure"):
    '''
    Plot a montage of cuts for a given orientation
    for an image
    '''

    if bbox_nii is None:
        bbox_nii = nimg.threshold_img(data, 1e-3)

    cuts = cuts_from_bbox(bbox_nii, cuts=n_cuts)
    if auto_brightness:
        robust_params = robust_set_limits(bbox_nii.get_fdata().reshape(-1), {})
    else:
        robust_params = {}

    svgs = []
    for i in range(n_cuts // n_cols):

        start = i * n_cols
        end = min(i * n_cols + n_cols, n_cuts)
        row_cuts = cuts[orientation][start:end]

        plot_params = {
            "display_mode": orientation,
            "cut_coords": row_cuts,
            **robust_params
        }

        display = plot_func(data, **plot_params)
        svg = extract_svg(display)
        svg = svg.replace("figure_1", f"{figure_title}:{start}-{end}")
        svgs.append(fromstring(svg))

    return svgs
Example #41
def mean_mixture(data, index=None, fwhm=None, threshold=None, **kwargs):
    # extract array
    try:
        X = data.data.values
    except:
        X = data.X
   
    # extract rows based on index
    mm_X = X[list(index), :].copy() # 
    mm_X = mm_X.mean(axis=0, keepdims=True)

    # unmask
    mm_img = data.masker.inverse_transform(mm_X)
  
    # smooth, threshold
    #mm_img = image.mean_img(mm_img)
    if fwhm is not None:
        mm_img = image.smooth_img(mm_img, fwhm=fwhm)        
    if threshold is not None:
        mm_img = image.threshold_img(mm_img, threshold)
    return mm_img
Example #42
################################################################################
# Fetching t-statistic image of localizer constrasts by loading from datasets
# utilities
from nilearn import datasets

localizer = datasets.fetch_neurovault_auditory_computation_task()
tmap_filename = localizer.images[0]

################################################################################
# Threshold the t-statistic image by importing threshold function
from nilearn.image import threshold_img

# Two types of strategies can be used with this threshold function
# Type 1: strategy used will be based on scoreatpercentile
threshold_percentile_img = threshold_img(tmap_filename, threshold='97%')


# Type 2: threshold strategy used will be based on image intensity
# Here, the threshold value should be within the image limits, i.e. less than the max value.
threshold_value_img = threshold_img(tmap_filename, threshold=3.0)

################################################################################
# Visualization
# Showing thresholding results by importing plotting modules and its utilities
from nilearn import plotting

# Showing percentile threshold image
plotting.plot_stat_map(threshold_percentile_img, display_mode='z', cut_coords=5,
                       title='Threshold image with string percentile', colorbar=False)
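The snippet breaks off here; the intensity-thresholded image created above could be displayed the same way, roughly:

# Showing intensity-threshold image (value-based strategy from above)
plotting.plot_stat_map(threshold_value_img, display_mode='z', cut_coords=5,
                       title='Threshold image with intensity value', colorbar=False)
plotting.show()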
Example #43
        fig = plt.figure(figsize=(6, 6))
        filtered = pca.transform(model_preds[:, voxels_to_use])
        PC_rep[model] = filtered
        scatter = plt.scatter(filtered[:, 0], filtered[:, 1], cmap='YlGnBu',
                              c=logenergy, marker='o', alpha=0.3)
        plt.xlabel('1st PC')
        plt.ylabel('2nd PC')
        plt.title('{0}, 1st PC r w/ log stim energy {1:.2f}'.format(model, corrcoef(filtered[:, 0], logenergy)[0,1]))
        plt.savefig(spenc_dir+'MaThe/plots/feature_space/two_PCs_{}_subj_{}.svg'.format(model, subj))
        plt.close()

        subj_mask = spenc_dir + 'temporal_lobe_mask_head_subj{0:02}bold.nii.gz'.format(subj)
        background_img = '/home/data/psyinf/forrest_gump/anondata/sub{0:03}/'.format(subj) + 'templates/bold7Tp1/head.nii.gz'
        scores[scores<threshold] = 0
        unmasked = unmask(scores, subj_mask)
        unmasked = threshold_img(unmasked, threshold)
        display = plot_stat_map(
                        unmasked, bg_img=background_img, symmetric_cbar=False, aspect=1.25,
                        vmax=max_r2, threshold=threshold, draw_cross=False, dim=-1.)
        fig = plt.gcf()
        fig.set_size_inches(12, 4)

        plt.savefig(spenc_dir+'MaThe/plots/feature_space/PC_model_{}_subj_{}_pcnr_0.svg'.format(model, subj))
        plt.close()
        display = plot_stat_map(
                        unmasked, bg_img=background_img, symmetric_cbar=False, aspect=1.25,
                        vmax=max_r2, threshold=threshold, dim=-1.)

        plt.savefig(spenc_dir+'MaThe/plots/feature_space/PC_model_{}_subj_{}_pcnr_0_cross.svg'.format(model, subj))
        plt.close()
        preds_pc = np.zeros_like(filtered)
Example #44
def spatclust(img, min_cluster_size, threshold=None, index=None, mask=None):
    """
    Spatially clusters `img`

    Parameters
    ----------
    img : str or img_like
        Image file or object to be clustered
    min_cluster_size : int
        Minimum cluster size (in voxels)
    threshold : float, optional
        Whether to threshold `img` before clustering
    index : array_like, optional
        Whether to extract volumes from `img` for clustering
    mask : (S,) array_like, optional
        Boolean array for masking resultant data array

    Returns
    -------
    clustered : :obj:`numpy.ndarray`
        Boolean array of clustered (and thresholded) `img` data
    """

    # we need a 4D image for `niimg.iter_img`, below
    img = niimg.copy_img(check_niimg(img, atleast_4d=True))

    # temporarily set voxel sizes to 1mm isotropic so that `min_cluster_size`
    # represents the minimum number of voxels we want to be in a cluster,
    # rather than the minimum size of the desired clusters in mm^3
    if not np.all(np.abs(np.diag(img.affine)) == 1):
        img.set_sform(np.sign(img.affine))

    # grab desired volumes from provided image
    if index is not None:
        if not isinstance(index, list):
            index = [index]
        img = niimg.index_img(img, index)

    # threshold image
    if threshold is not None:
        img = niimg.threshold_img(img, float(threshold))

    clout = []
    for subbrick in niimg.iter_img(img):
        # `min_region_size` is not inclusive (as in AFNI's `3dmerge`)
        # subtract one voxel to ensure we aren't hitting this thresholding issue
        try:
            clsts = connected_regions(subbrick,
                                      min_region_size=int(min_cluster_size) - 1,
                                      smoothing_fwhm=None,
                                      extract_type='connected_components')[0]
        # if no clusters are detected we get a TypeError; create a blank 4D
        # image object as a placeholder instead
        except TypeError:
            clsts = niimg.new_img_like(subbrick,
                                       np.zeros(subbrick.shape + (1,)))
        # if multiple clusters detected, collapse into one volume
        clout += [niimg.math_img('np.sum(a, axis=-1)', a=clsts)]

    # convert back to data array and make boolean
    clustered = utils.load_image(niimg.concat_imgs(clout).get_data()) != 0

    # if mask provided, mask output
    if mask is not None:
        clustered = clustered[mask]

    return clustered
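A hedged call sketch for spatclust above; the statistical map is hypothetical, thresholding at |z| > 1.96 and keeping clusters of at least 20 voxels:

clustered = spatclust('zstat1.nii.gz', min_cluster_size=20,
                      threshold=1.96, index=0)
# `clustered` is a boolean array marking voxels that survive clustering.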
Example #45
                           multioutput='raw_values'))

##############################################################################
# Mapping the encoding scores on the brain
# ----------------------------------------
# To plot the scores onto the brain, we create a Nifti1Image containing
# the scores and then threshold it:

from nilearn.image import threshold_img
cut_score = np.mean(scores, axis=0)
cut_score[cut_score < 0] = 0

# bring the scores into the shape of the background brain
score_map_img = masker.inverse_transform(cut_score)

thresholded_score_map_img = threshold_img(score_map_img, threshold=1e-6)

##############################################################################
# Plotting the statistical map on a background brain, we mark four voxels
# which we will inspect more closely later on.
from nilearn.plotting import plot_stat_map
from nilearn.image.resampling import coord_transform

def index_to_xy_coord(x, y, z=10):
    '''Transforms data index to coordinates of the background + offset'''
    coords = coord_transform(x, y, z,
                             affine=thresholded_score_map_img.get_affine())
    return np.array(coords)[np.newaxis, :] + np.array([0, 1, 0])


xy_indices_of_special_voxels = [(30, 10), (32, 10), (31, 9), (31, 10)]
Example #46
            if luminaFlag:
                if config.iloc[indx+n][' STIM']==' LUMINA' and numTmp==0:
                    numButton+=1
                    numTmp+=1
            else:
                if config.iloc[indx+n][' STIM']!='53' and numTmp==0:
                    numButton+=1
                    numTmp+=1
    return numButton


mask_name='/home/jmuraskin/Projects/CCD/working_v1/seg_probabilities/grey_matter_mask-%d-percent.nii.gz' % int(gmThresh*100)

if not os.path.exists(mask_name):
    from nilearn.image import threshold_img
    mask_img=threshold_img('/home/jmuraskin/Projects/CCD/working_v1/seg_probabilities/grey_matter_mask.nii.gz',threshold=gmThresh)
    mask_img_data=mask_img.get_data()
    mask_img_data[mask_img_data>0]=1
    mask_img=new_img_like(mask_img,mask_img_data)
    mask_img.to_filename(mask_name)
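The binarisation above can also be written with nilearn's math_img, avoiding the manual get_data/new_img_like round trip; a sketch reusing the same gmThresh and mask_name as the snippet above:

from nilearn.image import math_img, threshold_img

gm_prob = '/home/jmuraskin/Projects/CCD/working_v1/seg_probabilities/grey_matter_mask.nii.gz'
mask_img = math_img("(img > 0).astype(np.int8)",
                    img=threshold_img(gm_prob, threshold=gmThresh))
mask_img.to_filename(mask_name)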

if runFC:
    copesToRun=[0]
else:
    fc=''

if RSN>0:
    rsn_name='RSN%d' % (RSN-1)
    rsn=RSN-1
    copesToRun=[0]
    fc=''