Example 1
def test_invalids_extract_types_in_connected_regions():
    maps, _ = generate_maps((10, 11, 12), n_regions=2)
    valid_names = ['connected_components', 'local_regions']

    # check that the expected ValueError is raised when an invalid input
    # is given to 'extract_type' in the connected_regions function
    message = "'extract_type' should be {0}"
    for invalid_extract_type in ['connect_region', 'local_regios']:
        with pytest.raises(ValueError, match=message.format(valid_names)):
            connected_regions(maps, extract_type=invalid_extract_type)
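For contrast with the error case exercised above, here is a minimal sketch of a valid call; the toy blob image is made up for illustration, and nilearn plus nibabel are assumed to be installed.

import numpy as np
import nibabel as nib
from nilearn.regions import connected_regions

# toy 3D map with a single bright blob on an identity (1 mm) affine
data = np.zeros((20, 20, 20))
data[5:10, 5:10, 5:10] = 1.0
img = nib.Nifti1Image(data, affine=np.eye(4))

# a valid extract_type returns a 4D regions image and the index of each map
regions_img, index = connected_regions(img, min_region_size=10,
                                       extract_type='connected_components')
# any other string for extract_type raises the ValueError checked above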
Example 2
def process_img(stat_img, cluster_extent, voxel_thresh=1.96):
    """
    Parameters
    ----------
    stat_img : Niimg_like object
        Thresholded statistical map image
    cluster_extent : int
        Minimum number of voxels required to consider a cluster
    voxel_thresh : float, optional
        Threshold to apply to `stat_img`. If a negative number is provided a
        percentile threshold is used instead, where the percentile is
        determined by the equation `100 - voxel_thresh`. Default: 1.96

    Returns
    -------
    cluster_img : Nifti1Image
        4D image of brain regions, where each volume is a distinct cluster
    """
    # get input data image
    stat_img = image.index_img(check_niimg(stat_img, atleast_4d=True), 0)

    # threshold image
    if voxel_thresh < 0:
        voxel_thresh = '{}%'.format(100 + voxel_thresh)
    else:
        # ensure that threshold is not greater than most extreme value in image
        if voxel_thresh > np.abs(stat_img.get_data()).max():
            empty = np.zeros(stat_img.shape + (1,))
            return image.new_img_like(stat_img, empty)
    thresh_img = image.threshold_img(stat_img, threshold=voxel_thresh)

    # extract clusters
    min_region_size = cluster_extent * np.prod(thresh_img.header.get_zooms())
    clusters = []
    for sign in ['pos', 'neg']:
        # keep only data of given sign
        data = thresh_img.get_data().copy()
        data[(data < 0) if sign == 'pos' else (data > 0)] = 0

        # Do nothing if data array contains only zeros
        if np.any(data):
            try:
                clusters += [connected_regions(
                    image.new_img_like(thresh_img, data),
                    min_region_size=min_region_size,
                    extract_type='connected_components')[0]]
            except TypeError:  # for no clusters
                pass

    # Return empty image if no clusters were found
    if len(clusters) == 0:
        return image.new_img_like(thresh_img, np.zeros(data.shape + (1,)))

    # Reorder clusters by their size
    clust_img = image.concat_imgs(clusters)
    cluster_size = (clust_img.get_data() != 0).sum(axis=(0, 1, 2))
    new_order = np.argsort(cluster_size)[::-1]
    clust_img_ordered = image.index_img(clust_img, new_order)

    return clust_img_ordered
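The core of process_img can be reproduced with the public nilearn API alone. A minimal sketch, assuming stat_path is a placeholder path to a 3D statistical map (not defined in the example above):

import numpy as np
from nilearn import image
from nilearn.regions import connected_regions

stat_img = image.load_img(stat_path)  # stat_path: hypothetical input path
thresh_img = image.threshold_img(stat_img, threshold=1.96)

# cluster_extent is given in voxels, but connected_regions expects mm^3,
# hence the multiplication by the voxel volume
cluster_extent = 20
min_region_size = cluster_extent * np.prod(thresh_img.header.get_zooms())

cluster_img, _ = connected_regions(thresh_img,
                                   min_region_size=min_region_size,
                                   extract_type='connected_components')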
Example 3
def cluster_binary_img(binary_img, mask_img, min_region_size='exhaustive'):

    # get voxel resolution in binary_img
    # NOTE: function currently assumes equal width in x,y,z direction
    voxel_sizes = binary_img.header.get_zooms()

    # if not specified by the user, cluster exhaustively, i.e. assign each and
    # every voxel to one and only one cluster
    if min_region_size == 'exhaustive':
        min_region_size_ = _get_voxel_volume(voxel_sizes) - 1
    else:
        min_region_size_ = min_region_size

    # count overall number of 1s in the binary image
    total_n_voxels = np.count_nonzero(binary_img.get_fdata())

    # extract clusters in binary image
    cluster_imgs, indices = connected_regions(
        maps_img=binary_img,
        min_region_size=min_region_size_,
        extract_type='connected_components',
        smoothing_fwhm=None,
        mask_img=mask_img)

    # As a sanity check and for user information, get the size of every region
    # and count the overall number of voxels that have been assigned to each region
    cluster_sizes = []
    total_n_voxels_assigned = 0

    for idx, cluster_img in enumerate(iter_img(cluster_imgs)):
        cluster_img_data = cluster_img.get_fdata()
        cluster_img_size = np.count_nonzero(cluster_img_data)
        cluster_sizes.append(cluster_img_size)
        total_n_voxels_assigned += cluster_img_size

    if total_n_voxels_assigned != total_n_voxels:
        raise ValueError(
            'Number of voxels in output clustered image is different from total number of voxels in input binary image '
        )

    # Collapse the extracted cluster images to one cluster atlas image
    cluster_imgs_labeled = []

    for idx, cluster_img in enumerate(iter_img(cluster_imgs), start=1):
        cluster_img_labeled = math_img(
            f"np.where(cluster_img == 1,{idx},cluster_img)",
            cluster_img=cluster_img)
        cluster_imgs_labeled.append(cluster_img_labeled)

    cluster_img_atlas = math_img("np.sum(imgs,axis=3)",
                                 imgs=cluster_imgs_labeled)

    # plot the cluster atlas image
    plotting.plot_roi(cluster_img_atlas,
                      title='Clustered Binary Image',
                      draw_cross=False)

    return cluster_sizes, cluster_img_atlas
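The collapsing step above (relabel each extracted cluster volume with its index, then sum across volumes) can be shown in isolation. A minimal sketch with two made-up binary volumes:

import numpy as np
import nibabel as nib
from nilearn.image import concat_imgs, iter_img, math_img

# two fake binary "cluster" volumes stacked into a 4D image
vol_a = np.zeros((10, 10, 10))
vol_a[1:3, 1:3, 1:3] = 1
vol_b = np.zeros((10, 10, 10))
vol_b[6:8, 6:8, 6:8] = 1
cluster_imgs = concat_imgs([nib.Nifti1Image(v, np.eye(4)) for v in (vol_a, vol_b)])

# give volume i the label i, then sum the labelled volumes into one atlas image
labeled = [math_img(f"np.where(img == 1, {i}, img)", img=img)
           for i, img in enumerate(iter_img(cluster_imgs), start=1)]
atlas_img = math_img("np.sum(imgs, axis=3)", imgs=labeled)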
Example 4
def test_connected_regions():
    # 4D maps
    n_regions = 4
    maps, _ = generate_maps((30, 30, 30), n_regions=n_regions)
    # 3D maps
    map_img = np.zeros((30, 30, 30)) + 0.1 * np.random.randn(30, 30, 30)
    map_img = nibabel.Nifti1Image(map_img, affine=np.eye(4))

    # smoke test for connected_regions, also checking that the number of
    # extracted regions is at least the number already present.
    # 4D image case
    for extract_type in ['connected_components', 'local_regions']:
        connected_extraction_img, index = connected_regions(maps, min_region_size=10,
                                                            extract_type=extract_type)
        assert_true(connected_extraction_img.shape[-1] >= n_regions)
        assert_true(index, np.ndarray)
        # For 3D images the number of extracted regions should be at least one
        connected_extraction_3d_img, _ = connected_regions(map_img, min_region_size=10,
                                                           extract_type=extract_type)
        assert_true(connected_extraction_3d_img.shape[-1] >= 1)
Example 5
def test_connected_regions():
    # 4D maps
    n_regions = 4
    maps, _ = generate_maps((30, 30, 30), n_regions=n_regions)
    # 3D maps
    map_img = np.zeros((30, 30, 30)) + 0.1 * np.random.randn(30, 30, 30)
    map_img = nibabel.Nifti1Image(map_img, affine=np.eye(4))

    # smoke test for connected_regions, also checking that the number of
    # extracted regions is at least the number already present.
    # 4D image case
    for extract_type in ['connected_components', 'local_regions']:
        connected_extraction_img, index = connected_regions(
            maps, min_region_size=10, extract_type=extract_type)
        assert_true(connected_extraction_img.shape[-1] >= n_regions)
        assert_true(index, np.ndarray)
        # For 3D images the number of extracted regions should be at least one
        connected_extraction_3d_img, _ = connected_regions(
            map_img, min_region_size=10, extract_type=extract_type)
        assert_true(connected_extraction_3d_img.shape[-1] >= 1)
Example 6
    def create_local_clustering(self, overwrite, r_thresh):
        """
        API for performing any of a variety of clustering routines available through NiLearn.
        """
        import os.path as op
        from scipy.sparse import save_npz, load_npz
        from nilearn.regions import connected_regions
        from pynets.fmri.clustools import make_local_connectivity_tcorr, make_local_connectivity_scorr

        conn_comps = len(connected_regions(self._clust_mask_corr_img, extract_type='connected_components',
                                           min_region_size=1)[1])
        if self.clust_type == 'kmeans':
            if self.k > conn_comps:
                if conn_comps != 1:
                    raise ValueError('k must be less than or equal to the total number of connected components in '
                                     'the mask in the case of kmeans clustering.')

        if self.clust_type == 'complete' or self.clust_type == 'average' or self.clust_type == 'single':
            if conn_comps > 1:
                raise ValueError('Complete, Average, and Single linkage agglomerative clustering are unstable in the '
                                 'case of multiple connected components.')

        if self.clust_type == 'ward':
            if self.k < conn_comps:
                raise ValueError('k must minimally be greater than the total number of connected components in '
                                 'the mask in the case of agglomerative clustering.')
            if self.local_corr == 'tcorr' or self.local_corr == 'scorr':
                self._local_conn_mat_path = "%s%s%s%s" % (self.uatlas.split('.nii')[0], '_', self.local_corr,
                                                          '_conn.npz')

                if (not op.isfile(self._local_conn_mat_path)) or (overwrite is True):
                    if self.local_corr == 'tcorr':
                        self._local_conn = make_local_connectivity_tcorr(self._func_img, self._clust_mask_corr_img,
                                                                         thresh=r_thresh)
                    elif self.local_corr == 'scorr':
                        self._local_conn = make_local_connectivity_scorr(self._func_img, self._clust_mask_corr_img,
                                                                         thresh=r_thresh)
                    else:
                        raise ValueError('Local connectivity type not available')

                    print("%s%s" % ('Saving spatially constrained connectivity structure to: ',
                                    self._local_conn_mat_path))
                    save_npz(self._local_conn_mat_path, self._local_conn)
                elif op.isfile(self._local_conn_mat_path):
                    self._local_conn = load_npz(self._local_conn_mat_path)
            elif self.local_corr == 'allcorr':
                self._local_conn = 'auto'
            else:
                raise ValueError('Local connectivity method not recognized. Only tcorr, scorr, and auto are currently '
                                 'supported')
        else:
            self._local_conn = 'auto'
        return
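The k-versus-components check above reduces to counting connected components in the clustering mask. A minimal sketch of that count on a made-up two-blob mask:

import numpy as np
import nibabel as nib
from nilearn.regions import connected_regions

mask = np.zeros((20, 20, 20))
mask[2:6, 2:6, 2:6] = 1        # first component
mask[12:16, 12:16, 12:16] = 1  # second component
mask_img = nib.Nifti1Image(mask, np.eye(4))

# the returned index has one entry per extracted component
_, index = connected_regions(mask_img, extract_type='connected_components',
                             min_region_size=1)
conn_comps = len(index)  # 2 here; kmeans with k > conn_comps would be rejected above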
Example 7
def test_connected_regions():
    # 4D maps
    n_regions = 4
    maps, mask_img = generate_maps((30, 30, 30), n_regions=n_regions)
    # 3D maps
    map_img = np.zeros((30, 30, 30)) + 0.1 * np.random.randn(30, 30, 30)
    map_img = nibabel.Nifti1Image(map_img, affine=np.eye(4))

    # smoke test for connected_regions, also checking that the number of
    # extracted regions is at least the number already present.
    # 4D image case
    for extract_type in ['connected_components', 'local_regions']:
        connected_extraction_img, index = connected_regions(maps, min_region_size=10,
                                                            extract_type=extract_type)
        assert_true(connected_extraction_img.shape[-1] >= n_regions)
        assert_true(index, np.ndarray)
        # For 3D images the number of extracted regions should be at least one
        connected_extraction_3d_img, _ = connected_regions(map_img, min_region_size=10,
                                                           extract_type=extract_type)
        assert_true(connected_extraction_3d_img.shape[-1] >= 1)

    # Test input mask_img
    extraction_with_mask_img, index = connected_regions(maps,
                                                        mask_img=mask_img)
    assert_true(extraction_with_mask_img.shape[-1] >= 1)

    # mask_img with different shape
    mask = np.zeros(shape=(10, 11, 12), dtype=int)
    mask[1:-1, 1:-1, 1:-1] = 1
    affine = np.array([[2., 0., 0., 0.],
                       [0., 2., 0., 0.],
                       [0., 0., 2., 0.],
                       [0., 0., 0., 2.]])
    mask_img = nibabel.Nifti1Image(mask, affine=affine)
    extraction_not_same_fov_mask, _ = connected_regions(maps,
                                                        mask_img=mask_img)
    assert_equal(maps.shape[:3], extraction_not_same_fov_mask.shape[:3])
    assert_not_equal(mask_img.shape, extraction_not_same_fov_mask.shape[:3])
Example 8
def test_connected_regions():
    # 4D maps
    n_regions = 4
    maps, mask_img = generate_maps((30, 30, 30), n_regions=n_regions)
    # 3D maps
    map_img = np.zeros((30, 30, 30)) + 0.1 * np.random.randn(30, 30, 30)
    map_img = nibabel.Nifti1Image(map_img, affine=np.eye(4))

    # smoke test for connected_regions, also checking that the number of
    # extracted regions is at least the number already present.
    # 4D image case
    for extract_type in ['connected_components', 'local_regions']:
        connected_extraction_img, index = connected_regions(maps, min_region_size=10,
                                                            extract_type=extract_type)
        assert_true(connected_extraction_img.shape[-1] >= n_regions)
        assert_true(index, np.ndarray)
        # For 3D images the number of extracted regions should be at least one
        connected_extraction_3d_img, _ = connected_regions(map_img, min_region_size=10,
                                                           extract_type=extract_type)
        assert_true(connected_extraction_3d_img.shape[-1] >= 1)

    # Test input mask_img
    extraction_with_mask_img, index = connected_regions(maps,
                                                        mask_img=mask_img)
    assert_true(extraction_with_mask_img.shape[-1] >= 1)

    # mask_img with different shape
    mask = np.zeros(shape=(10, 11, 12), dtype=int)
    mask[1:-1, 1:-1, 1:-1] = 1
    affine = np.array([[2., 0., 0., 0.],
                       [0., 2., 0., 0.],
                       [0., 0., 2., 0.],
                       [0., 0., 0., 2.]])
    mask_img = nibabel.Nifti1Image(mask, affine=affine)
    extraction_not_same_fov_mask, _ = connected_regions(maps,
                                                        mask_img=mask_img)
    assert_equal(maps.shape[:3], extraction_not_same_fov_mask.shape[:3])
    assert_not_equal(mask_img.shape, extraction_not_same_fov_mask.shape[:3])
Example 9
def test_connected_regions():
    rng = np.random.RandomState(42)

    # 4D maps
    n_regions = 4
    maps, mask_img = generate_maps((30, 30, 30), n_regions=n_regions)
    # 3D maps
    map_img = np.zeros(
        (30, 30, 30)) + 0.1 * rng.standard_normal(size=(30, 30, 30))
    map_img = nibabel.Nifti1Image(map_img, affine=np.eye(4))

    # smoke test for connected_regions, also checking that the number of
    # extracted regions is at least the number already present.
    # 4D image case
    for extract_type in ['connected_components', 'local_regions']:
        connected_extraction_img, index = connected_regions(
            maps, min_region_size=10, extract_type=extract_type)
        assert connected_extraction_img.shape[-1] >= n_regions
        assert index, np.ndarray
        # For 3D images the number of extracted regions should be at least one
        connected_extraction_3d_img, _ = connected_regions(
            map_img, min_region_size=10, extract_type=extract_type)
        assert connected_extraction_3d_img.shape[-1] >= 1

    # Test input mask_img
    mask = get_data(mask_img)
    mask[1, 1, 1] = 0
    extraction_with_mask_img, index = connected_regions(maps,
                                                        mask_img=mask_img)
    assert extraction_with_mask_img.shape[-1] >= 1

    extraction_without_mask_img, index = connected_regions(maps)
    assert np.all(get_data(extraction_with_mask_img)[mask == 0] == 0.)
    assert not np.all(get_data(extraction_without_mask_img)[mask == 0] == 0.)

    # mask_img with different shape
    mask = np.zeros(shape=(10, 11, 12), dtype=int)
    mask[1:-1, 1:-1, 1:-1] = 1
    affine = np.array([[2., 0., 0., 0.], [0., 2., 0., 0.], [0., 0., 2., 0.],
                       [0., 0., 0., 2.]])
    mask_img = nibabel.Nifti1Image(mask, affine=affine)
    extraction_not_same_fov_mask, _ = connected_regions(maps,
                                                        mask_img=mask_img)
    assert maps.shape[:3] == extraction_not_same_fov_mask.shape[:3]
    assert mask_img.shape != extraction_not_same_fov_mask.shape[:3]

    extraction_not_same_fov, _ = connected_regions(maps)
    assert (np.sum(get_data(extraction_not_same_fov) == 0) >
            np.sum(get_data(extraction_not_same_fov_mask) == 0))
Example 10
def _region_extractor_labels_image(atlas, extract_type='connected_components',
                                   min_region_size=0):
    """ Function takes atlas image denoted as labels for each region
        and then imposes region extraction algorithm on the image to
        split them into regions apart.

    Parameters
    ----------
    atlas : 3D Nifti-like image
        An image contains labelled regions.

    extract_type : 'connected_components', 'local_regions'
        See nilearn.regions.connected_regions for full documentation

    min_region_size : int
        Minimum region size, in mm^3, required for a region to be kept.

    """
    atlas_img = check_niimg(atlas)
    atlas_data = _safe_get_data(atlas_img)
    affine = atlas_img.get_affine()

    n_labels = np.unique(np.asarray(atlas_data))

    reg_imgs = []
    for label_id in n_labels:
        if label_id == 0:
            continue
        print("[Region Extraction] Processing with label {0}".format(label_id))
        region = (atlas_data == label_id) * atlas_data
        reg_img = new_img_like(atlas_img, region)
        regions, _ = connected_regions(reg_img, extract_type=extract_type,
                                       min_region_size=min_region_size)
        reg_imgs.append(regions)
    regions_extracted = concat_niimgs(reg_imgs)

    return regions_extracted, n_labels
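The key line in the loop above is the label mask. A tiny numpy illustration of how (atlas_data == label_id) * atlas_data keeps one label while preserving its value:

import numpy as np

atlas_data = np.array([[0, 1, 1],
                       [2, 2, 0],
                       [0, 2, 1]])

label_id = 2
region = (atlas_data == label_id) * atlas_data
# region is now:
# [[0 0 0]
#  [2 2 0]
#  [0 2 0]]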
Example 11
def clusterize_img(stat_img, cluster_extent=20):
    """
    Extracts clusters from statistical map `stat_img`

    Parameters
    ----------
    stat_img : Niimg_like object
        Thresholded statistical map image
    cluster_extent : int, optional
        Minimum number of voxels required to consider a cluster. Default: 20

    Returns
    -------
    cluster_img : Nifti1Image
        4D image of brain regions, where each volume is a distinct cluster
    """

    stat_img = check_niimg(stat_img)
    min_region_size = cluster_extent * np.prod(stat_img.header.get_zooms())
    cluster_img = connected_regions(stat_img,
                                    min_region_size=min_region_size,
                                    extract_type='connected_components')[0]

    return cluster_img
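clusterize_img takes cluster_extent in voxels, while connected_regions expects min_region_size in mm^3, hence the multiplication by the voxel volume. A minimal sketch of that conversion on a made-up 2 mm isotropic image:

import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.zeros((10, 10, 10)), affine=np.diag([2., 2., 2., 1.]))
voxel_volume = np.prod(img.header.get_zooms())  # 8.0 mm^3 per 2 mm isotropic voxel
min_region_size = 20 * voxel_volume             # 20 voxels expressed as 160.0 mm^3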
Example 12
    def create_local_clustering(self, overwrite, r_thresh, min_region_size=80):
        """
        API for performing any of a variety of clustering routines available
         through NiLearn.
        """
        import os.path as op
        from scipy.sparse import save_npz, load_npz
        from nilearn.regions import connected_regions

        try:
            conn_comps = connected_regions(
                self._clust_mask_corr_img,
                extract_type="connected_components",
                min_region_size=min_region_size,
            )
            self._conn_comps = conn_comps[0]
            self.num_conn_comps = len(conn_comps[1])
        except BaseException:
            raise ValueError("Clustering mask is empty!")

        if not self._conn_comps:
            if np.sum(np.asarray(self._clust_mask_corr_img.dataobj)) == 0:
                raise ValueError("Clustering mask is empty!")
            else:
                self._conn_comps = self._clust_mask_corr_img
                self.num_conn_comps = 1
        print(f"Detected {self.num_conn_comps} connected components in "
              f"clustering mask with a mininimum region "
              f"size of {min_region_size}")
        if (self.clust_type == "complete" or self.clust_type == "average"
                or self.clust_type == "single"):
            if self.num_conn_comps > 1:
                raise ValueError(
                    "Clustering method unstable with spatial constrainsts "
                    "applied to multiple connected components.")

        if (self.clust_type == "ward"
                and self.num_conn_comps > 1) or self.clust_type == "ncut":
            if self.k < self.num_conn_comps:
                raise ValueError(
                    "k must minimally be greater than the total number of "
                    "connected components in "
                    "the mask in the case of agglomerative clustering.")

            if self.local_corr == "tcorr" or self.local_corr == "scorr":
                self._local_conn_mat_path = (
                    f"{self.parcellation.split('.nii')[0]}_"
                    f"{self.local_corr}_conn.npz")

                if (not op.isfile(
                        self._local_conn_mat_path)) or (overwrite is True):
                    from pynets.fmri.clustering import (
                        make_local_connectivity_tcorr,
                        make_local_connectivity_scorr,
                    )

                    if self.local_corr == "tcorr":
                        self._local_conn = make_local_connectivity_tcorr(
                            self._func_img,
                            self._clust_mask_corr_img,
                            thresh=r_thresh)
                    elif self.local_corr == "scorr":
                        self._local_conn = make_local_connectivity_scorr(
                            self._func_img,
                            self._clust_mask_corr_img,
                            thresh=r_thresh)
                    else:
                        raise ValueError(
                            "Local connectivity type not available")
                    print(
                        f"Saving spatially constrained connectivity structure"
                        f" to: {self._local_conn_mat_path}")
                    save_npz(self._local_conn_mat_path, self._local_conn)
                elif op.isfile(self._local_conn_mat_path):
                    self._local_conn = load_npz(self._local_conn_mat_path)
            elif self.local_corr == "allcorr":
                if self.clust_type == "ncut":
                    raise ValueError(
                        "Must select either `tcorr` or `scorr` local "
                        "connectivity option if you are using "
                        "`ncut` clustering method")

                self._local_conn = "auto"
            else:
                raise ValueError(
                    "Local connectivity method not recognized. Only tcorr,"
                    " scorr, and auto are currently "
                    "supported")
        else:
            self._local_conn = "auto"
        return
Example 13
def spatclust(img, min_cluster_size, threshold=None, index=None, mask=None):
    """
    Spatially clusters `img`

    Parameters
    ----------
    img : str or img_like
        Image file or object to be clustered
    min_cluster_size : int
        Minimum cluster size (in voxels)
    threshold : float, optional
        Value at which to threshold `img` before clustering
    index : array_like, optional
        Indices of volumes to extract from `img` for clustering
    mask : (S,) array_like, optional
        Boolean array for masking resultant data array

    Returns
    -------
    clustered : :obj:`numpy.ndarray`
        Boolean array of clustered (and thresholded) `img` data
    """

    # we need a 4D image for `niimg.iter_img`, below
    img = niimg.copy_img(check_niimg(img, atleast_4d=True))

    # temporarily set voxel sizes to 1mm isotropic so that `min_cluster_size`
    # represents the minimum number of voxels we want to be in a cluster,
    # rather than the minimum size of the desired clusters in mm^3
    if not np.all(np.abs(np.diag(img.affine)) == 1):
        img.set_sform(np.sign(img.affine))

    # grab desired volumes from provided image
    if index is not None:
        if not isinstance(index, list):
            index = [index]
        img = niimg.index_img(img, index)

    # threshold image
    if threshold is not None:
        img = niimg.threshold_img(img, float(threshold))

    clout = []
    for subbrick in niimg.iter_img(img):
        # `min_region_size` is not inclusive (as in AFNI's `3dmerge`)
        # subtract one voxel to ensure we aren't hitting this thresholding issue
        try:
            clsts = connected_regions(subbrick,
                                      min_region_size=int(min_cluster_size) -
                                      1,
                                      smoothing_fwhm=None,
                                      extract_type='connected_components')[0]
        # if no clusters are detected we get a TypeError; create a blank 4D
        # image object as a placeholder instead
        except TypeError:
            clsts = niimg.new_img_like(subbrick,
                                       np.zeros(subbrick.shape + (1, )))
        # if multiple clusters detected, collapse into one volume
        clout += [niimg.math_img('np.sum(a, axis=-1)', a=clsts)]

    # convert back to data array and make boolean
    clustered = utils.load_image(niimg.concat_imgs(clout).get_data()) != 0

    # if mask provided, mask output
    if mask is not None:
        clustered = clustered[mask]

    return clustered
Example 14
                       cut_coords=5,
                       title='Threshold image with string percentile',
                       colorbar=False)

# Showing intensity threshold image
plotting.plot_stat_map(threshold_value_img,
                       display_mode='z',
                       cut_coords=5,
                       title='Threshold image with intensity value',
                       colorbar=False)

################################################################################
# Extracting the regions by importing connected regions function
from nilearn.regions import connected_regions

regions_percentile_img, index = connected_regions(threshold_percentile_img,
                                                  min_region_size=1500)

regions_value_img, index = connected_regions(threshold_value_img,
                                             min_region_size=1500)

################################################################################
# Visualizing region extraction results
title = ("ROIs using percentile thresholding. "
         "\n Each ROI in same color is an extracted region")
plotting.plot_prob_atlas(regions_percentile_img,
                         bg_img=tmap_filename,
                         view_type='contours',
                         display_mode='z',
                         cut_coords=5,
                         title=title)
title = ("ROIs using image intensity thresholding. "
Example 15
# Showing thresholding results by importing plotting modules and its utilities
from nilearn import plotting

# Showing percentile threshold image
plotting.plot_stat_map(threshold_percentile_img, display_mode='z', cut_coords=5,
                       title='Threshold image with string percentile', colorbar=False)

# Showing intensity threshold image
plotting.plot_stat_map(threshold_value_img, display_mode='z', cut_coords=5,
                       title='Threshold image with intensity value', colorbar=False)

################################################################################
# Extracting the regions by importing connected regions function
from nilearn.regions import connected_regions

regions_percentile_img, index = connected_regions(threshold_percentile_img,
                                                  min_region_size=1500)

regions_value_img, index = connected_regions(threshold_value_img,
                                             min_region_size=1500)

################################################################################
# Visualizing region extraction results
title = ("ROIs using percentile thresholding. "
         "\n Each ROI in same color is an extracted region")
plotting.plot_prob_atlas(regions_percentile_img, bg_img=tmap_filename,
                         view_type='contours', display_mode='z',
                         cut_coords=5, title=title)
title = ("ROIs using image intensity thresholding. "
         "\n Each ROI in same color is an extracted region")
plotting.plot_prob_atlas(regions_value_img, bg_img=tmap_filename,
                         view_type='contours', display_mode='z',
Example 16
def extract_region(threshold_percentile_img, min_region_size):
    regions_img, index = connected_regions(threshold_percentile_img,
                                           min_region_size=min_region_size)
    return regions_img
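extract_region expects an already-thresholded image. A minimal usage sketch, assuming stat_img is a statistical map loaded elsewhere (a placeholder here), and using nilearn's string-percentile syntax for threshold_img:

from nilearn.image import threshold_img
from nilearn.regions import connected_regions

# threshold at the 97th percentile of the image (string percentile syntax)
threshold_percentile_img = threshold_img(stat_img, threshold='97%')

regions_img, index = connected_regions(threshold_percentile_img,
                                       min_region_size=1500)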
Example 17
def region_standardize(stat_img,
                       mask_img,
                       extract_type='connected_components',
                       min_region_size=1,
                       smoothing_fwhm=None,
                       ignore_zeros=True,
                       verbose=False):
    """Standardizes a statistical image region-wise.
    
    Regions in the statistical image will be extracted using nilearn.regions.connected_regions.
    Voxels are then z-standardized according to the region-wise mean and standard deviation.
    
    By default this function is set to be exhaustive (using extract_type='connected_components'
    and min_region_size=1), which means that every voxel is assigned to one particular region.
    
    If a region consists of only one voxel, the voxel is standardized according to the mean and
    standard deviation of the whole brain.
    
    Voxels that have not been assigned to a region are also standardized according to the mean
    and standard deviation of the whole brain.
    
    Parameters
    ----------
    stat_img: Niimg-like object
        A statistical image.
    
    extract_type: str {‘connected_components’, ‘local_regions’}, optional
        See nilearn.regions.connected_regions
        
        Default: 'connected_components'
    
    min_region_size: int, optional
        See nilearn.regions.connected_regions
        
        Default: 1 mm^3
    
    smoothing_fwhm: int, optional
        See nilearn.regions.connected_regions
        
        Default: None
    
    mask_img: Niimg-like object
        A binary image used to mask the data.
    
    ignore_zeros: boolean
        If True, zero-voxels inside the statistical image will be ignored
        when calculating descriptive statistics. This makes sense if the 
        statistical image contains lots of zero voxels (e.g. because
        it is already thresholded).
        
    verbose: boolean
        Print information about region sizes and the percentage of overall assigned voxels
        
    Returns
    -------
    region_standardized_img: Niimg-like object
        The region standardized version of the original statistical image.
    
    region_sizes: list
        A list showing how many regions were extracted (length of the list)
        and how many voxels are assigned to each region.
    
    See also
    --------
    nilearn.regions.connected_regions
    
    """

    # find regions in statistical image
    region_imgs, indices = connected_regions(stat_img,
                                             extract_type=extract_type,
                                             min_region_size=min_region_size,
                                             smoothing_fwhm=smoothing_fwhm)

    region_sizes = []
    n_assigned_voxels = 0

    stat_img_data = np.asarray(stat_img.dataobj)

    # get mean and standard deviation of whole brain
    stat_img_data_masked = apply_mask(stat_img, mask_img)

    if ignore_zeros is True:
        stat_img_data_masked = stat_img_data_masked[np.nonzero(
            stat_img_data_masked)]

    whole_brain_mean = stat_img_data_masked.mean()
    whole_brain_std = stat_img_data_masked.std()

    region_standardized_stat_img_data = np.zeros_like(stat_img_data)
    region_standardized_indices = np.zeros_like(stat_img_data)

    # z-standardize values region-wise
    for idx, region_img in enumerate(iter_img(region_imgs)):

        region_img_data = region_img.get_fdata()

        # for user information, get the size of every region and count the overall
        # number of voxels that have been assigned to a region
        region_img_size = np.count_nonzero(region_img_data)
        region_sizes.append(region_img_size)
        n_assigned_voxels += region_img_size

        # get region indices and corresponding values
        region_img_indices = np.where(region_img_data != 0)
        stat_img_data_region_values = stat_img_data[region_img_indices]

        # save indices
        region_standardized_indices[region_img_indices] = 1

        # get the size of each region
        region_size = len(region_img_indices[0])

        # if region consists of more than one voxel calculate the mean and
        # standard deviation from all voxels within that region
        if region_size > 1:
            stat_img_data_region_mean = stat_img_data_region_values.mean()
            stat_img_data_region_std = stat_img_data_region_values.std()

        # if region consists of only one voxel, standardize that voxel
        # according to mean and standard deviation of whole brain
        else:
            stat_img_data_region_mean = whole_brain_mean
            stat_img_data_region_std = whole_brain_std

        # z-standardize region values
        stat_img_data_region_values_std = (
            (stat_img_data_region_values - stat_img_data_region_mean) /
            stat_img_data_region_std)

        # impute z-standardized values into predefined region standardized data array
        region_standardized_stat_img_data[
            region_img_indices] = stat_img_data_region_values_std

    # unassigned voxels are standardized according to whole brain mean and sd
    # get indices of voxels that were not assigned to a region
    # FIXME: Currently function ignores zero-voxels that means zero-voxels
    # within the brain are also ignored. Allow to decide if zero-voxels should
    # be ignored or not by using ignore_zeros = True/False. For that you
    # have to work with masked data (zero voxels outside brain should
    # always be ignored). Masked data is a 1D array so you have to work with
    # transformations from 1D to 3D.
    unassigned_voxel_indices = np.where((region_standardized_indices != 1)
                                        & (stat_img_data != 0))
    unassigned_voxel_values = stat_img_data[unassigned_voxel_indices]
    unassigned_voxel_values_std = (
        (unassigned_voxel_values - whole_brain_mean) / whole_brain_std)
    region_standardized_stat_img_data[
        unassigned_voxel_indices] = unassigned_voxel_values_std

    # create image from region standardized data
    region_standardized_img = new_img_like(stat_img,
                                           region_standardized_stat_img_data)

    if verbose:

        for region_idx in range(0, len(region_sizes)):
            print('Region {}: {} voxels'.format(region_idx,
                                                region_sizes[region_idx]))

        percentage_assigned_voxels = n_assigned_voxels / np.count_nonzero(
            stat_img_data) * 100
        print('{} % of all voxels have been assigned to a region.\n'.format(
            round(percentage_assigned_voxels, 2)))

    return region_standardized_img, region_sizes
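The standardization itself is a plain region-wise z-score. A small numpy illustration with made-up values; a single-voxel region would fall back to the whole-brain mean and standard deviation instead:

import numpy as np

# values of the voxels belonging to one extracted region
region_values = np.array([2.1, 3.4, 2.8, 3.0])

# z-standardize within the region, as done voxel-wise above
region_values_std = (region_values - region_values.mean()) / region_values.std()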
Example 18
def spatclust(img, min_cluster_size, threshold=None, index=None, mask=None):
    """
    Spatially clusters `img`

    Parameters
    ----------
    img : str or img_like
        Image file or object to be clustered
    min_cluster_size : int
        Minimum cluster size (in voxels)
    threshold : float, optional
        Value at which to threshold `img` before clustering
    index : array_like, optional
        Indices of volumes to extract from `img` for clustering
    mask : (S,) array_like, optional
        Boolean array for masking resultant data array

    Returns
    -------
    clustered : :obj:`numpy.ndarray`
        Boolean array of clustered (and thresholded) `img` data
    """

    # we need a 4D image for `niimg.iter_img`, below
    img = niimg.copy_img(check_niimg(img, atleast_4d=True))

    # temporarily set voxel sizes to 1mm isotropic so that `min_cluster_size`
    # represents the minimum number of voxels we want to be in a cluster,
    # rather than the minimum size of the desired clusters in mm^3
    if not np.all(np.abs(np.diag(img.affine)) == 1):
        img.set_sform(np.sign(img.affine))

    # grab desired volumes from provided image
    if index is not None:
        if not isinstance(index, list):
            index = [index]
        img = niimg.index_img(img, index)

    # threshold image
    if threshold is not None:
        img = niimg.threshold_img(img, float(threshold))

    clout = []
    for subbrick in niimg.iter_img(img):
        # `min_region_size` is not inclusive (as in AFNI's `3dmerge`)
        # subtract one voxel to ensure we aren't hitting this thresholding issue
        try:
            clsts = connected_regions(subbrick,
                                      min_region_size=int(min_cluster_size) - 1,
                                      smoothing_fwhm=None,
                                      extract_type='connected_components')[0]
        # if no clusters are detected we get a TypeError; create a blank 4D
        # image object as a placeholder instead
        except TypeError:
            clsts = niimg.new_img_like(subbrick,
                                       np.zeros(subbrick.shape + (1,)))
        # if multiple clusters detected, collapse into one volume
        clout += [niimg.math_img('np.sum(a, axis=-1)', a=clsts)]

    # convert back to data array and make boolean
    clustered = utils.load_image(niimg.concat_imgs(clout).get_data()) != 0

    # if mask provided, mask output
    if mask is not None:
        clustered = clustered[mask]

    return clustered
Example 19
# threshold_value_img.to_filename("D:/FaceData/threshold_value_img.nii.gz")
from nilearn import plotting
# plotting.plot_stat_map(threshold_percentile_img,display_mode='x',
#                        cut_coords=5, title='threshold image with string'
#                       'percentile',colorbar=False)
plotting.plot_stat_map(threshold_value_img,
                       draw_cross=False,
                       cut_coords=[30, -72, -6],
                       title='threshold image with intensity value',
                       colorbar=False)
from nilearn.regions import connected_regions
# regions_percentile_img,index = connected_regions(threshold_percentile_img,
#                                                 min_region_size=1500)
regions_value_img, index = connected_regions(threshold_value_img,
                                             min_region_size=1400)
print(regions_value_img.shape)
# regions_value_img.to_filename("D:/FaceData/roi.nii.gz")
# title = ("ROIs using percentile threshold."
#        "\n Each ROI in same color is an extracted region")
# plotting.plot_prob_atlas(regions_percentile_img,anat_img=t_map,
#                         view_type='contours',display_mode='x',
#                         cut_coords=5)
# title = ("ROIs using image intensity threshold."
#        "\n Each ROI in same color is an extracted region")
plotting.plot_prob_atlas(regions_value_img,
                         bg_img=t_map,
                         view_type='contours',
                         cut_coords=[30, -72, -6])
plotting.plot_roi(regions_value_img,
                  anat_img,
Example 20
def process_img(stat_img, cluster_extent, voxel_thresh=1.96, direction='both'):
    """
    Parameters
    ----------
    stat_img : Niimg_like object
        Thresholded statistical map image
    cluster_extent : int
        Minimum number of voxels required to consider a cluster
    voxel_thresh : float, optional
        Threshold to apply to `stat_img`. Use `direction` to specify the
        directionality of the threshold. If a negative number is provided a
        percentile threshold is used instead, where the percentile is
        determined by the equation `100 - voxel_thresh`. Default: 1.96
    direction : str, optional
        Specifies the direction in which `voxel_thresh` should be applied.
        Possible values are 'both', 'pos' or 'neg'. Default: 'both'

    Returns
    -------
    cluster_img : Nifti1Image
        4D image of brain regions, where each volume is a distinct cluster
    """
    # get input data image
    img_4d = check_niimg(stat_img, atleast_4d=True)
    if img_4d.shape[-1] == 1:
        stat_img = img_4d.slicer[..., 0]
    else:
        stat_img = image.index_img(img_4d, 0)

    # threshold image
    if voxel_thresh < 0:
        voxel_thresh = '{}%'.format(100 + voxel_thresh)
    else:
        # ensure that threshold is not greater than most extreme value in image
        if voxel_thresh > np.nan_to_num(np.abs(stat_img.get_fdata())).max():
            empty = np.zeros(stat_img.shape + (1, ))
            return image.new_img_like(stat_img, empty)
    thresh_img = image.threshold_img(stat_img, threshold=voxel_thresh)

    # extract clusters
    min_region_size = cluster_extent * np.prod(thresh_img.header.get_zooms())
    clusters = []

    if direction == 'both':
        direction_list = ['pos', 'neg']
    elif direction == 'pos':
        direction_list = ['pos']
    elif direction == 'neg':
        direction_list = ['neg']

    for sign in direction_list:
        # keep only data of given sign
        data = thresh_img.get_fdata().copy()
        data[(data < 0) if sign == 'pos' else (data > 0)] = 0

        # Do nothing if data array contains only zeros
        if np.any(data):
            try:
                if min_region_size != 0.0:
                    min_region_size -= 1e-8
                clusters += [
                    connected_regions(image.new_img_like(thresh_img, data),
                                      min_region_size=min_region_size,
                                      extract_type='connected_components')[0]
                ]
            except TypeError:  # for no clusters
                pass

    # Return empty image if no clusters were found
    if len(clusters) == 0:
        return image.new_img_like(thresh_img, np.zeros(data.shape + (1, )))

    # Reorder clusters by their size
    clust_img = image.concat_imgs(clusters)
    cluster_size = (clust_img.get_fdata() != 0).sum(axis=(0, 1, 2))
    new_order = np.argsort(cluster_size)[::-1]
    clust_img_ordered = image.index_img(clust_img, new_order)

    return clust_img_ordered
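The direction handling above reduces to zeroing out the opposite sign before cluster extraction. A tiny numpy illustration:

import numpy as np

data = np.array([-2.5, 0.0, 1.7, -0.3, 3.1])

pos = data.copy()
pos[pos < 0] = 0  # 'pos': keep positive values only -> [0. , 0. , 1.7, 0. , 3.1]

neg = data.copy()
neg[neg > 0] = 0  # 'neg': keep negative values only -> [-2.5, 0. , 0. , -0.3, 0. ]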
Example 21
    def create_local_clustering(self, overwrite, r_thresh, min_region_size=80):
        """
        API for performing any of a variety of clustering routines available through NiLearn.
        """
        import os.path as op
        from scipy.sparse import save_npz, load_npz
        from nilearn.regions import connected_regions

        try:
            conn_comps = connected_regions(self._clust_mask_corr_img,
                                           extract_type='connected_components',
                                           min_region_size=min_region_size)
            self._conn_comps = conn_comps[0]
            self.num_conn_comps = len(conn_comps[1])
        except:
            raise ValueError('Clustering mask is empty!')

        if not self._conn_comps:
            if np.sum(np.asarray(self._clust_mask_corr_img.dataobj)) == 0:
                raise ValueError('Clustering mask is empty!')
            else:
                self._conn_comps = self._clust_mask_corr_img
                self.num_conn_comps = 1
        print(
            f"Detected {self.num_conn_comps} connected components in clustering mask with a mininimum region "
            f"size of {min_region_size}")
        if self.clust_type == 'complete' or self.clust_type == 'average' or self.clust_type == 'single':
            if self.num_conn_comps > 1:
                raise ValueError(
                    'Clustering method unstable with spatial constraints applied to multiple '
                    'connected components.')

        if (self.clust_type == 'ward'
                and self.num_conn_comps > 1) or self.clust_type == 'ncut':
            if self.k < self.num_conn_comps:
                raise ValueError(
                    'k must minimally be greater than the total number of connected components in '
                    'the mask in the case of agglomerative clustering.')
            if self.local_corr == 'tcorr' or self.local_corr == 'scorr':
                self._local_conn_mat_path = f"{self.uatlas.split('.nii')[0]}_{self.local_corr}_conn.npz"

                if (not op.isfile(
                        self._local_conn_mat_path)) or (overwrite is True):
                    from pynets.fmri.clustools import make_local_connectivity_tcorr, make_local_connectivity_scorr
                    if self.local_corr == 'tcorr':
                        self._local_conn = make_local_connectivity_tcorr(
                            self._func_img,
                            self._clust_mask_corr_img,
                            thresh=r_thresh)
                    elif self.local_corr == 'scorr':
                        self._local_conn = make_local_connectivity_scorr(
                            self._func_img,
                            self._clust_mask_corr_img,
                            thresh=r_thresh)
                    else:
                        raise ValueError(
                            'Local connectivity type not available')

                    print(
                        f"Saving spatially constrained connectivity structure to: {self._local_conn_mat_path}"
                    )
                    save_npz(self._local_conn_mat_path, self._local_conn)
                elif op.isfile(self._local_conn_mat_path):
                    self._local_conn = load_npz(self._local_conn_mat_path)
            elif self.local_corr == 'allcorr':
                if self.clust_type == 'ncut':
                    raise ValueError(
                        'Must select either `tcorr` or `scorr` local connectivity option if you are using '
                        '`ncut` clustering method')
                self._local_conn = 'auto'
            else:
                raise ValueError(
                    'Local connectivity method not recognized. Only tcorr, scorr, and auto are currently '
                    'supported')
        else:
            self._local_conn = 'auto'
        return