Ejemplo n.º 1
0
def map_plane(estimates,
              atlas,
              path,
              suffix="",
              plane="z",
              cbar=False,
              annotate=False,
              vmin=None,
              vmax=None,
              cmaps=None,
              print_fig=True,
              verbose=False):
    """Render one stat-map PNG per column of `estimates`, painted onto `atlas`.

    Parameters
    ----------
    estimates : pandas.DataFrame-like
        One column per feature; row i holds the value assigned to atlas
        label i + 1.
    atlas : niimg-like
        Labeled atlas image whose integer voxel values (1-based) index the
        rows of `estimates`.
    path : str
        Output directory for the PNG files.
    suffix : str
        Appended to the feature name in each output file name.
    plane : str
        nilearn display mode ("x", "y", "z", ...).
    cbar, annotate : bool
        Toggle nilearn's colorbar / annotations.
    vmin, vmax : float, optional
        Display threshold and upper color bound.
    cmaps : sequence, optional
        One colormap per feature column (indexed positionally). Defaults to
        an empty list.
    print_fig : bool
        Show each figure (and print the feature name) as it is produced.
    verbose : bool
        Print min/mean/max of each feature's values instead.
    """
    from nilearn import image, plotting

    # Fix: the original default was `cmaps=[]`, a shared mutable default that
    # persists across calls; use a None sentinel and build a fresh list.
    if cmaps is None:
        cmaps = []

    for f, feature in enumerate(estimates.columns):
        # NOTE(review): get_data() is deprecated in recent nibabel in favor of
        # get_fdata(); kept as-is to preserve the original (integer label)
        # dtype behavior — confirm before migrating.
        stat_map = image.copy_img(atlas).get_data()
        data = estimates[feature]
        if verbose:
            print("{:20s} Min: {:6.4f}  Mean: {:6.4f}  Max: {:6.4f}".format(
                feature, min(data), np.mean(data), max(data)))
        if not verbose and print_fig:
            print("\n{}".format(feature))
        # Replace each 1-based atlas label with that feature's estimate.
        for i, value in enumerate(data):
            stat_map[stat_map == i + 1] = value
        stat_map = image.new_img_like(atlas, stat_map)
        display = plotting.plot_stat_map(stat_map,
                                         display_mode=plane,
                                         symmetric_cbar=False,
                                         colorbar=cbar,
                                         cmap=cmaps[f],
                                         threshold=vmin,
                                         vmax=vmax,
                                         alpha=0.5,
                                         annotate=annotate,
                                         draw_cross=False)
        file_name = "{}/{}{}.png".format(path, feature, suffix)
        display.savefig(file_name, dpi=250)
        # `transparent_background` is defined elsewhere in this project;
        # presumably post-processes the saved PNG's background.
        transparent_background(file_name)
        if print_fig:
            plotting.show()
        display.close()
Ejemplo n.º 2
0
def spatclust(img, min_cluster_size, threshold=None, index=None, mask=None):
    """
    Spatially clusters `img`

    Parameters
    ----------
    img : str or img_like
        Image file or object to be clustered
    min_cluster_size : int
        Minimum cluster size (in voxels)
    threshold : float, optional
        Whether to threshold `img` before clustering
    index : array_like, optional
        Whether to extract volumes from `img` for clustering
    mask : (S,) array_like, optional
        Boolean array for masking resultant data array

    Returns
    -------
    clustered : :obj:`numpy.ndarray`
        Boolean array of clustered (and thresholded) `img` data
    """

    # we need a 4D image for `niimg.iter_img`, below
    img = niimg.copy_img(check_niimg(img, atleast_4d=True))

    # temporarily set voxel sizes to 1mm isotropic so that `min_cluster_size`
    # represents the minimum number of voxels we want to be in a cluster,
    # rather than the minimum size of the desired clusters in mm^3
    # (the affine diagonal holds the per-axis voxel sizes; np.sign keeps
    # axis orientation while forcing magnitude 1)
    if not np.all(np.abs(np.diag(img.affine)) == 1):
        img.set_sform(np.sign(img.affine))

    # grab desired volumes from provided image
    if index is not None:
        if not isinstance(index, list):
            index = [index]
        img = niimg.index_img(img, index)

    # threshold image
    if threshold is not None:
        img = niimg.threshold_img(img, float(threshold))

    clout = []
    for subbrick in niimg.iter_img(img):
        # `min_region_size` is not inclusive (as in AFNI's `3dmerge`)
        # subtract one voxel to ensure we aren't hitting this thresholding issue
        try:
            clsts = connected_regions(subbrick,
                                      min_region_size=int(min_cluster_size) -
                                      1,
                                      smoothing_fwhm=None,
                                      extract_type='connected_components')[0]
        # if no clusters are detected we get a TypeError; create a blank 4D
        # image object as a placeholder instead
        except TypeError:
            clsts = niimg.new_img_like(subbrick,
                                       np.zeros(subbrick.shape + (1, )))
        # if multiple clusters detected, collapse into one volume
        clout += [niimg.math_img('np.sum(a, axis=-1)', a=clsts)]

    # convert back to data array and make boolean
    # NOTE(review): `utils.load_image` is project-local; presumably it
    # reshapes the 4D array to a (S, T) voxel-by-volume array matching the
    # (S,) `mask` below — confirm against its definition
    clustered = utils.load_image(niimg.concat_imgs(clout).get_data()) != 0

    # if mask provided, mask output
    if mask is not None:
        clustered = clustered[mask]

    return clustered
Ejemplo n.º 3
0
    def create_binaryMasks(self, mask_fname):
        """Create binary masks from mapped ICA components.

        Thresholds each mapped IC (by percentile and/or fraction of its
        maximum, per ``self.mask_specs``), binarizes it, optionally dilates
        it, then writes a 4D NIfTI of all ICN masks plus a CSV of labels.

        Parameters
        ----------
        mask_fname : str
            Target file name; any .img/.hdr/.nii/.csv/.nii.gz extension is
            stripped, then .nii.gz (mask) and .csv (labels) are appended.
        """
        # Normalize the output base name: strip one known extension, or two
        # for the double ".nii.gz" case.
        if os.path.splitext(mask_fname)[-1] in [
                '.img', '.hdr', '.nii', '.csv'
        ]:
            mask_fname = os.path.splitext(mask_fname)[0]
        elif os.path.splitext(mask_fname)[-1] in ['.gz']:
            mask_fname = os.path.splitext(mask_fname)[0]
            if os.path.splitext(mask_fname)[-1] in ['.nii']:
                mask_fname = os.path.splitext(mask_fname)[0]
        mask_dir = os.path.dirname(mask_fname)
        mask_basename = os.path.basename(mask_fname)
        mask_fname = opj(mask_dir, mask_basename + '.nii.gz')
        csv_fname = opj(mask_dir, mask_basename + '.csv')

        # Summarize the thresholding settings and output paths for the user.
        title = 'Created masks from classified ICs'
        message = ''
        if self.mask_specs['thresh_percentile']:
            message += 'Classified ICs thresholded using the top '
            message += str(int(
                self.mask_specs['cutoff_percentile'])) + '% of voxels. '
        if self.mask_specs['thresh_max']:
            message += 'Classified ICs thresholded using '
            message += str(
                self.mask_specs['cutoff_fractMax']) + ' * maximum value. '
        message += '\n\nCreated files:'
        message += '\n  ' + os.path.basename(
            mask_fname) + ': 4D-nifti containing ICs classified as ICNs'
        message += '\n  ' + os.path.basename(
            csv_fname) + ': ICN names/labels for above nifti'
        message += '\n\nFiles created in:\n  ' + mask_dir
        QtWidgets.QMessageBox.information(None, title, message)

        # Create 4D nifti binary mask
        mask_noise = []
        mask_imgs = []
        mask_names = []

        # lambda fn. below separates string w/ '>' then casts last part into digit if needed
        for mapping_lookup in sorted(
                self.gd['mapped'].keys(),
                key=lambda item:
            (int(item.partition('>')[-1])
             if item.partition('>')[-1].isdigit() else float('inf'))):
            ica_lookup = self.gd['mapped'][mapping_lookup]['ica_lookup']
            icn_name = self.gd['mapped'][mapping_lookup]['icn_custom_name']
            # NOTE(review): the regex '\.*noise' matches "noise" preceded only
            # by zero or more literal dots; if the intent was "any name
            # containing 'noise'", the pattern should be '.*noise' — confirm.
            if re.match('\\.*noise', icn_name, flags=re.IGNORECASE):
                mask_noise.append('noise')
            else:
                mask_noise.append('ICN')

            ica_img = image.copy_img(self.gd['ica'][ica_lookup]['img'])
            ica_dat = ica_img.get_fdata(caching='unchanged')
            ica_dat[np.isnan(ica_dat)] = 0
            # Zero out voxels below the configured percentile cutoff.
            if self.mask_specs['thresh_percentile']:
                threshold = np.percentile(ica_dat,
                                          self.mask_specs['cutoff_percentile'])
                ica_dat[ica_dat < threshold] = 0
            # Zero out voxels below a fraction of the component's maximum.
            if self.mask_specs['thresh_max']:
                threshold = ica_dat.max() * self.mask_specs['cutoff_fractMax']
                ica_dat[ica_dat < threshold] = 0
            ica_dat[ica_dat > 0] = 1  # binarize surviving voxels
            if self.mask_specs['smooth_mask']:
                ica_dat = binary_dilation(
                    ica_dat)  #smooths edges & fills holes in mask
                ica_dat = binary_dilation(ica_dat)  #repeat, further smoothing
            new_ica_img = image.new_img_like(ica_img,
                                             ica_dat,
                                             copy_header=True)
            mask_imgs.append(new_ica_img)
            mask_names.append(icn_name)
        image.concat_imgs(mask_imgs,
                          dtype=self.mask_specs['mask_dtype'],
                          auto_resample=True).to_filename(mask_fname)

        # Create csv w/ ICA networks & named ICN matches as columns
        icn_info = {}
        for k, icn_name in enumerate(mask_names):
            ic_name = mask_basename + ',%d' % (k + 1)
            icn_info[ic_name] = (icn_name, mask_noise[k])
        with open(csv_fname, 'w') as f:
            writer = csv.writer(f)
            writer.writerow(
                ('ICA component:', 'ICN Label:', 'Noise Classification:'))

            #lambda separates string w/ ',' then casts last part into digit if needed
            # Fix: test the partitioned part itself (was `item[-1].isdigit()`,
            # which checked only the final character and could raise
            # ValueError via int('') for keys without a comma); now consistent
            # with the '>' sort lambda above.
            for ic in sorted(icn_info.keys(),
                             key=lambda item:
                             (int(item.partition(',')[-1])
                              if item.partition(',')[-1].isdigit() else
                              float('inf'))):
                writer.writerow((ic, icn_info[ic][0], icn_info[ic][1]))
Ejemplo n.º 4
0
def spatclust(img, min_cluster_size, threshold=None, index=None, mask=None):
    """
    Spatially clusters `img`

    Parameters
    ----------
    img : str or img_like
        Image file or object to be clustered
    min_cluster_size : int
        Minimum cluster size (in voxels)
    threshold : float, optional
        Whether to threshold `img` before clustering
    index : array_like, optional
        Whether to extract volumes from `img` for clustering
    mask : (S,) array_like, optional
        Boolean array for masking resultant data array

    Returns
    -------
    clustered : :obj:`numpy.ndarray`
        Boolean array of clustered (and thresholded) `img` data
    """

    # work on a private 4D copy so `niimg.iter_img` below always applies
    img = niimg.copy_img(check_niimg(img, atleast_4d=True))

    # force 1mm isotropic voxels (sign preserves axis orientation) so that
    # `min_cluster_size` counts voxels rather than mm^3 when handed to
    # `connected_regions`
    if not np.all(np.abs(np.diag(img.affine)) == 1):
        img.set_sform(np.sign(img.affine))

    # restrict to the requested volumes, if any
    if index is not None:
        index = index if isinstance(index, list) else [index]
        img = niimg.index_img(img, index)

    # apply the optional intensity threshold before clustering
    if threshold is not None:
        img = niimg.threshold_img(img, float(threshold))

    collapsed_vols = []
    for volume in niimg.iter_img(img):
        try:
            # `min_region_size` is exclusive (unlike AFNI's `3dmerge`), so
            # subtract one voxel to make `min_cluster_size` inclusive
            regions = connected_regions(
                volume,
                min_region_size=int(min_cluster_size) - 1,
                smoothing_fwhm=None,
                extract_type='connected_components')[0]
        except TypeError:
            # no clusters detected raises TypeError; substitute a blank 4D
            # placeholder image instead
            regions = niimg.new_img_like(volume,
                                         np.zeros(volume.shape + (1, )))
        # collapse any multiple detected clusters into a single volume
        collapsed_vols.append(niimg.math_img('np.sum(a, axis=-1)', a=regions))

    # back to a plain data array, then booleanize
    clustered = utils.load_image(
        niimg.concat_imgs(collapsed_vols).get_data()) != 0

    # apply the caller's voxel mask, if provided
    if mask is not None:
        clustered = clustered[mask]

    return clustered