Example #1
def apply_roi_mask_4d(image, mask, brain_mask_path=None, method='nilearn'):
    '''
    Apply a mask to a 4-D image and store the resulting vectors in a
    measurements array (later used as input for pyrsa.dataset()).
    '''

    # Apply brain mask
    if isinstance(brain_mask_path, str):
        brain_mask = nifti1.load(brain_mask_path)
        image_array = apply_brain_mask(image, brain_mask)
    else:
        image_array = image.get_fdata()

    if method == 'nilearn':
        image_tmp = nifti1.Nifti1Image(image_array, mask.affine.copy())
        masked = masking.apply_mask(image_tmp,
                                    mask,
                                    dtype='f',
                                    smoothing_fwhm=None,
                                    ensure_finite=True)
    elif method == 'custom':
        # Alternative to nilearn's implementation (allows for plotting of masked 3d image)
        mask_array = mask.get_fdata()
        masked = image_array[mask_array.astype(bool)]
        masked = masked.T
    elif method == '3d':
        mask_array = mask.get_fdata()
        image_masked = np.multiply(mask_array, image_array)
        masked = nifti1.Nifti1Image(image_masked, mask.affine.copy())
    else:
        raise ValueError("method must be 'nilearn', 'custom' or '3d'")

    return masked
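A minimal usage sketch for the function above on synthetic data, assuming the snippet's own imports (numpy as np, nibabel's nifti1, nilearn's masking) are in scope; the random arrays stand in for real beta images:

import numpy as np
from nibabel import nifti1

# Synthetic 4-D image (5 volumes) and a binary 3-D mask on the same grid
affine = np.eye(4)
image = nifti1.Nifti1Image(np.random.rand(8, 8, 8, 5), affine)
mask = nifti1.Nifti1Image((np.random.rand(8, 8, 8) > 0.5).astype(np.int8), affine)

# The 'nilearn' method returns a (n_volumes, n_mask_voxels) measurements array
measurements = apply_roi_mask_4d(image, mask, method='nilearn')
print(measurements.shape)  # (5, n_mask_voxels)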
Example #2
def save_img(data, filename, mask, header=None):
    """ Save a vectorized image to file. """
    if not header:
        header = mask.get_header()
    header.set_data_dtype(data.dtype)  # Avoids loss of precision
    img = nifti1.Nifti1Image(mask.unmask(data), None, header)
    img.to_filename(filename)
Example #3
def make_mask_map_4d(data, infile, outfile):
    """ Make mask map with 4d dimeions
    data: values for levels in infile. Shape = [4th dimension, regions]
    infile: input file to replace levels with values
    outfile: output file name
    """
    from neurosynth.base.mask import Masker
    from neurosynth.base import imageutils
    from nibabel import nifti1

    data = np.array(data)

    # Load image with masker
    masker = Masker(infile)
    img = imageutils.load_imgs(infile, masker)

    header = masker.get_header()

    shape = header.get_data_shape()[0:3] + (data.shape[0], )
    header.set_data_shape(shape)

    result = []

    for t_dim, t_val in enumerate(data):
        result.append(img.copy())
        for num, value in enumerate(t_val):
            np.place(result[t_dim], img == num + 1, [value])

    result = np.hstack(result)

    header.set_data_dtype(result.dtype)  # Avoids loss of precision
    img = nifti1.Nifti1Image(masker.unmask(result).squeeze(), None, header)
    img.to_filename(outfile)
Example #4
File: io.py Project: iZehan/spatial-pbs
def save_3d_data(data, affine, filePath):
    try:
        img = nib.Nifti1Image(numpy.expand_dims(data, axis=3), affine)
        img.to_filename(filePath)
    except Exception:
        print("Failed to save data to", filePath)
        print("data shape:", data.shape)
        raise
    return
Example #5
File: snippet.py Project: szabo92/gistable
def tsnr(data, affine, file_name):
    mean_d = np.mean(data, axis=-1)
    std_d = np.std(data, axis=-1)
    tsnr = mean_d / std_d
    tsnr[np.where(np.isinf(tsnr))] = np.nan
    mean_tsnr = np.nanmean(np.ravel(tsnr))
    tsnr_image = nifti.Nifti1Image(tsnr, affine)
    save(tsnr_image, file_name)
    savemat(file_name.split('.')[0], {'tsnr': tsnr})
    return mean_tsnr
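The temporal SNR computed above is just the voxelwise mean over time divided by the voxelwise standard deviation; a self-contained sketch on synthetic data:

import numpy as np

# Synthetic 4-D data: a 10x10x10 volume with 50 time points
data = np.random.rand(10, 10, 10, 50) + 100.0

mean_d = data.mean(axis=-1)        # voxelwise temporal mean
std_d = data.std(axis=-1)          # voxelwise temporal standard deviation
tsnr = mean_d / std_d              # temporal SNR per voxel
tsnr[np.isinf(tsnr)] = np.nan      # guard against zero-variance voxels
print(np.nanmean(tsnr))            # grand mean tSNR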
Example #6
def map_peaks_to_image(peaks, r=4, vox_dims=(2, 2, 2), dims=(91, 109, 91), header=None):
    """ Take a set of discrete foci (i.e., 2-D array of xyz coordinates)
    and generate a corresponding image, convolving each focus with a
    hard sphere of radius r."""
    data = np.zeros(dims)
    for p in peaks:
        valid = get_sphere(p, r, vox_dims, dims)
        valid = valid[:, ::-1]
        data[tuple(valid.T)] = 1
    return nifti1.Nifti1Image(data, None, header=header)
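get_sphere is not shown here; a self-contained sketch of the same hard-sphere idea, marking every voxel within r mm of a focus, might look like this:

import numpy as np

def sphere_mask(focus, r=4, vox_dims=(2, 2, 2), dims=(91, 109, 91)):
    # Boolean mask of all voxels within r mm of the voxel coordinate 'focus'
    gx, gy, gz = np.ogrid[0:dims[0], 0:dims[1], 0:dims[2]]
    dist2 = (((gx - focus[0]) * vox_dims[0]) ** 2 +
             ((gy - focus[1]) * vox_dims[1]) ** 2 +
             ((gz - focus[2]) * vox_dims[2]) ** 2)
    return dist2 <= r ** 2

mask = sphere_mask((45, 54, 45))
print(int(mask.sum()), "voxels inside the sphere")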
Example #7
def save_img(data, filename, masker, header=None):
    """ Save a vectorized image to file. """
    if not header:
        header = masker.get_header()
    header.set_data_dtype(data.dtype)  # Avoids loss of precision
    # Update min/max -- this should happen on save, but doesn't seem to
    header['cal_max'] = data.max()
    header['cal_min'] = data.min()
    img = nifti1.Nifti1Image(masker.unmask(data), None, header)
    img.to_filename(filename)
Example #8
def atlas_from_freesurfer_masks(sub,
                                mask_dir,
                                roi_ids,
                                rh_constant=200,
                                overwrite=False):
    '''
    Create atlas from individual binary masks produced by freesurfer's 'mri_label2vol' tool

    Args:
        sub (int):
            subject number
        mask_dir (str):
            path to subject's folder with freesurfer output
        roi_ids (dict):
            {target_ROI: Left Hemisphere ROI index}
        rh_constant (int):
            Right Hemisphere ROI index := Left Hemisphere ROI index + rh_constant
        overwrite (bool):
            If True, delete the old atlas file and create a new one.

    Returns:
        atlas (Nifti1Image):
            T1w Atlas with voxel values indicative of ROI identity (as defined in 'roi_ids')
    '''
    atlas_path = os.path.join(
        mask_dir, "sub-" + str(sub).zfill(2) + "_Mask_T1w_Glasser.nii.gz")

    if os.path.isfile(atlas_path) and not overwrite:
        atlas = nifti1.load(atlas_path)
    else:
        # create atlas
        hemi_dict = {'L': 'lh.L_', 'R': 'rh.R_'}
        hemi_add = {'L': 0, 'R': rh_constant}
        mask_array = []
        mask_array_tmp = []
        for roi in roi_ids.keys():
            for hemi in ['L', 'R']:
                mask_raw = nifti1.load(
                    os.path.join(mask_dir,
                                 hemi_dict[hemi] + roi + '_ROI.fbr.nii.gz'))
                if len(mask_array) == 0:
                    mask_array_tmp = mask_raw.get_fdata()
                    mask_array_tmp[
                        mask_array_tmp == 1] = roi_ids[roi][0] + hemi_add[
                            hemi]  # convert binary mask by assigning roi_id to masked voxels
                    mask_array = mask_array_tmp.copy()
                    freesurfer_affine = mask_raw.affine.copy()
                else:
                    mask_array_tmp = mask_raw.get_fdata()
                    mask_array[mask_array_tmp ==
                               1] = roi_ids[roi][0] + hemi_add[hemi]
        atlas = nifti1.Nifti1Image(mask_array, freesurfer_affine)
        nifti1.save(atlas, atlas_path)
    return atlas
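A hypothetical call, assuming mri_label2vol output for two ROIs lives under mask_dir (the path and the ROI indices are made up for illustration):

# Made-up ROI table: {ROI name: [left-hemisphere index]}
roi_ids = {'V1': [1], 'MT': [23]}

atlas = atlas_from_freesurfer_masks(sub=1,
                                    mask_dir='/data/sub-01/masks',  # made-up path
                                    roi_ids=roi_ids,
                                    rh_constant=200)
# Voxel values now encode ROI identity: 1/23 (left) and 201/223 (right)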
Example #9
def make_roi_mask(atlas,
                  betas_example,
                  roi_id,
                  fwhm=None,
                  interpolation='nearest'):
    '''
    Extract an ROI-specific mask from an atlas in T1w space and resample it to
    the dimensions of the to-be-masked image of beta coefficients

    Args:
        atlas (Nifti1Image):
            Atlas with voxel values indicative of ROI identity
        betas_example (Nifti1Image):
            Example image of GLM coefficients which will be masked later (only needed for shape and affine)
        roi_id (int):
            ROI-specific integer used in "atlas"
        fwhm (float OR np.ndarray OR None):
            FWHM in mm (along each axis, axis-specific OR skip smoothing)
        interpolation (str):
            Interpolation method by nilearn.image.resample_img (consult nilearn documentation for other options)
    
    Returns:
        mask_nifti_r (Nifti1Image):
            resampled ROI-specific mask image (target dimensions)
        mask_nifti_s (Nifti1Image):
            smoothed ROI-specific mask image (original dimensions)
        mask_nifti (Nifti1Image):
            original binary ROI-specific mask image

    '''

    # Extract atlas and target nii data
    atlas_array = atlas.get_fdata()
    atlas_affine = atlas.affine.copy()
    betas_array = betas_example.get_fdata()
    betas_affine = betas_example.affine.copy()

    # Extract ROI-specific mask and resample it
    mask = (atlas_array == roi_id).astype(float)
    mask_nifti = nifti1.Nifti1Image(mask, atlas_affine)

    # Gaussian smoothing of mask
    if fwhm is not None:
        mask_nifti_s = image.smooth_img(mask_nifti, fwhm)
    else:
        mask_nifti_s = mask_nifti

    # Resample mask
    mask_nifti_r = image.resample_img(mask_nifti_s,
                                      target_affine=betas_affine,
                                      target_shape=betas_array.shape,
                                      interpolation=interpolation,
                                      copy=True)
    return mask_nifti_r, mask_nifti_s, mask_nifti
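A minimal end-to-end sketch of the same resampling step on synthetic images (shapes and affines are made up; assumes nilearn is installed):

import numpy as np
from nibabel import nifti1
from nilearn import image

# Synthetic binary 'atlas' mask on a 1 mm grid and a 'betas' image on a 3 mm grid
mask_hr = nifti1.Nifti1Image((np.random.rand(30, 30, 30) > 0.9).astype(float),
                             np.eye(4))
betas = nifti1.Nifti1Image(np.random.rand(10, 10, 10), np.diag([3, 3, 3, 1]))

mask_r = image.resample_img(mask_hr,
                            target_affine=betas.affine,
                            target_shape=betas.shape,
                            interpolation='nearest')
print(mask_r.shape)  # (10, 10, 10)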
Example #10
def non_overlapping_segmentation(meshfiles, reffile, resolve):
    refnii = radiological.load(reffile)
    meshes = [loadmesh(mf) for mf in meshfiles]

    allmasks = np.zeros(refnii.shape + (len(meshes), ))

    for i, mesh in enumerate(meshes):
        imd = vtk.vtkImageData()
        imd.SetExtent(0, refnii.shape[0] - 1, 0, refnii.shape[1] - 1, 0,
                      refnii.shape[2] - 1)
        imd.SetSpacing(*refnii.get_header().get_zooms())

        filt = vtk.vtkSelectEnclosedPoints()
        filt.SetInputData(imd)
        filt.SetSurfaceData(mesh)
        filt.SetTolerance(0.00001)
        filt.Update()

        mask = np.reshape(vtknp.vtk_to_numpy(
            filt.GetOutput().GetPointData().GetArray('SelectedPoints')).copy(),
                          refnii.shape,
                          order='F')

        allmasks[..., i] = mask

    labels = (allmasks * np.arange(1, 1 + len(meshes))).sum(axis=3)
    contested = np.transpose(np.nonzero(allmasks.sum(axis=3) > 1))

    if resolve:
        ipdds = list()
        for mesh in meshes:
            ipdd = vtk.vtkImplicitPolyDataDistance()
            ipdd.SetInput(mesh)
            ipdds.append(ipdd)

        for voxel in contested:
            # NB: This also includes the meshes that do not include the point, but those meshes will have higher distances
            #     and will not influence the result
            dists = [
                ipdd.FunctionValue(*(voxel * refnii.get_header().get_zooms()))
                for ipdd in ipdds
            ]
            labels[voxel[0], voxel[1],
                   voxel[2]] = np.argmin(np.array(dists)) + 1
    else:
        for voxel in contested:
            labels[voxel[0], voxel[1], voxel[2]] = 0

    result = nii.Nifti1Image(labels, refnii.affine)
    result.set_qform(refnii.affine)

    return result
Example #11
def merge_freesurfer_masks(mask_dict_raw, merge_list, merged_names):
    '''
    Merge prespecified tuples of ROIs in mask_dict_raw for both hemispheres.

    Args:
        mask_dict_raw (dict):
            {hemi_roi : nifti object of the corresponding binary mask}
        merge_list (list):
            List of tuples of ROI names. All ROIs within a Tuple will be merged.
        merged_names (list):
            Corresponding new names for merged masks.
        
    Returns:
        mask_dict (dict):
            mask_dict_raw with each specified tuple of masks merged into one

    '''
    assert len(merge_list) == len(merged_names), \
        "Every tuple in 'merge_list' requires a unique new name, provided in 'merged_names'."

    mask_dict = mask_dict_raw.copy()
    s = 0  # counter for merged_names
    for sublist in merge_list:
        for hemi in ['L', 'R']:
            mask_raw = mask_dict[sublist[0] + '_' + hemi]
            mask_array = mask_raw.get_fdata()
            mask_affine = mask_raw.affine.copy()

            for m in range(1, len(sublist)):
                mask_array += mask_dict[
                    sublist[m] + '_' + hemi].get_fdata()  # add remaining masks
            mask_array[mask_array > 1] = 1  # set overlapping voxels to 1
            mask_merged = nifti1.Nifti1Image(mask_array, mask_affine)
            mask_dict.update({merged_names[s] + '_' + hemi: mask_merged})
        s += 1

    # Remove original parts of now merged masks
    m = 0
    for sublist in merge_list:
        for roi in sublist:
            for hemi in ['L', 'R']:
                mask_dict.pop(roi + '_' + hemi, None)
                m += 1

    assert len(mask_dict) == len(mask_dict_raw) - m + 2 * len(merged_names), \
        "Unexpected number of masks in final mask_dict. Check whether the names in 'merged_names' differ from the strings in 'merge_list'."

    return mask_dict
Example #12
def test_two_to_one():
    # test going from two to one file in save
    shape = (2, 4, 6)
    npt = np.float32
    data = np.arange(np.prod(shape), dtype=npt).reshape(shape)
    affine = np.diag([1, 2, 3, 1])
    affine[:3, 3] = [3, 2, 1]
    # single file format
    img = ni1.Nifti1Image(data, affine)
    yield assert_equal(img.get_header()['magic'], 'n+1')
    str_io = StringIO()
    img.file_map['image'].fileobj = str_io
    # check that the single format vox offset is set correctly
    img.to_file_map()
    yield assert_equal(img.get_header()['magic'], 'n+1')
    yield assert_equal(img.get_header()['vox_offset'], 352)
    # make a new pair image, with the single image header
    pimg = ni1.Nifti1Pair(data, affine, img.get_header())
    isio = StringIO()
    hsio = StringIO()
    pimg.file_map['image'].fileobj = isio
    pimg.file_map['header'].fileobj = hsio
    pimg.to_file_map()
    # the offset remains the same
    yield assert_equal(pimg.get_header()['magic'], 'ni1')
    yield assert_equal(pimg.get_header()['vox_offset'], 352)
    yield assert_array_equal(pimg.get_data(), data)
    # same for from_image, going from single image to pair format
    ana_img = ana.AnalyzeImage.from_image(img)
    yield assert_equal(ana_img.get_header()['vox_offset'], 352)
    # back to the single image, save it again to a stringio
    str_io = StringIO()
    img.file_map['image'].fileobj = str_io
    img.to_file_map()
    yield assert_equal(img.get_header()['vox_offset'], 352)
    aimg = ana.AnalyzeImage.from_image(img)
    yield assert_equal(aimg.get_header()['vox_offset'], 352)
    aimg = spm99.Spm99AnalyzeImage.from_image(img)
    yield assert_equal(aimg.get_header()['vox_offset'], 352)
    aimg = spm2.Spm2AnalyzeImage.from_image(img)
    yield assert_equal(aimg.get_header()['vox_offset'], 352)
    nfimg = ni1.Nifti1Pair.from_image(img)
    yield assert_equal(nfimg.get_header()['vox_offset'], 352)
    # now set the vox offset directly
    hdr = nfimg.get_header()
    hdr['vox_offset'] = 0
    yield assert_equal(nfimg.get_header()['vox_offset'], 0)
    # check it gets properly set by the nifti single image
    nfimg = ni1.Nifti1Image.from_image(img)
    yield assert_equal(nfimg.get_header()['vox_offset'], 352)
Example #13
def cluster(dataset, distances, roi, n_clusters):
    print("Clustering: " + str(n_clusters))
    clustering_algorithm = KMeans(n_clusters=n_clusters)
    clustering_algorithm.fit(distances)

    labels = clustering_algorithm.predict(distances) + 1
    
    ### Try shortening this
    header = dataset.masker.get_header()
    header['cal_max'] = labels.max()
    header['cal_min'] = labels.min()
    voxel_labels = roi.masker.unmask(labels)
    img = nifti1.Nifti1Image(voxel_labels, None, header)
        
    return img
Example #14
def load(filename):
    vol = nii.load(filename)

    if np.linalg.det(vol.affine) > 0.0:
        imgdata = vol.get_data()[::-1, ...]
        flipmat = np.array([[-1.0, 0.0, 0.0, imgdata.shape[0] - 1.0],
                            [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 0.0, 1.0]])
        affine = vol.affine.dot(flipmat)
    else:
        imgdata = vol.get_data()
        affine = vol.affine

    rvol = nii.Nifti1Image(imgdata, affine)
    rvol.set_qform(affine)

    return rvol
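The determinant check above distinguishes left-handed (radiological) from right-handed (neurological) affines; a minimal illustration:

import numpy as np

las_affine = np.diag([-2.0, 2.0, 2.0, 1.0])  # LAS+: negative determinant
ras_affine = np.diag([2.0, 2.0, 2.0, 1.0])   # RAS+: positive determinant
print(np.linalg.det(las_affine) < 0)  # True -> kept as-is by load() above
print(np.linalg.det(ras_affine) > 0)  # True -> flipped along the first axis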
Example #15
def merge_masks(atlas_o, roi_ids, merge_list, merged_names, rh_constant=200):
    '''
    Merge prespecified tuples of ROIs in original atlas for both hemispheres.
    This function assumes that IDs for right hemisphere ROIs are a function of IDs for left hemisphere ROIs

    Args:
        atlas_o (Nifti1Image):
            Original T1w Atlas with voxel values indicative of ROI identity
        roi_ids (dict):
            {target_ROI: Left Hemisphere ROI index}
        merge_list (list):
            List of tuples of ROI names. All ROIs within a tuple will be merged.
        merged_names (list):
            Corresponding new names for the merged ROIs.
        rh_constant (int):
            Right Hemisphere ROI index := Left Hemisphere ROI index + rh_constant
        
    Returns:
        atlas (Nifti1Image):
            T1w Atlas with voxel values indicative of ROI identity
        roi_ids_m (dict):
            {target_ROI: Left Hemisphere ROI index}, where specified target_ROI's are merged

    '''

    # Extract atlas and target nii data
    atlas_array = atlas_o.get_fdata()
    atlas_affine = atlas_o.affine.copy()
    s = 0

    for sublist in merge_list:
        # take first ROI in tuple as reference
        ref_id = roi_ids[sublist[0]][0]
        for other_roi in sublist[1:]:
            # Remap each remaining ROI to the reference ROI
            rest_id = roi_ids[other_roi][0]
            atlas_array[atlas_array == rest_id] = ref_id
            if rh_constant is not None:
                atlas_array[
                    atlas_array == rest_id +
                    rh_constant] = ref_id + rh_constant  # repeat for right hemisphere
            roi_ids.pop(other_roi)
        roi_ids[merged_names[s]] = roi_ids.pop(sublist[0])
        s += 1
    atlas = nifti1.Nifti1Image(atlas_array, atlas_affine)
    roi_ids_m = roi_ids.copy()
    return atlas, roi_ids_m
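A small sketch merging two ROIs into one label on a made-up atlas (note that roi_ids itself is modified in place):

import numpy as np
from nibabel import nifti1

# Made-up atlas with labels 1, 13, 19 (left) and 201, 213, 219 (right)
arr = np.random.choice([0, 1, 13, 19, 201, 213, 219], size=(10, 10, 10))
atlas_o = nifti1.Nifti1Image(arr.astype(float), np.eye(4))
roi_ids = {'V1': [1], 'V3A': [13], 'V3B': [19]}

atlas_m, roi_ids_m = merge_masks(atlas_o, roi_ids,
                                 merge_list=[('V3A', 'V3B')],
                                 merged_names=['V3AB'])
print(roi_ids_m)  # {'V1': [1], 'V3AB': [13]}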
Example #16
def threshold_mask(mask_EPI,
                   threshold=0.5,
                   voxel_dimensions=None,
                   mask_T1w=None):
    '''
    After smoothing, the downsampled masks are not binary anymore. Make a mask binary again.
    
    Args:
        mask_EPI (Nifti1Image):
            resampled ROI-specific mask image
        threshold (float or 'adaptive'):
            cutoff between 0 and 1, or 'adaptive' to optimize the cutoff so
            that the mask volume matches the T1w reference volume
        voxel_dimensions (dict):
            voxel dimensions in mm (only needed for adaptive thresholding)
        mask_T1w (Nifti1Image):
            original binary mask in T1w space (only needed for adaptive thresholding)

    Returns:
        mask_t (Nifti1Image):
            binary resampled ROI-specific mask image after thresholding

    '''
    mask_array = mask_EPI.get_fdata()

    assert isinstance(threshold, float) or threshold == 'adaptive', \
        "threshold must be a floating point number or 'adaptive'."
    if isinstance(threshold, float):
        assert 0 <= threshold <= 1, "numeric threshold must be between 0 and 1."
        assert np.unique(mask_array)[1] < threshold, \
            "numeric threshold set too low, all values will pass."
    else:
        assert isinstance(voxel_dimensions, dict), \
            "for adaptive thresholding you must provide the voxel dimensions in mm."

    if isinstance(threshold, float):
        mask_thresholded = (mask_array >= threshold).astype(float)
    else:
        mask_array_T1w = mask_T1w.get_fdata()
        geom_vol_T1w = mask_array_T1w.sum() * np.prod(voxel_dimensions['T1w'])
        mask_thresholded = optimize_threshold(mask_array, geom_vol_T1w,
                                              voxel_dimensions)

    mask_t = nifti1.Nifti1Image(mask_thresholded, mask_EPI.affine.copy())
    return mask_t
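A self-contained sketch of the fixed-threshold branch, starting from a smoothed (hence no longer binary) mask:

import numpy as np
from nibabel import nifti1
from nilearn import image

# A binary cube mask; Gaussian smoothing turns it into values in [0, 1]
mask = np.zeros((10, 10, 10))
mask[4:7, 4:7, 4:7] = 1
mask_s = image.smooth_img(nifti1.Nifti1Image(mask, np.eye(4)), fwhm=3)

# Re-binarize at a fixed cutoff of 0.5
mask_t = nifti1.Nifti1Image((mask_s.get_fdata() >= 0.5).astype(float),
                            mask_s.affine)
print(int(mask_t.get_fdata().sum()), "voxels survive the threshold")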
Example #17
def test_save_load():
    shape = (2, 4, 6)
    npt = np.float32
    data = np.arange(np.prod(shape), dtype=npt).reshape(shape)
    affine = np.diag([1, 2, 3, 1])
    affine[:3, 3] = [3, 2, 1]
    img = ni1.Nifti1Image(data, affine)
    img.set_data_dtype(npt)
    with InTemporaryDirectory() as pth:
        nifn = pjoin(pth, 'an_image.nii')
        sifn = pjoin(pth, 'another_image.img')
        ni1.save(img, nifn)
        re_img = nils.load(nifn)
        yield assert_true(isinstance(re_img, ni1.Nifti1Image))
        yield assert_array_equal(re_img.get_data(), data)
        yield assert_array_equal(re_img.get_affine(), affine)
        # These and subsequent del statements are to prevent confusing
        # windows errors when trying to open files or delete the
        # temporary directory.
        del re_img
        try:
            import scipy.io
        except ImportError:
            # ignore if there is no matfile reader, and restart
            pass
        else:
            spm2.save(img, sifn)
            re_img2 = nils.load(sifn)
            yield assert_true(isinstance(re_img2, spm2.Spm2AnalyzeImage))
            yield assert_array_equal(re_img2.get_data(), data)
            yield assert_array_equal(re_img2.get_affine(), affine)
            del re_img2
            spm99.save(img, sifn)
            re_img3 = nils.load(sifn)
            yield assert_true(isinstance(re_img3, spm99.Spm99AnalyzeImage))
            yield assert_array_equal(re_img3.get_data(), data)
            yield assert_array_equal(re_img3.get_affine(), affine)
            ni1.save(re_img3, nifn)
            del re_img3
        re_img = nils.load(nifn)
        yield assert_true(isinstance(re_img, ni1.Nifti1Image))
        yield assert_array_equal(re_img.get_data(), data)
        yield assert_array_equal(re_img.get_affine(), affine)
        del re_img
Example #18
def cluster_ward(dataset, distances, roi, regions):
    print "Clustering: "

    Z = ward(distances)

    results = []
    for n_reg in regions:
        labels = fcluster(Z, n_reg, 'maxclust')

        ### Try shortening this
        header = dataset.masker.get_header()
        header['cal_max'] = labels.max()
        header['cal_min'] = labels.min()
        voxel_labels = roi.masker.unmask(labels)
        img = nifti1.Nifti1Image(voxel_labels, None, header)
        results.append(img)

    return results
Example #19
def run(options, args):
    filename = args[0]
    output = args[1]
    outfolder = os.path.dirname(output)

    basefilename = os.path.basename(output)
    root, ext = os.path.splitext(basefilename)
    if ext in ['.gz']:
        tok = os.path.splitext(root)
        root = tok[0]
        ext = tok[1] + ext

    print "output to", output

    img = nib.loadsave.load(filename)

    affine = np.matrix(img.get_header().get_best_affine())
    shape = np.array(img.shape)
    zooms = np.array(img.get_header().get_zooms())
    real_dim = shape * zooms

    is_apply_matrix = False
    if options.matrix:
        is_apply_matrix = True
        output = os.path.join(outfolder, root + ext)
        trans = np.matrix(np.loadtxt(options.matrix))
    else:
        output = outfolder + root + "_centered" + ext
        invert_trans_file = outfolder + root + "_inv.txt"

        trans = np.matrix(np.identity(4))
        trans[0, 3] = affine[0, 3] * -1
        trans[1, 3] = affine[1, 3] * -1
        trans[2, 3] = affine[2, 3] * -1

    new_affine = trans * affine

    nimg = nutils.Nifti1Image(img.get_data(), new_affine, img.get_header())
    nib.loadsave.save(nimg, output)

    if not is_apply_matrix:
        np.savetxt(invert_trans_file, trans.I)
Example #20
def test_save_load():
    shape = (2, 4, 6)
    npt = np.float32
    data = np.arange(np.prod(shape), dtype=npt).reshape(shape)
    affine = np.diag([1, 2, 3, 1])
    affine[:3, 3] = [3, 2, 1]
    img = ni1.Nifti1Image(data, affine)
    img.set_data_dtype(npt)
    with InTemporaryDirectory() as pth:
        nifn = 'an_image.nii'
        sifn = 'another_image.img'
        ni1.save(img, nifn)
        re_img = nils.load(nifn)
        yield assert_true(isinstance(re_img, ni1.Nifti1Image))
        yield assert_array_equal(re_img.get_data(), data)
        yield assert_array_equal(re_img.get_affine(), affine)
        # These and subsequent del statements are to prevent confusing
        # windows errors when trying to open files or delete the
        # temporary directory.
        del re_img
        if have_scipy:  # skip if we cannot read .mat files
            spm2.save(img, sifn)
            re_img2 = nils.load(sifn)
            yield assert_true(isinstance(re_img2, spm2.Spm2AnalyzeImage))
            yield assert_array_equal(re_img2.get_data(), data)
            yield assert_array_equal(re_img2.get_affine(), affine)
            del re_img2
            spm99.save(img, sifn)
            re_img3 = nils.load(sifn)
            yield assert_true(isinstance(re_img3, spm99.Spm99AnalyzeImage))
            yield assert_array_equal(re_img3.get_data(), data)
            yield assert_array_equal(re_img3.get_affine(), affine)
            ni1.save(re_img3, nifn)
            del re_img3
        re_img = nils.load(nifn)
        yield assert_true(isinstance(re_img, ni1.Nifti1Image))
        yield assert_array_equal(re_img.get_data(), data)
        yield assert_array_equal(re_img.get_affine(), affine)
        del re_img
Example #21
def convolve_image(img, r=4, header=None, method='mean', save=None):
    """ Take an image and multiples every non-zero value found by a hard
    sphere of radius r. Where multiple values overlap, use either sum or
    mean. """
    img = nb.load(img)
    data = img.get_data()
    dims = data.shape
    result = np.zeros(dims)
    counts = np.zeros(dims)
    nonzero = np.nonzero(data)
    for point in zip(*nonzero):
        fill = tuple(get_sphere(point, r, dims=dims).T)
        result[fill] += data[point]
        counts[fill] += 1
    result = np.divide(result, counts)
    result = np.nan_to_num(result)
    if save is None:
        return result
    else:
        img = nifti1.Nifti1Image(result, None, img.get_header())
        img.to_filename(save)
Example #22
def cv_cluster(dataset, reference, mask_img, train_index):
    n_clusters = np.unique(mask_img.get_data()).nonzero()[0].shape[0]
    mask_img = binarize_nib(mask_img)
    roi = Clusterable(dataset, mask_img)
    roi.data = roi.data[:, train_index]

    print "Computing roi ref distances..."
    distances = pairwise_distances(roi.data, reference, metric='correlation')

    print "Clustering: " + str(n_clusters)
    clustering_algorithm = KMeans(n_clusters=n_clusters)
    clustering_algorithm.fit(distances)

    labels = clustering_algorithm.predict(distances) + 1

    ### Try shortening this
    header = dataset.masker.get_header()
    header['cal_max'] = labels.max()
    header['cal_min'] = labels.min()
    voxel_labels = roi.masker.unmask(labels)
    img = nifti1.Nifti1Image(voxel_labels, None, header)

    return img
Example #23
def cv_cluster_ward(dataset, reference, mask_img, train_index, n_regions):
    mask_img = binarize_nib(mask_img)
    roi = Clusterable(dataset, mask_img)
    roi.data = roi.data[:, train_index]

    print "Computing roi ref distances..."
    distances = pairwise_distances(roi.data, reference, metric='correlation')
    distances[np.isnan(distances)] = 1

    clustering_algorithm = FastWard()
    clustering_algorithm.fit(distances)

    imgs = []

    for region in n_regions:
        labels = clustering_algorithm.predict(region, step=0.01) + 1
        header = dataset.masker.get_header()
        header['cal_max'] = labels.max()
        header['cal_min'] = labels.min()
        voxel_labels = roi.masker.unmask(labels)
        imgs.append(nifti1.Nifti1Image(voxel_labels, None, header))

    return imgs
Example #24
def get3Dskeleton(imgPath=None, thresh='30%', mask=True, verbose=0):
    """
    Gets the 3D skeleton of an image
    imgPath -> Relative or absolute path to a .nii image or nii variable
    thresh (optional, default='30%') -> Threshold value. Either float or string like 'x.xx%'
    mask (optional, default=True) -> Applies a gray matter mask if true
    verbose (optional, default=0) -> int - the higher it is, the more messages will be outputted to the console
    Returns a Nifti1Image object containing the skeletonized image
    """
    brain = image.load_img(imgPath)
    if (mask):
        imgMask = masking.compute_gray_matter_mask(target_img=brain,
                                                   threshold=0.3,
                                                   connected=True,
                                                   opening=1)
        brain = image.math_img('img1 * img2', img1=brain, img2=imgMask)
    brain = image.threshold_img(brain, thresh)
    affine = brain.affine
    data = brain.get_data()
    data = data / np.amax(data)
    #data = filters.threshold_adaptive(data, 35, 10)
    skeleton = morphology.skeletonize_3d(data)
    skeletonBrain = nifti.Nifti1Image(skeleton, affine)
    return skeletonBrain
Example #25
def apply_roi_mask_SPM(glm_dir,
                       img_num,
                       mask,
                       target='betas',
                       method='nilearn'):
    '''
    Load 3-D image for prespecified beta-coefficient or residual and apply mask to get a pattern vector
    This function assumes that beta and residual images are in the same directory and follow the SPM naming convention
    
    Args:
        glm_dir (str):
            Path to SPM outputs
        img_num (int):
            Stimulus number
        mask (Nifti1Image):
            Mask to be applied (must have the same dimensions as the GLM betas image)
        target (str):
            'betas' or 'residuals'
        method (str):
            Method for applying the mask ('nilearn', 'custom', or '3d')

    Returns:
        beta_vector (1-D array OR Nifti Image):
            Vector of Beta-coefficients for each voxel in mask OR Nifti Image after masking
    '''
    # Check args
    if target not in ['betas', 'residuals']:
        raise ValueError(target + " is not a valid SPM output descriptor. "
                         "Please use 'betas' or 'residuals'.")
    if method not in ['nilearn', 'custom', '3d']:
        raise ValueError(method + " is not a valid method descriptor. "
                         "Please use 'nilearn', 'custom', or '3d'.")

    # Load image
    if target == 'betas':
        betas_path = os.path.join(glm_dir,
                                  "beta_" + str(img_num).zfill(4) + ".nii")
        image = nifti1.load(betas_path)
    elif target == 'residuals':
        residuals_path = os.path.join(glm_dir,
                                      "Res_" + str(img_num).zfill(4) + ".nii")
        image = nifti1.load(residuals_path)

    # Apply mask
    if method == 'nilearn':
        masked = masking.apply_mask(image,
                                    mask,
                                    dtype='f',
                                    smoothing_fwhm=None,
                                    ensure_finite=True)
    elif method == 'custom':
        # Alternative to nilearn's implementation (allows for plotting of masked 3d image)
        mask_array = mask.get_fdata()
        image_array = image.get_fdata()
        masked = image_array[mask_array.astype(bool)]
    elif method == '3d':
        mask_array = mask.get_fdata()
        image_array = image.get_fdata()
        image_masked = np.multiply(mask_array, image_array)
        masked = nifti1.Nifti1Image(image_masked, mask.affine.copy())

    return masked
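A hypothetical call, assuming SPM output following the 'beta_0001.nii' naming convention under glm_dir (the path and mask are made up):

pattern = apply_roi_mask_SPM(glm_dir='/data/sub-01/glm',  # made-up path
                             img_num=1,                   # loads beta_0001.nii
                             mask=roi_mask,  # e.g. a mask from make_roi_mask
                             target='betas',
                             method='nilearn')
# The 'nilearn' method returns a 1-D pattern vector over the voxels in the mask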
Example #26
# Set directories, specify ROIs and load dictionary for labels
TR = 3
ds_dir = "/home/alex/Datasets/ds001246/"
spm_dir = os.path.join(ds_dir, "derivatives", spm_type)
n_subs = len(glob.glob(ds_dir + os.sep + "sub*"))

##############################################################################

for sub in range(1, n_subs + 1):

    img_output_dir = os.path.join(spm_dir, "sub-" + str(sub).zfill(2))

    # For each run, get averaged condition-specific residuals
    pooled_residuals_array, generic_affine, fourth_dimension_descriptors = \
        pooled_residuals_pipeline(sub, spm_dir, spm_type, ses_type, TR,
                                  drop_first_res)

    # Make subject-specific nifti image of block-averaged residuals
    pooled_residuals = nifti1.Nifti1Image(pooled_residuals_array,
                                          generic_affine)
    nifti_filename = os.path.join(
        img_output_dir,
        "sub-" + str(sub).zfill(2) + "_V_" + stimulus_set + ".nii.gz")
    nifti1.save(pooled_residuals, nifti_filename)
    csv_filename = os.path.join(
        img_output_dir,
        "sub-" + str(sub).zfill(2) + "_uq_conditions_" + stimulus_set + ".csv")
    df = pd.DataFrame({'descriptor': fourth_dimension_descriptors})
    df.to_csv(csv_filename, header=False)
Example #27
File: mask.py Project: dohmatob/nipy
def intersect_masks(input_masks, output_filename=None, threshold=0.5, cc=True):
    """
    Given a list of input mask images, generate the output image which
    is the threshold-level intersection of the inputs

    Parameters
    ----------
    input_masks: list of strings or ndarrays
        paths of the input images (nsubj is set to len(input_masks)),
        or the individual mask arrays themselves.
    output_filename, string:
        Path of the output image, if None no file is saved.
    threshold: float in [0, 1), optional
        gives the level of the intersection.
        threshold=1 corresponds to keeping the intersection of all
        masks, whereas threshold=0 is the union of all masks.
    cc: bool, optional
        If true, extract the main connected component

    Returns
    -------
    grp_mask, boolean array of shape the image shape
    """
    grp_mask = None
    if threshold > 1:
        raise ValueError('The threshold should be < 1')
    if threshold < 0:
        raise ValueError('The threshold should be > 0')
    threshold = min(threshold, 1 - 1.e-7)

    for this_mask in input_masks:
        if isinstance(this_mask, str):
            # We have a filename
            this_mask = load(this_mask).get_data()
        if grp_mask is None:
            grp_mask = this_mask.copy().astype(int)
        else:
            # If this_mask is floating point and grp_mask is integer, numpy 2
            # casting rules raise an error for in-place addition.
            # Hence we do it long-hand.
            # XXX should the masks be coerced to int before addition?
            grp_mask = grp_mask + this_mask

    grp_mask = grp_mask > (threshold * len(list(input_masks)))

    if np.any(grp_mask > 0) and cc:
        grp_mask = largest_cc(grp_mask)

    if output_filename is not None:
        if isinstance(input_masks[0], str):
            nim = load(input_masks[0])
            header = nim.get_header()
            affine = nim.get_affine()
        else:
            header = dict()
            affine = np.eye(4)
        header['descrip'] = 'mask image'
        output_image = nifti1.Nifti1Image(
            grp_mask.astype(np.uint8),
            affine=affine,
            header=header,
        )
        save(output_image, output_filename)

    return grp_mask > 0
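The threshold semantics above reduce to a vote count over masks; a self-contained sketch with three toy masks (a threshold near 1 keeps the strict intersection, 0 the union):

import numpy as np

masks = [np.array([1, 1, 1, 0]),
         np.array([1, 1, 0, 0]),
         np.array([1, 0, 0, 0])]
votes = np.sum(masks, axis=0)  # number of masks that include each voxel

strict = votes > (1 - 1.e-7) * len(masks)  # intersection: [True False False False]
union = votes > 0.0 * len(masks)           # union:        [True True True False]
majority = votes > 0.5 * len(masks)        # majority:     [True True False False]
print(strict, union, majority)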
Example #28
File: mask.py Project: dohmatob/nipy
def compute_mask_files(input_filename,
                       output_filename=None,
                       return_mean=False,
                       m=0.2,
                       M=0.9,
                       cc=1,
                       exclude_zeros=False,
                       opening=2):
    """
    Compute a mask file from fMRI nifti file(s)

    Compute and write the mask of an image based on the grey level.
    This is based on a heuristic proposed by T. Nichols:
    find the least dense point of the histogram, between fractions
    m and M of the total image histogram.

    In case of failure, it is usually advisable to increase m.

    Parameters
    ----------
    input_filename : string
        nifti filename (4D) or list of filenames (3D).
    output_filename : string or None, optional
        path to save the output nifti image (if not None).
    return_mean : boolean, optional
        if True, and output_filename is None, return the mean image also, as
        a 3D array (2nd return argument).
    m : float, optional
        lower fraction of the histogram to be discarded.
    M: float, optional
        upper fraction of the histogram to be discarded.
    cc: boolean, optional
        if cc is True, only the largest connect component is kept.
    exclude_zeros: boolean, optional
        Consider zeros as missing values for the computation of the
        threshold. This option is useful if the images have been
        resliced with a large padding of zeros.
    opening: int, optional
        Size of the morphological opening performed as post-processing

    Returns
    -------
    mask : 3D boolean array
        The brain mask
    mean_image : 3d ndarray, optional
        The mean of all the images used to estimate the mask. Only
        provided if `return_mean` is True.
    """
    if isinstance(input_filename, str) or is_niimg(input_filename):
        # One single filename or image
        if isinstance(input_filename, str):
            nim = load(input_filename)  # load the image from the path
        else:
            nim = input_filename

        try:
            vol_arr = read_img_data(nim, prefer='unscaled')
        except nibabel.spatialimages.ImageFileError:
            vol_arr = nim.get_data()

        header = nim.get_header()
        affine = nim.get_affine()
        if vol_arr.ndim == 4:
            if isinstance(vol_arr, np.memmap):
                # Get rid of memmapping: it is faster.
                mean_volume = np.array(vol_arr, copy=True).mean(axis=-1)
            else:
                mean_volume = vol_arr.mean(axis=-1)
            # Make a copy, to avoid holding a reference on the full array,
            # and thus polluting the memory.
            first_volume = vol_arr[..., 0].copy()
        elif vol_arr.ndim == 3:
            mean_volume = first_volume = vol_arr
        else:
            raise ValueError('Need 4D file for mask')
        del vol_arr
    else:
        # List of filenames
        if len(list(input_filename)) == 0:
            raise ValueError('input_filename should be a non-empty '
                             'list of file names')
        # We have several images, we do mean on the fly,
        # to avoid loading all the data in the memory
        # We do not use the unscaled data here:
        # if the scalefactor is being used to record real
        # differences in intensity over the run this would break
        for index, filename in enumerate(input_filename):
            nim = load(filename)
            if index == 0:
                first_volume = nim.get_data().squeeze()
                mean_volume = first_volume.copy().astype(np.float32)
                header = nim.get_header()
                affine = nim.get_affine()
            else:
                mean_volume += nim.get_data().squeeze()
        mean_volume /= float(len(list(input_filename)))
    del nim
    if np.isnan(mean_volume).any():
        tmp = mean_volume.copy()
        tmp[np.isnan(tmp)] = 0
        mean_volume = tmp

    mask = compute_mask(mean_volume,
                        first_volume,
                        m,
                        M,
                        cc,
                        opening=opening,
                        exclude_zeros=exclude_zeros)

    if output_filename is not None:
        header['descrip'] = 'mask'
        output_image = nifti1.Nifti1Image(mask.astype(np.uint8),
                                          affine=affine,
                                          header=header)
        save(output_image, output_filename)
    if not return_mean:
        return mask
    else:
        return mask, mean_volume
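A self-contained sketch of the histogram heuristic itself: threshold at the least dense histogram bin between fractions m and M of the sorted intensities (simplified; the real compute_mask also applies connected components and morphological opening):

import numpy as np

def histogram_threshold(volume, m=0.2, M=0.9, bins=128):
    # Restrict to the intensity range between the m and M quantiles
    sorted_vals = np.sort(volume.ravel())
    lo = sorted_vals[int(m * sorted_vals.size)]
    hi = sorted_vals[int(M * sorted_vals.size)]
    counts, edges = np.histogram(volume, bins=bins, range=(lo, hi))
    # Cut at the least dense point of the restricted histogram
    cut = edges[np.argmin(counts)]
    return volume > cut

# Bimodal toy 'mean volume': dark background plus a bright blob
vol = np.concatenate([np.random.normal(10, 2, 4000),
                      np.random.normal(100, 10, 1000)]).reshape(50, 10, 10)
print(int(histogram_threshold(vol).sum()), "voxels in the mask")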
Example #29
def proc_file(infile, opts):
    # figure out the output filename, and see if it exists
    basefilename = splitext_addext(os.path.basename(infile))[0]
    if opts.outdir is not None:
        # set output path
        basefilename = os.path.join(opts.outdir, basefilename)

    # prep a file
    if opts.compressed:
        verbose('Using gzip compression')
        outfilename = basefilename + '.nii.gz'
    else:
        outfilename = basefilename + '.nii'
    if os.path.isfile(outfilename) and not opts.overwrite:
        raise IOError('Output file "%s" exists, use --overwrite to '
                      'overwrite it' % outfilename)

    # load the PAR header and data
    scaling = 'dv' if opts.scaling == 'off' else opts.scaling
    infile = fname_ext_ul_case(infile)
    pr_img = pr.load(infile,
                     permit_truncated=opts.permit_truncated,
                     scaling=scaling,
                     strict_sort=opts.strict_sort)
    pr_hdr = pr_img.header
    affine = pr_hdr.get_affine(origin=opts.origin)
    slope, intercept = pr_hdr.get_data_scaling(scaling)
    if opts.scaling != 'off':
        verbose('Using data scaling "%s"' % opts.scaling)
    # get original scaling, and decide if we scale in-place or not
    if opts.scaling == 'off':
        slope = np.array([1.])
        intercept = np.array([0.])
        in_data = pr_img.dataobj.get_unscaled()
        out_dtype = pr_hdr.get_data_dtype()
    elif not np.any(np.diff(slope)) and not np.any(np.diff(intercept)):
        # Single scalefactor case
        slope = slope.ravel()[0]
        intercept = intercept.ravel()[0]
        in_data = pr_img.dataobj.get_unscaled()
        out_dtype = pr_hdr.get_data_dtype()
    else:
        # Multi scalefactor case
        slope = np.array([1.])
        intercept = np.array([0.])
        in_data = np.array(pr_img.dataobj)
        out_dtype = np.float64
    # Reorient data block to LAS+ if necessary
    ornt = io_orientation(np.diag([-1, 1, 1, 1]).dot(affine))
    if np.all(ornt == [[0, 1], [1, 1], [2, 1]]):  # already in LAS+
        t_aff = np.eye(4)
    else:  # Not in LAS+
        t_aff = inv_ornt_aff(ornt, pr_img.shape)
        affine = np.dot(affine, t_aff)
        in_data = apply_orientation(in_data, ornt)

    bvals, bvecs = pr_hdr.get_bvals_bvecs()
    if not opts.keep_trace:  # discard Philips DTI trace if present
        if bvecs is not None:
            bad_mask = np.logical_and(bvals != 0, (bvecs == 0).all(axis=1))
            if bad_mask.sum() > 0:
                pl = 's' if bad_mask.sum() != 1 else ''
                verbose('Removing %s DTI trace volume%s' %
                        (bad_mask.sum(), pl))
                good_mask = ~bad_mask
                in_data = in_data[..., good_mask]
                bvals = bvals[good_mask]
                bvecs = bvecs[good_mask]

    # Make corresponding NIfTI image
    nimg = nifti1.Nifti1Image(in_data, affine, pr_hdr)
    nhdr = nimg.header
    nhdr.set_data_dtype(out_dtype)
    nhdr.set_slope_inter(slope, intercept)
    nhdr.set_sform(affine, code=1)
    nhdr.set_qform(affine, code=1)

    if 'parse' in opts.minmax:
        # need to get the scaled data
        verbose('Loading (and scaling) the data to determine value range')
    if opts.minmax[0] == 'parse':
        nhdr['cal_min'] = in_data.min() * slope + intercept
    else:
        nhdr['cal_min'] = float(opts.minmax[0])
    if opts.minmax[1] == 'parse':
        nhdr['cal_max'] = in_data.max() * slope + intercept
    else:
        nhdr['cal_max'] = float(opts.minmax[1])

    # container for potential NIfTI1 header extensions
    if opts.store_header:
        # dump the full PAR header content into an extension
        with open(infile, 'rb') as fobj:  # contents must be bytes
            hdr_dump = fobj.read()
            dump_ext = nifti1.Nifti1Extension('comment', hdr_dump)
        nhdr.extensions.append(dump_ext)

    verbose('Writing %s' % outfilename)
    nibabel.save(nimg, outfilename)

    # write out bvals/bvecs if requested
    if opts.bvs:
        if bvals is None and bvecs is None:
            verbose('No DTI volumes detected, bvals and bvecs not written')
        elif bvecs is None:
            verbose('DTI volumes detected, but no diffusion direction info was '
                    'found.  Writing .bvals file only.')
            with open(basefilename + '.bvals', 'w') as fid:
                # np.savetxt could do this, but it's just a loop anyway
                for val in bvals:
                    fid.write('%s ' % val)
                fid.write('\n')
        else:
            verbose('Writing .bvals and .bvecs files')
            # Transform bvecs with reorientation affine
            orig2new = npl.inv(t_aff)
            bv_reorient = from_matvec(to_matvec(orig2new)[0], [0, 0, 0])
            bvecs = apply_affine(bv_reorient, bvecs)
            with open(basefilename + '.bvals', 'w') as fid:
                # np.savetxt could do this, but it's just a loop anyway
                for val in bvals:
                    fid.write('%s ' % val)
                fid.write('\n')
            with open(basefilename + '.bvecs', 'w') as fid:
                for row in bvecs.T:
                    for val in row:
                        fid.write('%s ' % val)
                    fid.write('\n')

    # export data labels varying along the 4th dimensions if requested
    if opts.vol_info:
        labels = pr_img.header.get_volume_labels()
        if len(labels) > 0:
            vol_keys = list(labels.keys())
            with open(basefilename + '.ordering.csv', 'w') as csvfile:
                csvwriter = csv.writer(csvfile, delimiter=',')
                csvwriter.writerow(vol_keys)
                for vals in zip(*[labels[k] for k in vol_keys]):
                    csvwriter.writerow(vals)

    # write out dwell time if requested
    if opts.dwell_time:
        try:
            dwell_time = calculate_dwell_time(pr_hdr.get_water_fat_shift(),
                                              pr_hdr.get_echo_train_length(),
                                              opts.field_strength)
        except MRIError:
            verbose('No EPI factors, dwell time not written')
        else:
            verbose('Writing dwell time (%r sec) calculated assuming %sT '
                    'magnet' % (dwell_time, opts.field_strength))
            with open(basefilename + '.dwell_time', 'w') as fid:
                fid.write('%r\n' % dwell_time)
Example #30
def remove_mask_overlap(mask_dict_r, mask_dict_t):
    '''
    Find overlapping voxels in thresholded masks and keep each voxel only in the mask with the highest pre-threshold intensity
    
    Args:
        mask_dict_r (dict):
            {ROI_name : smoothed and resampled mask}
        mask_dict_t (dict):
            The above after thresholding

    Returns:
        mask_dict_d (dict):
            mask_dict_t after removal of overlapping voxels
    '''
    macro_mask = []
    overlap_indices = []
    mask_dict_d = mask_dict_t.copy()

    # Add up all thresholded masks
    for roi_h in mask_dict_t.keys():
        mask_array = mask_dict_t[roi_h].get_fdata()

        if len(macro_mask) == 0:
            macro_mask = mask_array.copy()
        else:
            macro_mask += mask_array.copy()

    # Find voxels with mask overlap
    overlap_indices = np.array(np.where(macro_mask > 1)).T

    # For each overlapping voxel: Create list of overlapping ROIs
    for idx_number in range(len(overlap_indices)):
        idx = tuple(overlap_indices[idx_number])
        overlap_list = []
        voxel_intensities = []

        for roi_h in mask_dict_r.keys():
            voxel = mask_dict_d[roi_h].get_fdata()[idx]
            if voxel == 1:
                overlap_list.append(roi_h)
        print("The ROIs", overlap_list, " overlap on voxel: ", idx)

        # Check which voxel value is largest on smoothed versions of the overlapping masks
        for overlap_mask in overlap_list:
            voxel_intensities.append(
                mask_dict_r[overlap_mask].get_fdata()[idx])
        delete_idx = int(np.argmax(voxel_intensities))

        # Keep the ROI with the highest voxel intensity and set the voxel to 0 for all remaining entries
        del overlap_list[delete_idx]
        print("Setting said voxel's intensity to 0 for the mask of: ",
              overlap_list)
        for roi_d in overlap_list:
            mask = mask_dict_d[roi_d]
            mask_array = mask.get_fdata()
            mask_array[idx] = 0
            mask_nifti = nifti1.Nifti1Image(mask_array, mask.affine.copy())
            mask_dict_d[roi_d] = mask_nifti

    return mask_dict_d