Example #1
    def _run_interface(self, runtime):
        img = nb.load(self.inputs.in_file[0], mmap=NUMPY_MMAP)
        header = img.header.copy()
        vollist = [nb.load(filename, mmap=NUMPY_MMAP) for filename in self.inputs.in_file]
        data = np.concatenate([vol.get_data().reshape(
            vol.shape[:3] + (-1,)) for vol in vollist], axis=3)
        data = np.nan_to_num(data)

        if data.dtype.kind == 'i':
            header.set_data_dtype(np.float32)
            data = data.astype(np.float32)

        if isdefined(self.inputs.regress_poly):
            data = regress_poly(self.inputs.regress_poly, data, remove_mean=False)[0]
            img = nb.Nifti1Image(data, img.affine, header)
            nb.save(img, op.abspath(self.inputs.detrended_file))

        meanimg = np.mean(data, axis=3)
        stddevimg = np.std(data, axis=3)
        tsnr = np.zeros_like(meanimg)
        tsnr[stddevimg > 1.e-3] = meanimg[stddevimg > 1.e-3] / stddevimg[stddevimg > 1.e-3]
        img = nb.Nifti1Image(tsnr, img.affine, header)
        nb.save(img, op.abspath(self.inputs.tsnr_file))
        img = nb.Nifti1Image(meanimg, img.affine, header)
        nb.save(img, op.abspath(self.inputs.mean_file))
        img = nb.Nifti1Image(stddevimg, img.affine, header)
        nb.save(img, op.abspath(self.inputs.stddev_file))
        return runtime
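A minimal standalone sketch of the same tSNR computation outside the nipype interface machinery, assuming nibabel and numpy are installed; 'func.nii.gz' and 'tsnr.nii.gz' are hypothetical paths.

import nibabel as nb
import numpy as np

img = nb.load('func.nii.gz')                     # hypothetical 4D input
data = np.nan_to_num(img.get_fdata(dtype=np.float32))
meanimg = data.mean(axis=3)
stddevimg = data.std(axis=3)
tsnr = np.zeros_like(meanimg)
nonzero = stddevimg > 1.e-3                      # same guard against near-zero std as above
tsnr[nonzero] = meanimg[nonzero] / stddevimg[nonzero]
nb.save(nb.Nifti1Image(tsnr, img.affine), 'tsnr.nii.gz')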
Example #2
    def _run_interface(self, runtime):
        from dipy.reconst import dti
        from dipy.io.utils import nifti1_symmat
        gtab = self._get_gradient_table()

        img = nb.load(self.inputs.in_file)
        data = img.get_data()
        affine = img.affine
        mask = None
        if isdefined(self.inputs.mask_file):
            mask = nb.load(self.inputs.mask_file).get_data()

        # Fit it
        tenmodel = dti.TensorModel(gtab)
        ten_fit = tenmodel.fit(data, mask)
        lower_triangular = ten_fit.lower_triangular()
        img = nifti1_symmat(lower_triangular, affine)
        out_file = self._gen_filename('dti')
        nb.save(img, out_file)
        IFLOGGER.info('DTI parameters image saved as {i}'.format(i=out_file))

        #FA MD RD and AD
        for metric in ["fa", "md", "rd", "ad"]:
            data = getattr(ten_fit,metric).astype("float32")
            out_name = self._gen_filename(metric)
            nb.Nifti1Image(data, affine).to_filename(out_name)
            IFLOGGER.info('DTI {metric} image saved as {i}'.format(i=out_name, metric=metric))

        return runtime
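The gradient table here comes from the interface helper _get_gradient_table(); a hedged sketch of the equivalent direct dipy calls, with 'dwi.bval' and 'dwi.bvec' as hypothetical paths.

from dipy.io import read_bvals_bvecs
from dipy.core.gradients import gradient_table

bvals, bvecs = read_bvals_bvecs('dwi.bval', 'dwi.bvec')  # hypothetical paths
gtab = gradient_table(bvals, bvecs)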
Example #3
def intersect_masks(input_masks, output_filename=None, threshold=0.5, cc=True):
    """
    Given a list of input mask images, generate the output image which
    is the threshold-level intersection of the inputs

    Parameters
    ----------
    input_masks: list of strings or ndarrays
        paths of the input mask images, or the masks themselves
        given as ndarrays.
    output_filename, string:
        Path of the output image, if None no file is saved.
    threshold: float within [0, 1], optional
        gives the level of the intersection.
        threshold=1 corresponds to keeping the intersection of all
        masks, whereas threshold=0 is the union of all masks.
    cc: bool, optional
        If true, extract the main connected component
        
    Returns
    -------
    grp_mask: boolean array of the same shape as the input images
    """  
    grp_mask = None
    if threshold > 1:
        raise ValueError('The threshold should be <= 1')
    if threshold < 0:
        raise ValueError('The threshold should be >= 0')
    threshold = min(threshold, 1 - 1.e-7)

    for this_mask in input_masks:
        if isinstance(this_mask, basestring):
            # We have a filename
            this_mask = load(this_mask).get_data()
        if grp_mask is None:
            grp_mask = this_mask.copy().astype(np.int)
        else:
            grp_mask += this_mask
    
    grp_mask = grp_mask > (threshold * len(list(input_masks)))
    
    if np.any(grp_mask > 0) and cc:
        grp_mask = largest_cc(grp_mask)
    
    if output_filename is not None:
        if isinstance(input_masks[0], basestring):
            nim = load(input_masks[0]) 
            header = nim.get_header()
            affine = nim.get_affine()
        else:
            header = dict()
            affine = np.eye(4)
        header['descrip'] = 'mask image'
        output_image = nifti1.Nifti1Image(grp_mask.astype(np.uint8),
                                            affine=affine,
                                            header=header,
                                         )
        save(output_image, output_filename)

    return grp_mask > 0
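A small usage sketch with in-memory arrays (no files written), assuming the function as defined above (note it is Python 2 era code, e.g. basestring): threshold=1 keeps only voxels present in every mask, threshold=0 keeps voxels present in any mask.

import numpy as np

m1 = np.zeros((4, 4, 4), dtype=np.uint8)
m2 = np.zeros((4, 4, 4), dtype=np.uint8)
m1[1:3, 1:3, 1:3] = 1
m2[2:4, 2:4, 2:4] = 1
union = intersect_masks([m1, m2], threshold=0, cc=False)    # voxels in any mask
overlap = intersect_masks([m1, m2], threshold=1, cc=False)  # voxels in all masks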
Example #4
def tensor_fitting(in_file, bvals, bvecs, mask_file=None):
    """
    Use dipy to fit DTI

    Parameters
    ----------
    in_file : str
        Full path to a DWI data file.
    bvals : str
        Full path to a file containing gradient magnitude information (b-values).
    bvecs : str
        Full path to a file containing gradient direction information (b-vectors).
    mask_file : str, optional
        Full path to a file containing a binary mask. Defaults to use the entire volume.

    Returns
    -------
    TensorFit object, affine
    """
    img = nb.load(in_file)
    data = img.get_data()
    affine = img.get_affine()
    if mask_file is not None:
        mask = nb.load(mask_file).get_data()
    else:
        mask = None

    # Load information about the gradients:
    gtab = grad.gradient_table(bvals, bvecs)

    # Fit it
    tenmodel = dti.TensorModel(gtab)
    return tenmodel.fit(data, mask), affine
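A hedged usage sketch: fit the tensor and write an FA map; the paths are hypothetical, and fit.fa is the standard dipy TensorFit attribute.

import nibabel as nb

fit, affine = tensor_fitting('dwi.nii.gz', 'dwi.bval', 'dwi.bvec')
nb.Nifti1Image(fit.fa.astype('float32'), affine).to_filename('fa.nii.gz')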
Example #5
def extract_subrois(timeseries_file, label_file, indices):
    """Extract voxel time courses for each subcortical roi index

    Parameters
    ----------

    timeseries_file: a 4D Nifti file
    label_file: a 3D file containing rois in the same space/size of the 4D file
    indices: a list of indices for ROIs to extract.

    Returns
    -------
    out_file: a text file containing time courses for each voxel of each roi
        The first four columns are: freesurfer index, i, j, k positions in the
        label file
    """
    img = nb.load(timeseries_file)
    data = img.get_data()
    roiimg = nb.load(label_file)
    rois = roiimg.get_data()
    prefix = split_filename(timeseries_file)[1]
    out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix)
    with open(out_ts_file, 'wt') as fp:
        for fsindex in indices:
            ijk = np.nonzero(rois == fsindex)
            ts = data[ijk]
            for i0, row in enumerate(ts):
                fp.write('%d,%d,%d,%d,' % (fsindex, ijk[0][i0],
                                           ijk[1][i0], ijk[2][i0]) +
                         ','.join(['%.10f' % val for val in row]) + '\n')
    return out_ts_file
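A short sketch of reading the output back: each row holds the label, the i/j/k voxel position, then the time course ('rest_subcortical_ts.txt' is a hypothetical output name).

import numpy as np

rows = np.loadtxt('rest_subcortical_ts.txt', delimiter=',')
labels, ijk, ts = rows[:, 0], rows[:, 1:4], rows[:, 4:]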
Example #6
def cutBrains(paths_to_nii, file_selector=None):
    '''
    Takes the path to all of the .nii files that we want to put in the
    final dataset
    '''
    
    good_slices = []
    im_dims = None
    
    for path_to_nii in paths_to_nii:
        print("For directory %s ..."%(path_to_nii))
        all_in_dir = listdir(path_to_nii)
        niis = [f for f in all_in_dir if isfile(join(path_to_nii, f)) and f[-4:] == '.nii']

        if file_selector:
            niis = [nii for nii in niis if re.search(file_selector, nii)]

        if im_dims is None:
            ## take the first nii as an example (assumes all same shape)
            img = nibabel.load(join(path_to_nii, niis[0]))
            data = img.get_data()
            im_dims = data.shape[1:] ## take last two dims as images we'll want

        for i, nii in enumerate(niis):
            print("Processing brain %d of %d"%(i+1,len(niis)))
            img = nibabel.load(join(path_to_nii, nii))
            data = img.get_data()
            if data.shape[1:] != im_dims:
                print("Skipping this brain due to inconsistent size")
                continue
            ## chose to slice at the middle of the x dimension
            cut_loc = data.shape[0] // 2
            cut = data[cut_loc, :, :]

        
            ## convert memmap to array and drop it into the list with label
            ## and normalize
            cut = np.array(cut, dtype='float32')
            cut /= np.max(cut)
            good_slices.append((cut, getLabel(path_to_nii)))

    
    X_dims = (len(good_slices), im_dims[0], im_dims[1])
    y_dims = (len(good_slices), 1)
    
    X = np.zeros(X_dims, dtype='float32')
    y = np.zeros(y_dims, dtype='uint8')

    ## shuffle all the examples so we can get a good number of each in the test set
    shuffle(good_slices)

    for i, (sl, label) in enumerate(good_slices):
        X[i, :, :] = sl
        y[i] = label

    with open(join(path_to_nii, 'ADNI_X_normcuts_big'), 'wb') as X_file:
        pickle.dump(X, X_file)

    with open(join(path_to_nii, 'ADNI_y_normcuts_big'), 'wb') as y_file:
        pickle.dump(y, y_file)
Example #7
    def _run_interface(self, runtime):
        from dipy.reconst import dki
        from dipy.io.utils import nifti1_symmat
        gtab = self._get_gradient_table()

        img = nb.load(self.inputs.in_file)
        data = img.get_data()
        affine = img.affine
        mask = None
        if isdefined(self.inputs.mask_file):
            mask = nb.load(self.inputs.mask_file).get_data()

        # Fit the DKI model
        kurtosis_model = dki.DiffusionKurtosisModel(gtab)
        kurtosis_fit = kurtosis_model.fit(data, mask)
        lower_triangular = kurtosis_fit.lower_triangular()
        img = nifti1_symmat(lower_triangular, affine)
        out_file = self._gen_filename('dki')
        nb.save(img, out_file)
        IFLOGGER.info('DKI parameters image saved as {i}'.format(i=out_file))

        # FA, MD, RD, and AD
        for metric in ['fa', 'md', 'rd', 'ad', 'mk', 'ak', 'rk']:
            data = getattr(kurtosis_fit, metric).astype('float32')
            out_name = self._gen_filename(metric)
            nb.Nifti1Image(data, affine).to_filename(out_name)
            IFLOGGER.info('DKI {metric} image saved as {i}'.format(i=out_name,
                                                                   metric=metric))

        return runtime
Example #8
def test_high_level_glm_with_data():
    shapes, rk = ((7, 6, 5, 20), (7, 6, 5, 19)), 3
    mask, fmri_data, design_matrices = write_fake_fmri_data(shapes, rk)

    # without mask
    multi_session_model = FMRILinearModel(fmri_data, design_matrices, mask=None)
    multi_session_model.fit()
    z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2)
    assert_equal(np.sum(z_image.get_data() == 0), 0)

    # compute the mask
    multi_session_model = FMRILinearModel(fmri_data, design_matrices,
                                          m=0, M=.01, threshold=0.)
    multi_session_model.fit()
    z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2)
    assert_true(z_image.get_data().std() < 3. )

    # with mask
    multi_session_model = FMRILinearModel(fmri_data, design_matrices, mask)
    multi_session_model.fit()
    z_image, effect_image, variance_image= multi_session_model.contrast(
        [np.eye(rk)[:2]] * 2, output_effects=True, output_variance=True)
    assert_array_equal(z_image.get_data() == 0., load(mask).get_data() == 0.)
    assert_true(
        (variance_image.get_data()[load(mask).get_data() > 0, 0] > .001).all())

    # without scaling
    multi_session_model.fit(do_scaling=False)
    z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2)
    assert_true(z_image.get_data().std() < 3. )
Example #9
def get_mean_timeseries(infile, roi, mask):
    import os
    import nibabel as nib
    from nipype.utils.filemanip import fname_presuffix, split_filename
    import numpy as np

    img = nib.load(infile)
    data, aff = img.get_data(), img.get_affine()

    roi_img = nib.load(roi) 
    roi_data, roi_affine = roi_img.get_data(), roi_img.get_affine()

    if len(roi_data.shape) > 3:
        roi_data = roi_data[:,:,:,0]

    mask = nib.load(mask).get_data()
    roi_data = (roi_data > 0).astype(int) + (mask>0).astype(int)

    _, roiname, _ = split_filename(roi)
    outfile = fname_presuffix(infile, "%s_" % roiname, '.txt',
                              newpath=os.path.abspath('.'), use_ext=False)

    out_data = np.mean(data[roi_data > 1, :], axis=0)
    print(out_data.shape)
    
    np.savetxt(outfile,out_data)

    return outfile, roiname
Example #10
def plot_haxby(activation, title):
    z = 25

    fig = plt.figure(figsize=(4, 5.4))
    fig.subplots_adjust(bottom=0., top=1., left=0., right=1.)
    plt.axis('off')
    # pl.title('SVM vectors')
    plt.imshow(mean_img[:, 4:58, z].T, cmap=plt.cm.gray,
               interpolation='nearest', origin='lower')
    plt.imshow(activation[:, 4:58, z].T, cmap=plt.cm.hot,
               interpolation='nearest', origin='lower')

    mask_house = nib.load(h.mask_house[0]).get_data()
    mask_face = nib.load(h.mask_face[0]).get_data()

    plt.contour(mask_house[:, 4:58, z].astype(np.bool).T, contours=1,
            antialiased=False, linewidths=4., levels=[0],
            interpolation='nearest', colors=['blue'], origin='lower')

    plt.contour(mask_face[:, 4:58, z].astype(np.bool).T, contours=1,
            antialiased=False, linewidths=4., levels=[0],
            interpolation='nearest', colors=['limegreen'], origin='lower')

    p_h = Rectangle((0, 0), 1, 1, fc="blue")
    p_f = Rectangle((0, 0), 1, 1, fc="limegreen")
    plt.legend([p_h, p_f], ["house", "face"])
    plt.title(title, x=.05, ha='left', y=.90, color='w', size=28)
Example #11
def intersubjectconsensus():
    """Compute inter-subjects clustering consensus.

    """
    base_dir = r'/nfs/h1/workingshop/huanglijie/uni_mul_analysis'
    db_dir = os.path.join(base_dir, 'multivariate', 'detection', 'mvpcluster')

    n_clusters = 60

    mask_file = os.path.join(base_dir, 'multivariate', 'detection',
                             'mask.nii.gz')
    mask = nib.load(mask_file).get_data()

    for n in range(2, n_clusters + 1):
        merged_file = os.path.join(db_dir, 'merged_cluster_'+str(n)+'.nii.gz')
        merged_data = nib.load(merged_file).get_data()
        n_subjs = merged_data.shape[3]
        mtx = np.zeros((n_subjs, n_subjs))
        for i in range(n_subjs):
            for j in range(n_subjs):
                data_i = merged_data[..., i]
                data_j = merged_data[..., j]
                vtr_i = data_i[np.nonzero(mask)]
                vtr_j = data_j[np.nonzero(mask)]
                tmp = metrics.adjusted_mutual_info_score(vtr_i, vtr_j)
                mtx[i, j] = tmp
        outfile = os.path.join(db_dir, 'consensus_'+str(n)+'.csv')
        np.savetxt(outfile, mtx, delimiter=',')
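For intuition, adjusted mutual information is invariant to relabelling of the clusters; a toy check with scikit-learn:

import numpy as np
from sklearn import metrics

a = np.array([0, 0, 1, 1, 2, 2])
b = np.array([1, 1, 0, 0, 2, 2])  # same partition, different label names
print(metrics.adjusted_mutual_info_score(a, b))  # 1.0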
Example #12
    def _run_interface(self, runtime):
        maskdata = nb.load(self.inputs.mask).get_data()
        maskdata = np.logical_not(np.logical_or(maskdata == 0, np.isnan(maskdata)))

        session_datas = [[nb.load(fname).get_data()[maskdata].reshape(-1, 1) for fname in sessions] for sessions in self.inputs.subjects_sessions]
        list_of_sessions = [np.dstack(session_data) for session_data in session_datas]
        all_data = np.hstack(list_of_sessions)
        icc = np.zeros(session_datas[0][0].shape)
        session_F = np.zeros(session_datas[0][0].shape)
        session_var = np.zeros(session_datas[0][0].shape)
        subject_var = np.zeros(session_datas[0][0].shape)

        for x in range(icc.shape[0]):
            Y = all_data[x, :, :]
            icc[x], subject_var[x], session_var[x], session_F[x], _, _ = ICC_rep_anova(Y)

        nim = nb.load(self.inputs.subjects_sessions[0][0])
        new_data = np.zeros(nim.shape)
        new_data[maskdata] = icc.reshape(-1,)
        new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)
        nb.save(new_img, 'icc_map.nii')

        new_data = np.zeros(nim.shape)
        new_data[maskdata] = session_var.reshape(-1,)
        new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)
        nb.save(new_img, 'session_var_map.nii')

        new_data = np.zeros(nim.shape)
        new_data[maskdata] = subject_var.reshape(-1,)
        new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)
        nb.save(new_img, 'subject_var_map.nii')

        return runtime
Example #13
def mrtrix_spherical_functions():
    """Spherical functions represented by spherical harmonic coefficients and
    evaluated on a discrete sphere.

    Returns
    -------
    func_coef : array (2, 3, 4, 45)
        Functions represented by the coefficients associated with the
        mrtrix spherical harmonic basis of order 8.
    func_discrete : array (2, 3, 4, 81)
        Functions evaluated on `sphere`.
    sphere : Sphere
        The discrete sphere, points on the surface of a unit sphere, used to
        evaluate the functions.

    Notes
    -----
    These coefficients were obtained by using the dwi2SH command of mrtrix.

    """
    func_discrete = load(pjoin(THIS_DIR, "func_discrete.nii.gz")).get_data()
    func_coef = load(pjoin(THIS_DIR, "func_coef.nii.gz")).get_data()
    gradients = np.loadtxt(pjoin(THIS_DIR, "sphere_grad.txt"))
    # gradients[0] and the first volume of func_discrete, 
    # func_discrete[..., 0], are associated with the b=0 signal.
    # gradients[:, 3] are the b-values for each gradient/volume.
    sphere = Sphere(xyz=gradients[1:, :3])
    return func_coef, func_discrete[..., 1:], sphere
Example #14
def enhance(in_file, clip_limit=0.010, in_mask=None, out_file=None):
    import numpy as np
    import nibabel as nb
    import os.path as op
    from skimage import exposure, img_as_int

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_enh.nii.gz' % fname)

    im = nb.load(in_file)
    imdata = im.get_data()
    imshape = im.get_shape()

    if in_mask is not None:
        msk = nb.load(in_mask).get_data()
        msk[msk > 0] = 1
        msk[msk < 1] = 0
        imdata = imdata * msk

    immin = imdata.min()
    imdata = (imdata - immin).astype(np.uint16)

    adapted = exposure.equalize_adapthist(imdata.reshape(imshape[0], -1),
                                          clip_limit=clip_limit)

    nb.Nifti1Image(adapted.reshape(imshape), im.get_affine(),
                   im.get_header()).to_filename(out_file)

    return out_file
Example #15
File: qa.py Project: sbrambati/toad
    def slicerGifCompare(self, source1, source2, target, gifSpeed=100, vmax=None, boundaries=None):
        """Create a animated gif from a 4d NIfTI
        Args:
            source: 4D NIfTI image
            target: outputfile gif name
            gifSpeed: delay between images (tens of ms), default=30
        """
        gifId = self.__idGenerator()

        image1 = nibabel.load(source1)
        imageData1 = image1.get_data()

        image2 = nibabel.load(source2)
        imageData2 = image2.get_data()

        if vmax is None:
            vmax = numpy.percentile(imageData1, 99)

        imageList = []
        for num, image in enumerate([imageData1, imageData2]):
            output = gifId + '{0:04}.png'.format(num)
            self.slicerPng(image[:,:,:,2], output, vmax=vmax, isData=True, boundaries=boundaries)
            imageList.append(output)

        self.__imageList2Gif(imageList, target, gifSpeed)
        #Cleaning temp files
        cmd = 'rm {}*.png'.format(gifId)
        self.launchCommand(cmd)

        return target
Example #16
def demean_image(in_file, in_mask=None, out_file=None):
    """
    Demean image data inside mask
    """
    import numpy as np
    import nibabel as nb
    import os.path as op
    import math

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_demean.nii.gz' % fname)

    im = nb.load(in_file)
    data = im.get_data().astype(np.float32)
    msk = np.ones_like(data)

    if in_mask is not None:
        msk = nb.load(in_mask).get_data().astype(np.float32)
        msk[msk > 0] = 1.0
        msk[msk < 1] = 0.0

    mean = np.median(data[msk == 1].reshape(-1))  # robust center: the median inside the mask
    data[msk == 1] = data[msk == 1] - mean
    nb.Nifti1Image(data, im.get_affine(),
                   im.get_header()).to_filename(out_file)
    return out_file
Example #17
def reorient_bvecs(in_dwi, old_dwi, in_bvec):
    """
    Checks reorientations of ``in_dwi`` w.r.t. ``old_dwi`` and
    reorients the in_bvec table accordingly.
    """
    import os
    import numpy as np
    import nibabel as nb

    name, fext = os.path.splitext(os.path.basename(in_bvec))
    if fext == '.gz':
        name, _ = os.path.splitext(name)
    out_file = os.path.abspath('%s_reorient.bvec' % name)
    bvecs = np.loadtxt(in_bvec).T
    new_bvecs = []

    N = nb.load(in_dwi).get_affine()
    O = nb.load(old_dwi).get_affine()
    RS = N.dot(np.linalg.inv(O))[:3, :3]
    sc_idx = np.where((np.abs(RS) != 1) & (RS != 0))
    S = np.ones_like(RS)
    S[sc_idx] = RS[sc_idx]
    R = RS/S

    new_bvecs = [R.dot(b) for b in bvecs]
    np.savetxt(out_file, np.array(new_bvecs).T, fmt='%0.15f')
    return out_file
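A hedged usage sketch with hypothetical file names: after a correction step has changed the image orientation, rewrite the b-vector table to match.

out_bvec = reorient_bvecs('dwi_corrected.nii.gz', 'dwi.nii.gz', 'dwi.bvec')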
Example #18
def recompose_dwi(in_dwi, in_bval, in_corrected, out_file=None):
    """
    Recompose the dMRI data according to the b-values table after EC
    correction
    """
    import numpy as np
    import nibabel as nb
    import os.path as op

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_dwi))
        if ext == ".gz":
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath("%s_eccorrect%s" % (fname, ext))

    im = nb.load(in_dwi)
    dwidata = im.get_data()
    bvals = np.loadtxt(in_bval)
    dwis = np.where(bvals != 0)[0].tolist()

    if len(dwis) != len(in_corrected):
        raise RuntimeError(('Length of DWIs in b-values table and after '
                            'correction should match'))

    for bindex, dwi in zip(dwis, in_corrected):
        dwidata[..., bindex] = nb.load(dwi).get_data()

    nb.Nifti1Image(dwidata, im.get_affine(),
                   im.get_header()).to_filename(out_file)
    return out_file
Example #19
def siemens2rads(in_file, out_file=None):
    """
    Converts input phase difference map to rads
    """
    import numpy as np
    import nibabel as nb
    import os.path as op
    import math

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_rads.nii.gz' % fname)

    in_file = np.atleast_1d(in_file).tolist()
    im = nb.load(in_file[0])
    data = im.get_data().astype(np.float32)
    hdr = im.get_header().copy()

    if len(in_file) == 2:
        data = nb.load(in_file[1]).get_data().astype(np.float32) - data
    elif (data.ndim == 4) and (data.shape[-1] == 2):
        data = np.squeeze(data[..., 1] - data[..., 0])
        hdr.set_data_shape(data.shape[:3])

    imin = data.min()
    imax = data.max()
    data = (2.0 * math.pi * (data - imin)/(imax-imin)) - math.pi
    hdr.set_data_dtype(np.float32)
    hdr.set_xyzt_units('mm')
    hdr['datatype'] = 16
    nb.Nifti1Image(data, im.get_affine(), hdr).to_filename(out_file)
    return out_file
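A quick numeric check of the rescaling: the linear map sends the input range [min, max] onto [-pi, pi] (toy values standing in for integer Siemens phase units).

import math
import numpy as np

x = np.array([0.0, 2048.0, 4096.0])
rads = (2.0 * math.pi * (x - x.min()) / (x.max() - x.min())) - math.pi
print(rads)  # [-pi, 0.0, pi]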
Example #20
def readDataset(niifilename, niiBrainMaskFilename, btablefilename,  parcellationfilename = None):  
     
    # load the masked diffusion dataset
    diffusionData = nib.load(niifilename).get_data()
    affine        = nib.load(niifilename).get_affine()
    
    # load the brain mask
    mask    = nib.load(niiBrainMaskFilename).get_data()
    
    rows, cols, nSlices, nDirections = diffusionData.shape
    
    bvals, bvecs = readbtable(btablefilename)
    gtable       = gradient_table(bvals, bvecs)
    
    if parcellationfilename is not None:
        #parcellation = nib.load(parcellationfilename).get_data()
        parcellation,_ = nrrd.read(parcellationfilename)
    
        if parcellation.shape[2] != nSlices:  # for the second phantom (unc_res)
            parcellation = parcellation[:,:,parcellation.shape[2]-nSlices:]        
        parcellation = np.squeeze(parcellation)
    else:
        parcellation = None
    
    return diffusionData, mask, affine, gtable, parcellation
Example #21
def bg_mask(in_file, in_mask, out_file=None):
    """
    Rough mask of background from brain masks
    """
    import nibabel as nb
    import numpy as np
    from scipy.ndimage import binary_dilation
    import scipy.ndimage as nd
    import os.path as op

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == ".gz":
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath("%s_bgmask%s" % (fname, ext))

    im = nb.load(in_file)
    hdr = im.get_header().copy()
    hdr.set_data_dtype(np.uint8)
    hdr.set_xyzt_units('mm')
    imdata = im.get_data()
    msk = nb.load(in_mask).get_data()
    msk = 1 - binary_dilation(msk,
                              structure=np.ones((20, 20, 20)))
    nb.Nifti1Image(msk.astype(np.uint8),
                   im.get_affine(), hdr).to_filename(out_file)
    return out_file
Example #22
def data_generator(inputsamplelinks, inputlabellinks, conf, batch_size, rdm=True):
    while True:
        images = inputsamplelinks
        targets = inputlabellinks
        index = [i for i in range(len(images))]

        if rdm:
            rng_state = numpy.random.get_state()
            numpy.random.shuffle(images)
            numpy.random.set_state(rng_state)
            numpy.random.shuffle(targets)

        sample_in_batch = 0
        all_img = numpy.zeros((2,2))
        all_targ = numpy.zeros((2,2))
        for i in range(len(images)):
            img = nibabel.load(images[i]).get_data()
            targ = nibabel.load(targets[i]).get_data()
            #targ = targ/255
            img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2], img.shape[3]))
            targ = targ.reshape((1, targ.shape[0], targ.shape[1], targ.shape[2]))
            targ = multiclass(targ, conf)
            if sample_in_batch == 0:
                all_img = img
                all_targ = targ
                sample_in_batch += 1
            else:
                all_img = numpy.concatenate((all_img, img))
                all_targ = numpy.concatenate((all_targ, targ))
                sample_in_batch += 1
            if sample_in_batch == batch_size:
                sample_in_batch = 0
                yield (numpy.copy(all_img), numpy.copy(all_targ))
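A hedged usage sketch: the generator loops forever and yields (images, targets) batches, so it can be consumed with next() or handed to a Keras-style fit loop; all names here are hypothetical.

gen = data_generator(sample_paths, label_paths, conf, batch_size=4)
batch_imgs, batch_targets = next(gen)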
Example #23
def read_mni_template(contrast="T2"):
    """
    Read the MNI template from disk

    Parameters
    ----------
    contrast : list or string, optional
        Which of the contrast templates to read. Two contrasts are available:
        "T1" and "T2", so you can either enter one of these strings as input,
        or a list containing both of them.

    Returns
    -------
    list : contains the nibabel.Nifti1Image objects requested, according to the
        order they were requested in the input.

    Examples
    --------
    Get only the T2 file:
    >>> T2_nifti = read_mni_template("T2") # doctest: +SKIP
    Get both files in this order:
    >>> T1_nifti, T2_nifti = read_mni_template(["T1", "T2"]) # doctest: +SKIP
    """
    files, folder = fetch_mni_template()
    file_dict = {"T1": pjoin(folder, 'mni_icbm152_t1_tal_nlin_asym_09a.nii'),
                 "T2": pjoin(folder, 'mni_icbm152_t2_tal_nlin_asym_09a.nii')}
    if isinstance(contrast, str):
        return nib.load(file_dict[contrast])
    else:
        out_list = []
        for k in contrast:
            out_list.append(nib.load(file_dict[k]))
    return out_list
Example #24
File: misc.py Project: cdla/nipype
    def _run_interface(self, runtime):
        img = nb.load(self.inputs.in_file[0])
        header = img.get_header().copy()
        vollist = [nb.load(filename) for filename in self.inputs.in_file]
        data = np.concatenate([vol.get_data().reshape(
            vol.get_shape()[:3] + (-1,)) for vol in vollist], axis=3)
        if data.dtype.kind == 'i':
            header.set_data_dtype(np.float32)
            data = data.astype(np.float32)
        if isdefined(self.inputs.regress_poly):
            timepoints = img.get_shape()[-1]
            X = np.ones((timepoints, 1))
            for i in range(self.inputs.regress_poly):
                X = np.hstack((X, legendre(
                    i + 1)(np.linspace(-1, 1, timepoints))[:, None]))
            betas = np.dot(np.linalg.pinv(X), np.rollaxis(data, 3, 2))
            datahat = np.rollaxis(np.dot(X[:, 1:],
                                         np.rollaxis(
                                             betas[1:, :, :, :], 0, 3)),
                                  0, 4)
            data = data - datahat
            img = nb.Nifti1Image(data, img.get_affine(), header)
            nb.save(img, self._gen_output_file_name('detrended'))
        meanimg = np.mean(data, axis=3)
        stddevimg = np.std(data, axis=3)
        tsnr = meanimg / stddevimg
        img = nb.Nifti1Image(tsnr, img.get_affine(), header)
        nb.save(img, self._gen_output_file_name())
        img = nb.Nifti1Image(meanimg, img.get_affine(), header)
        nb.save(img, self._gen_output_file_name('mean'))
        img = nb.Nifti1Image(stddevimg, img.get_affine(), header)
        nb.save(img, self._gen_output_file_name('stddev'))
        return runtime
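A standalone sketch of the same Legendre-polynomial detrending on a toy 1D series; scipy.special.legendre is presumably the helper the interface imports.

import numpy as np
from scipy.special import legendre

timepoints = 100
t = np.linspace(-1, 1, timepoints)
series = 2.0 * t + 0.5 * t ** 2 + 0.1 * np.random.randn(timepoints)

X = np.ones((timepoints, 1))
for i in range(2):  # constant column plus Legendre orders 1 and 2
    X = np.hstack((X, legendre(i + 1)(t)[:, None]))
betas = np.linalg.pinv(X).dot(series)
detrended = series - X[:, 1:].dot(betas[1:])  # drop the trend, keep the mean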
Example #25
def check_images(file1, file2):
    """Check that 2 images have the same affines and data shapes.

    Parameters
    ----------
    file1 : str
        Path to the first nifti image

    file2 : str
        Path to the second nifti image
    """
    img = nibabel.load(file1)
    shape1 = np.shape(img.get_data())
    affine1 = img.get_affine()
    img = nibabel.load(file2)
    shape2 = np.shape(img.get_data())
    affine2 = img.get_affine()
    if shape1 != shape2:
        raise ValueError('Images got different shapes: {0} of shape {1}, {2} '
                         'of shape {3}'.format(file1, shape1, file2, shape2))

    if np.any(affine1 != affine2):
        raise ValueError('Images got different affines: {0} has affine {1}, '
                         '{2} has affine {3}'.format(file1, affine1,
                                                     file2, affine2))
Example #26
def test_median_otsu_flow():
    with TemporaryDirectory() as out_dir:
        data_path, _, _ = get_data('small_25')
        volume = nib.load(data_path).get_data()
        save_masked = True
        median_radius = 3
        numpass = 3
        autocrop = False
        vol_idx = [0]
        dilate = 0

        mo_flow = MedianOtsuFlow()
        mo_flow.run(data_path, out_dir=out_dir, save_masked=save_masked,
                             median_radius=median_radius, numpass=numpass,
                             autocrop=autocrop, vol_idx=vol_idx, dilate=dilate)

        mask_name = mo_flow.last_generated_outputs['out_mask']
        masked_name = mo_flow.last_generated_outputs['out_masked']

        masked, mask = median_otsu(volume, median_radius,
                                   numpass, autocrop,
                                   vol_idx, dilate)

        result_mask_data = nib.load(join(out_dir, mask_name)).get_data()
        npt.assert_array_equal(result_mask_data, mask)

        result_masked_data = nib.load(join(out_dir, masked_name)).get_data()
        npt.assert_array_equal(result_masked_data, masked)
Example #27
def test_split_and_merge(tmpdir):
    import numpy as np
    import nibabel as nb
    import os.path as op
    import os

    from nipype.algorithms.misc import split_rois, merge_rois

    in_mask = example_data('tpms_msk.nii.gz')
    dwfile = op.join(str(tmpdir), 'dwi.nii.gz')
    mskdata = nb.load(in_mask).get_data()
    aff = nb.load(in_mask).affine

    dwshape = (mskdata.shape[0], mskdata.shape[1], mskdata.shape[2], 6)
    dwdata = np.random.normal(size=dwshape)
    os.chdir(str(tmpdir))
    nb.Nifti1Image(dwdata.astype(np.float32),
                   aff, None).to_filename(dwfile)

    resdw, resmsk, resid = split_rois(dwfile, in_mask, roishape=(20, 20, 2))
    merged = merge_rois(resdw, resid, in_mask)
    dwmerged = nb.load(merged).get_data()

    dwmasked = dwdata * mskdata[:, :, :, np.newaxis]

    assert np.allclose(dwmasked, dwmerged)
Example #28
    def export(self, nidm_version, export_dir):
        """
        Create prov graph.
        """
        if self.expl_mean_sq_file is None:
            # Create Contrast Explained Mean Square Map as fstat<num>.nii.gz
            # multiplied by sigmasquareds.nii.gz and save it in export_dir
            fstat_img = nib.load(self.stat_file)
            fstat = fstat_img.get_data()

            sigma_sq_img = nib.load(self.sigma_sq_file)
            sigma_sq = sigma_sq_img.get_data()

            expl_mean_sq = nib.Nifti1Image(
                fstat*sigma_sq, fstat_img.get_qform())

            self.filename = ("ContrastExplainedMeanSquareMap" +
                             self.num + ".nii.gz")
            self.expl_mean_sq_file = os.path.join(
                export_dir, self.filename)
            nib.save(expl_mean_sq, self.expl_mean_sq_file)

        self.file = NIDMFile(self.id, self.expl_mean_sq_file,
                             filename=self.filename,
                             sha=self.sha, fmt=self.fmt)

        # Contrast Explained Mean Square Map entity
        path, filename = os.path.split(self.expl_mean_sq_file)
        self.add_attributes((
            (PROV['type'], self.type),
            (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
            (PROV['label'], self.label)))
Example #29
    def _run_interface(self, runtime):
        src_nii = nb.load(self.inputs.src_file)
        src = NiftiWrapper(src_nii, make_empty=True)
        dest_nii = nb.load(self.inputs.dest_file)
        dest = NiftiWrapper(dest_nii, make_empty=True)
        classes = src.meta_ext.get_valid_classes()
        if self.inputs.include_classes:
            classes = [cls
                       for cls in classes
                       if cls in self.inputs.include_classes
                      ]
        if self.inputs.exclude_classes:
            classes = [cls
                       for cls in classes
                       if cls not in self.inputs.exclude_classes
                      ]

        for cls in classes:
            src_dict = src.meta_ext.get_class_dict(cls)
            dest_dict = dest.meta_ext.get_class_dict(cls)
            dest_dict.update(src_dict)
        # Update the shape and slice dimension to reflect the meta extension update.
        dest.meta_ext.slice_dim = src.meta_ext.slice_dim
        dest.meta_ext.shape = src.meta_ext.shape

        self.out_path = path.join(os.getcwd(),
                                  path.basename(self.inputs.dest_file))
        dest.to_filename(self.out_path)

        return runtime
Example #30
def extract_noise_components(realigned_file, noise_mask_file, num_components):
    """Derive components most reflective of physiological noise
    """
    import os
    from nibabel import load
    import numpy as np
    import scipy as sp
    imgseries = load(realigned_file)
    components = None
    mask = load(noise_mask_file).get_data()
    voxel_timecourses = imgseries.get_data()[mask > 0]
    voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
    # remove mean and normalize by variance
    # voxel_timecourses.shape == [nvoxels, time]
    X = voxel_timecourses.T
    stdX = np.std(X, axis=0)
    stdX[stdX == 0] = 1.
    stdX[np.isnan(stdX)] = 1.
    stdX[np.isinf(stdX)] = 1.
    X = (X - np.mean(X, axis=0))/stdX
    u, _, _ = sp.linalg.svd(X, full_matrices=False)
    if components is None:
        components = u[:, :num_components]
    else:
        components = np.hstack((components, u[:, :num_components]))
    components_file = os.path.join(os.getcwd(), 'noise_components.txt')
    np.savetxt(components_file, components, fmt="%.10f")
    return components_file
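A toy sketch of the core step above: extract the leading left-singular vectors of the normalized voxel time courses as noise regressors (random data, no files needed).

import numpy as np
from scipy import linalg

X = np.random.randn(50, 200)                # time x voxels
X = (X - X.mean(axis=0)) / X.std(axis=0)    # same normalization as above
u, _, _ = linalg.svd(X, full_matrices=False)
components = u[:, :5]                       # first 5 noise regressors, time x 5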
Example #31
os.listdir(files_dir)

#Iterate in Batch Folder
for f in os.listdir(files_dir):
    if (f.endswith(".nii")):
        filenames.append(f)

for l in filenames:

    if (l[0] == 'v'):

        path_file = os.path.join(niftis_path, l)
        idx = l.index('-')
        folder_volume = os.path.join(folder_volumes, l[idx + 1:-4])
        #print(folder_volume)
        volume = nib.load(path_file)
        print("Processing Volume %s and saving at %s" % (l, folder_volume))
        #print(volume.dataobj.shape)
        imgs = volume.dataobj
        imgs = np.array(imgs).astype('float32')
        imgs[imgs < -150] = -150
        imgs[imgs > 250] = 250
        #print(imgs.shape)
        #print(imgs.min(axis=(0,1)).shape)

        img_volume = 255 * (imgs - imgs.min(axis=(0, 1))) / (
            imgs.max(axis=(0, 1)) - imgs.min(axis=(0, 1)))
        if (not os.path.isdir(folder_volume)):
            os.mkdir(folder_volume)
        #Iterate over image channels
        for section_id in range(imgs.shape[-1]):
Example #32
def reconst_flow_core(flow, extra_args=[]):
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_data('small_25')
        vol_img = nib.load(data_path)
        volume = vol_img.get_data()
        mask = np.ones_like(volume[:, :, :, 0])
        mask_img = nib.Nifti1Image(mask.astype(np.uint8), vol_img.affine)
        mask_path = join(out_dir, 'tmp_mask.nii.gz')
        nib.save(mask_img, mask_path)

        dti_flow = flow()

        args = [data_path, bval_path, bvec_path, mask_path]
        args.extend(extra_args)

        dti_flow.run(*args, out_dir=out_dir)

        fa_path = dti_flow.last_generated_outputs['out_fa']
        fa_data = nib.load(fa_path).get_data()
        assert_true(fa_data.shape == volume.shape[:-1])

        tensor_path = dti_flow.last_generated_outputs['out_tensor']
        tensor_data = nib.load(tensor_path)
        assert_true(tensor_data.shape[-1] == 6)
        assert_true(tensor_data.shape[:-1] == volume.shape[:-1])

        ga_path = dti_flow.last_generated_outputs['out_ga']
        ga_data = nib.load(ga_path).get_data()
        assert_true(ga_data.shape == volume.shape[:-1])

        rgb_path = dti_flow.last_generated_outputs['out_rgb']
        rgb_data = nib.load(rgb_path)
        assert_true(rgb_data.shape[-1] == 3)
        assert_true(rgb_data.shape[:-1] == volume.shape[:-1])

        md_path = dti_flow.last_generated_outputs['out_md']
        md_data = nib.load(md_path).get_data()
        assert_true(md_data.shape == volume.shape[:-1])

        ad_path = dti_flow.last_generated_outputs['out_ad']
        ad_data = nib.load(ad_path).get_data()
        assert_true(ad_data.shape == volume.shape[:-1])

        rd_path = dti_flow.last_generated_outputs['out_rd']
        rd_data = nib.load(rd_path).get_data()
        assert_true(rd_data.shape == volume.shape[:-1])

        mode_path = dti_flow.last_generated_outputs['out_mode']
        mode_data = nib.load(mode_path).get_data()
        assert_true(mode_data.shape == volume.shape[:-1])

        evecs_path = dti_flow.last_generated_outputs['out_evec']
        evecs_data = nib.load(evecs_path).get_data()
        assert_true(evecs_data.shape[-2:] == tuple((3, 3)))
        assert_true(evecs_data.shape[:-2] == volume.shape[:-1])

        evals_path = dti_flow.last_generated_outputs['out_eval']
        evals_data = nib.load(evals_path).get_data()
        assert_true(evals_data.shape[-1] == 3)
        assert_true(evals_data.shape[:-1] == volume.shape[:-1])
Example #33
def test(model_name,
         epoch,
         gpu_id,
         n_test,
         invert_images,
         max_clip,
         indexing,
         use_miccai,
         atlas_file,
         atlas_seg_file,
         normalize_atlas,
         vol_size=(160, 192, 224),
         nf_enc=[16, 32, 32, 32],
         nf_dec=[32, 32, 32, 32, 32, 16, 16]):
    start_time = time.time()
    good_labels = sio.loadmat('../data/labels.mat')['labels'][0]

    # setup
    gpu = '/gpu:' + str(gpu_id)
    #     print(gpu)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    restrict_GPU_tf(str(gpu_id))
    restrict_GPU_keras(str(gpu_id))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    atlas_vol = nib.load(atlas_file).get_data()[np.newaxis, ..., np.newaxis]
    atlas_seg = nib.load(atlas_seg_file).get_data()

    if normalize_atlas:
        atlas_vol = atlas_vol / np.max(atlas_vol) * max_clip

    sz = atlas_seg.shape
    z_inp1 = tf.placeholder(tf.float32, sz)
    z_inp2 = tf.placeholder(tf.float32, sz)
    z_out = losses.kdice(z_inp1, z_inp2, good_labels)
    kdice_fn = K.function([z_inp1, z_inp2], [z_out])

    # load weights of model
    with tf.device(gpu):
        if use_miccai:
            net = networks.miccai2018_net(vol_size, nf_enc, nf_dec)
            net.load_weights('../models/' + model_name + '/' + str(epoch) +
                             '.h5')
            trf_model = networks.trf_core(
                (vol_size[0] // 2, vol_size[1] // 2, vol_size[2] // 2),
                nb_feats=len(good_labels) + 1,
                indexing=indexing)

        else:
            net = networks.cvpr2018_net(vol_size, nf_enc, nf_dec)
            net.load_weights('../models/' + model_name + '/' + str(epoch) +
                             '.h5')
            trf_model = networks.trf_core(vol_size,
                                          nb_feats=len(good_labels) + 1,
                                          indexing=indexing)

    dice_means = []
    dice_stds = []

    for step in range(0, n_test):
        # get data
        if n_test == 1:
            X_vol = nib.load('../t1_atlas.nii').get_data()[np.newaxis, ...,
                                                           np.newaxis]
            X_seg = nib.load('../t1_atlas_seg.nii').get_data()[np.newaxis, ...,
                                                               np.newaxis]
        else:
            vol_name, seg_name = test_brain_strings[step].split(",")
            X_vol, X_seg = datagenerators.load_example_by_name(
                vol_name, seg_name)

        if invert_images:
            X_vol = max_clip - X_vol

        with tf.device(gpu):
            pred = net.predict([X_vol, atlas_vol])
            all_labels = np.unique(X_seg)
            for l in all_labels:
                if l not in good_labels:
                    X_seg[X_seg == l] = 0
            for i in range(len(good_labels)):
                X_seg[X_seg == good_labels[i]] = i + 1
            seg_onehot = tf.keras.utils.to_categorical(
                X_seg[0, :, :, :, 0], num_classes=len(good_labels) + 1)
            warp_seg_onehot = trf_model.predict(
                [seg_onehot[tf.newaxis, :, :, :, :], pred[1]])
            warp_seg = np.argmax(warp_seg_onehot[0, :, :, :, :], axis=3)

            warp_seg_correct = np.zeros(warp_seg.shape)
            for i in range(len(good_labels)):
                warp_seg_correct[warp_seg == i + 1] = good_labels[i]

            dice = kdice_fn([warp_seg_correct, atlas_seg])

            mean = np.mean(dice)
            std = np.std(dice)
            dice_means.append(mean)
            dice_stds.append(std)
            print(step, mean, std)

    print('average dice:', np.mean(dice_means))
    print('std over patients:', np.std(dice_means))
    print('average std over regions:', np.mean(dice_stds))
    print('time taken:', time.time() - start_time)
Example #34
def main(argv):
    inputfile = 'pat0-subvolume-tformed.nii'
    outputfile = 'folder'
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
    except getopt.GetoptError:
        print('nii2png.py -i <inputfile> -o <outputfile>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('nii2png.py -i <inputfile> -o <outputfile>')
            sys.exit()
        elif opt in ("-i", "--input"):
            inputfile = arg
        elif opt in ("-o", "--output"):
            outputfile = arg

    print('Input file is ', inputfile)
    print('Output folder is ', outputfile)

    # set fn as your 4d nifti file
    image_array = nibabel.load(inputfile).get_data()
    print(len(image_array.shape))

    # ask if rotate
    # ask_rotate = input('Would you like to rotate the orientation? (y/n) ')
    ask_rotate = 'n'

    if ask_rotate.lower() == 'y':
        ask_rotate_num = int(input('OK. By 90° 180° or 270°? '))
        if ask_rotate_num == 90 or ask_rotate_num == 180 or ask_rotate_num == 270:
            print('Got it. Your images will be rotated by {} degrees.'.format(
                ask_rotate_num))
        else:
            print(
                'You must enter a value that is either 90, 180, or 270. Quitting...'
            )
            sys.exit()
    elif ask_rotate.lower() == 'n':
        print('OK, your images will be converted as they are.')
    else:
        print('You must choose either y or n. Quitting...')
        sys.exit()

    # if 4D image inputted
    if len(image_array.shape) == 4:
        # set 4d array dimension values
        nx, ny, nz, nw = image_array.shape

        # set destination folder
        if not os.path.exists(outputfile):
            os.makedirs(outputfile)
            print("Created ouput directory: " + outputfile)

        print('Reading NIfTI file...')

        total_volumes = image_array.shape[3]
        total_slices = image_array.shape[2]

        # iterate through volumes
        for current_volume in range(0, total_volumes):
            slice_counter = 0
            # iterate through slices
            for current_slice in range(0, total_slices):
                if (slice_counter % 1) == 0:
                    # rotate or no rotate
                    if ask_rotate.lower() == 'y':
                        if ask_rotate_num == 90 or ask_rotate_num == 180 or ask_rotate_num == 270:
                            print('Rotating image...')
                            if ask_rotate_num == 90:
                                data = numpy.rot90(image_array[:, :,
                                                               current_slice,
                                                               current_volume])
                            elif ask_rotate_num == 180:
                                data = numpy.rot90(
                                    numpy.rot90(image_array[:, :,
                                                            current_slice,
                                                            current_volume]))
                            elif ask_rotate_num == 270:
                                data = numpy.rot90(
                                    numpy.rot90(
                                        numpy.rot90(
                                            image_array[:, :, current_slice,
                                                        current_volume])))
                    elif ask_rotate.lower() == 'n':
                        data = image_array[:, :, current_slice, current_volume]

                    # alternate slices and save as png
                    print('Saving image...')
                    image_name = inputfile[:-4] + "_t" + "{:0>3}".format(
                        str(current_volume + 1)) + "_z" + "{:0>3}".format(
                            str(current_slice + 1)) + ".png"
                    scipy.misc.imsave(image_name, data)
                    print('Saved.')

                    # move images to folder
                    print('Moving files...')
                    src = image_name
                    shutil.move(src, outputfile)
                    slice_counter += 1
                    print('Moved.')

        print('Finished converting images')

    # else if 3D image inputted
    elif len(image_array.shape) == 3:
        # set 3d array dimension values
        nx, ny, nz = image_array.shape

        # set destination folder
        if not os.path.exists(outputfile):
            os.makedirs(outputfile)
            print("Created ouput directory: " + outputfile)

        print('Reading NIfTI file...')

        total_slices = image_array.shape[2]

        slice_counter = 0
        # iterate through slices
        for current_slice in range(0, total_slices):
            # alternate slices
            if (slice_counter % 1) == 0:
                # rotate or no rotate
                if ask_rotate.lower() == 'y':
                    if ask_rotate_num == 90 or ask_rotate_num == 180 or ask_rotate_num == 270:
                        if ask_rotate_num == 90:
                            data = numpy.rot90(image_array[:, :,
                                                           current_slice])
                        elif ask_rotate_num == 180:
                            data = numpy.rot90(
                                numpy.rot90(image_array[:, :, current_slice]))
                        elif ask_rotate_num == 270:
                            data = numpy.rot90(
                                numpy.rot90(
                                    numpy.rot90(image_array[:, :,
                                                            current_slice])))
                elif ask_rotate.lower() == 'n':
                    data = image_array[:, :, current_slice]

                data = normalize_image(data)

                # alternate slices and save as png
                if (slice_counter % 1) == 0:
                    print('Saving image...')
                    image_name = inputfile[:-4] + "_z" + "{:0>3}".format(
                        str(current_slice + 1)) + ".png"
                    imageio.imwrite(image_name, data)
                    print('Saved.')

                    # move images to folder
                    # print('Moving image...')
                    # src = image_name
                    # shutil.move(src, outputfile)
                    # slice_counter += 1
                    # print('Moved.')

        print('Finished converting images')
    else:
        print('Not a 3D or 4D Image. Please try again.')
Example #35
def get_studies_by_regions(dataset, masks, threshold=0.08,
                           remove_overlap=True, studies=None,
                           features=None, regularization="scale"):
    """ Set up data for a classification task given a set of masks
    
        Given a set of masks, this function retrieves studies associated with each
        mask at the specified threshold, optionally removes overlap and filters by
        studies and features, and returns a studies by features matrix (X) and
        class labels (y)

        Args:
            dataset: a Neurosynth dataset
            masks: a list of paths to Nifti masks
            threshold: percentage of voxels active within the mask for a study
                to be included
            remove_overlap: A boolean indicating if studies that appear in more
                than one mask should be excluded
            studies: An optional list of study names used to constrain the set used
                in classification. If None, will use all studies in the dataset.
            features: An optional list of feature names used to constrain the set
                used in classification. If None, will use all features in the dataset.
            regularization: An optional method name used to regularize X ("scale"
                by default); if None or empty, X is not regularized.

        Returns:
            A tuple (X, y) of np arrays.
            X is a studies by features matrix and y is a vector of class labels
    """

    import nibabel as nib
    import os
    from functools import reduce

    # Load masks using NiBabel

    try:
        loaded_masks = [nib.load(os.path.relpath(m)) for m in masks]
    except OSError:
        print('Error loading masks. Check the path')

    # Get a list of studies that activate for each mask file--i.e.,  a list of
    # lists

    grouped_ids = [dataset.get_ids_by_mask(m, threshold=threshold)
                   for m in loaded_masks]

    # Flattened ids

    flat_ids = reduce(lambda a, b: a + b, grouped_ids)

    # Remove duplicates

    if remove_overlap:
        import collections
        flat_ids = [id for (id, count) in
                    collections.Counter(flat_ids).items() if count == 1]
        grouped_ids = [[x for x in m if x in flat_ids] for m in
                       grouped_ids]  # Remove

    # Create class label(y)

    y = [[idx] * len(ids) for (idx, ids) in enumerate(grouped_ids)]
    y = reduce(lambda a, b: a + b, y)  # Flatten
    y = np.array(y)

    # Extract feature set for only relevant ids

    X = dataset.get_feature_data(ids=flat_ids, features=features)

    if regularization:
        X = regularize(X, method=regularization)

    return (X, y)
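A hedged follow-up sketch: the (X, y) pair can go straight into a scikit-learn classifier; dataset and the mask paths are assumed to exist as in the function above.

from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score

X, y = get_studies_by_regions(dataset, ['mask1.nii.gz', 'mask2.nii.gz'])
print(cross_val_score(GaussianNB(), X, y, cv=4).mean())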
Example #36
def main():
    # Plot Save Path
    base_plot_path = r'/nfs/masi/nathv/py_src_code_2020/dmipy_model_pictures'
    base_plot_path = os.path.normpath(base_plot_path)

    # Method Saving Paths
    base_save_path = r'/nfs/masi/nathv/miccai_2020/micro_methods_hcp_mini'
    base_save_path = os.path.normpath(base_save_path)

    # Create base saving path for Method
    # TODO The Method name can be made an argument later on
    method_name = 'IVIM'

    # Base HCP Data Path
    base_data_path = r'/nfs/HCP/data'
    base_data_path = os.path.normpath(base_data_path)

    # Subject ID's list
    subj_ID_List = ['115017', '114823', '116726', '118225']
    # TODO When needed loop here over the ID list
    for subj_ID in subj_ID_List:
        # Subject Save Path
        subj_save_path = os.path.join(base_save_path, subj_ID)
        if not os.path.exists(subj_save_path):
            os.mkdir(subj_save_path)

        # TODO For later the subject data, bval and bvec reading part can be put inside a function
        subj_data_path = os.path.join(base_data_path, subj_ID, 'T1w',
                                      'Diffusion')

        # Read the Nifti file, bvals and bvecs
        subj_bvals = np.loadtxt(os.path.join(subj_data_path, 'bvals'))
        subj_bvecs = np.loadtxt(os.path.join(subj_data_path, 'bvecs'))

        all_bvals = subj_bvals * 1e6
        all_bvecs = np.transpose(subj_bvecs)

        subj_Acq_Scheme = acquisition_scheme_from_bvalues(all_bvals, all_bvecs)
        print(subj_Acq_Scheme.print_acquisition_info)

        print('Loading the Nifti Data ...')
        data_start_time = time.time()

        subj_babel_object = nib.load(
            os.path.join(subj_data_path, 'data.nii.gz'))
        subj_data = subj_babel_object.get_fdata()
        axial_slice_data = subj_data[:, :, 25:27, :]

        data_end_time = time.time()
        data_time = int(np.round(data_end_time - data_start_time))

        print('Data Loaded ... Time Taken: {}'.format(data_end_time -
                                                      data_start_time))
        print('The Data Dimensions are: {}'.format(subj_data.shape))

        #### IVIM ####
        print('Fitting IVIM ...')
        fit_start_time = time.time()
        ivim_fit_dmipy_fixed = ivim_Dstar_fixed(subj_Acq_Scheme,
                                                subj_data,
                                                mask=subj_data[..., 0] > 0)
        fit_end_time = time.time()
        print('IVIM Fitting Completed ... Time Taken to fit: {}'.format(
            fit_end_time - fit_start_time))
        fit_time = int(np.round(fit_end_time - fit_start_time))

        # Get List of Estimated Parameter Names
        # para_Names_list = ivim_fit_dmipy_fixed.parameter_names

        fitted_parameters = ivim_fit_dmipy_fixed.fitted_parameters

        para_Names_list = []
        for key, value in fitted_parameters.items():
            para_Names_list.append(key)

        ### Nifti Saving Part
        # Create a directory per subject
        subj_method_save_path = os.path.join(subj_save_path, method_name)
        if not os.path.exists(subj_method_save_path):
            os.mkdir(subj_method_save_path)

        # Retrieve the affine from already Read Nifti file to form the header
        affine = subj_babel_object.affine

        # Loop over fitted parameters name list
        for each_fitted_parameter in para_Names_list:
            new_img = nib.Nifti1Image(fitted_parameters[each_fitted_parameter],
                                      affine)

            # Form the file path
            f_name = each_fitted_parameter + '.nii.gz'
            param_file_path = os.path.join(subj_method_save_path, f_name)

            nib.save(new_img, param_file_path)

    return None
Example #37
MDD_ind = np.argwhere(subjects>100)[:,0]
sessions = [1,3]
nDays = len(sessions)
average_within_mat = np.zeros((n_systems,n_systems,nSub,nDays))
average_one_vs_all = np.zeros((n_systems,nSub,nDays))
# NOW CALCULATE DATA FOR SUBJECTS
for s in np.arange(nSub):
	subjectNum=subjects[s]
	bids_id = 'sub-{0:03d}'.format(subjectNum)
	for ses in np.arange(nDays):
		subjectDay=sessions[ses]
		ses_id = 'ses-{0:02d}'.format(subjectDay)
		clean_path = noise_save_dir + '/' + bids_id + '/' + ses_id
		cleaned_image = '{0}/{1}_{2}_task_rest_glm.nii.gz'.format(clean_path,bids_id,ses_id)

		# standardize=True zeroes voxels with no variance (e.g. outside the
		# brain) so they are effectively excluded from the time series
		masker = NiftiLabelsMasker(labels_img=powerAtlas, standardize=True,
		                           memory='nilearn_cache', verbose=5)
		time_series = masker.fit_transform(cleaned_image) # now data is n time points x 264 nodes
		time_series_good_labels = time_series[:,all_good_labels] # now data is in n time points x 227 nodes
		correlation_measure = ConnectivityMeasure(kind='correlation')
		correlation_matrix = correlation_measure.fit_transform([time_series_good_labels])[0] # takes correlation for all 227 nodes
		np.fill_diagonal(correlation_matrix,np.nan) # to make sure you don't get the same node in the within connectivity difference
		for row in np.arange(n_systems):
			for col in np.arange(n_systems):
				if row == col: # diagonal
					average_within_mat[row,col,s,ses] = calculateWithinConnectivity(systems_to_keep[row],correlation_matrix,complete_labels,system,all_good_ROI)
				#else:
				#	average_within_mat[row,col,s,ses] = calculatePairwiseConnectivity(systems_to_keep[row],systems_to_keep[col],correlation_matrix,complete_labels,system,all_good_ROI)
			# now calculate oneVsAll
Example #38
0
def getISLEPad():
    mainPath = 'F:/Research/isles2/'
    newPath = 'F:/Research/isles_padded/'

    # Each case has the same set of volumes; all get identical z-padding
    volume_names = ['CT_MTT.nii', 'CT_CBF.nii', 'CT_CBV.nii', 'CT_Tmax.nii',
                    'OT.nii']

    for x in range(1, 95):
        path = mainPath + 'case_' + str(x)
        normPath = newPath + 'case_' + str(x)
        # Skip cases that are missing the MTT map
        if not os.path.exists(path + '/CT_MTT.nii'):
            continue
        if not os.path.exists(normPath):
            os.mkdir(normPath)

        for name in volume_names:
            n1_img = nib.load(path + '/' + name)
            img = n1_img.get_fdata()
            # add 18 slices of zero padding on both ends of the z axis
            padded = np.pad(img, ((0, 0), (0, 0), (18, 18)),
                            'constant',
                            constant_values=(0, 0))
            new_img = nib.Nifti1Image(padded, n1_img.affine, n1_img.header)
            nib.save(new_img, normPath + '/' + name)
Example #39
0
        print("Error, file missing. #t1:{}, #masks:{}".format(
            len(filenames), len(masks)))

    print("Using model:", model_filename)

    # used only for printing result
    mean_dice = 0
    pred_vols = []
    gt_vols = []

    roc_aucs = []
    precision_scores = []

    for filename, mask in zip(filenames, masks):
        # load nifti file data
        nii_obj = nib.load(os.path.join(DATA_DIR, filename))
        nii_img = nii_obj.get_data()
        header = nii_obj.header
        affine = nii_obj.affine

        #print("nii image shape = {}".format(nii_img.shape))

        # load mask file data
        mask_obj = nib.load(os.path.join(DATA_DIR, mask))
        mask_img = mask_obj.get_data()

        # pad and reshape to account for implicit "1" channel
        # Uncomment if using just t1
        nii_img = np.reshape(nii_img, nii_img.shape + (1, ))

        orig_shape = nii_img.shape
Example #40
0
    def start(self, ds, **kwargs):
        print("Split-Labels started..")

        run_dir = os.path.join(WORKFLOW_DIR, kwargs['dag_run'].run_id)
        batch_folders = [
            f for f in glob.glob(os.path.join(run_dir, BATCH_NAME, '*'))
        ]

        print("Found {} batches".format(len(batch_folders)))

        for batch_element_dir in batch_folders:
            input_dir = os.path.join(batch_element_dir, self.operator_in_dir)
            output_dir = os.path.join(batch_element_dir, self.operator_out_dir)

            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Search for NIFIs in {}".format(input_dir))
            batch_nifti_files = sorted(
                glob.glob(os.path.join(input_dir, "**", "*.nii*"),
                          recursive=True))
            print("Found {} NIFTI files.".format(len(batch_nifti_files)))
            if len(batch_nifti_files) == 0:
                print("No NIFTI file was found!")
                print("abort!")
                exit(1)

            for nifti in batch_nifti_files:
                print("Loading labels...")
                label_names = self.get_labels_from_json(
                    multilabel_dir=os.path.dirname(nifti))
                print("Loading NIFTI: {}".format(nifti))
                img = nib.load(nifti)
                data = img.get_fdata(dtype=np.float16)
                label_values = np.unique(data).astype(
                    img.header.get_data_dtype())[1:]

                print("Found {} labels...".format(len(label_values)))
                if len(label_values) == 0:
                    print("No labels could be idenified!")
                    print("abort!")
                    exit(1)

                if len(label_values) > len(label_names):
                    print(
                        "There were more labels detected than label-names provided!"
                    )
                    print("abort!")
                    exit(1)

                print("Splitting multilabel into single NIFTI-files!")

                for label in label_values:
                    label_name = label_names[label]
                    # keep only this label's voxels, then binarize them
                    tmp_mask = (data == label) * data
                    tmp_mask[tmp_mask == label] = 1

                    new_image = nib.nifti2.Nifti1Image(dataobj=tmp_mask.astype(
                        img.header.get_data_dtype()),
                                                       header=img.header,
                                                       affine=img.affine)
                    # new_image = nib.nifti2.Nifti2Image(dataobj=tmp_mask.astype(header.get_data_dtype()),header=img.header,affine=img.affine)
                    print("Saving extracted mask {}: {}".format(
                        label, label_name))
                    nib.save(
                        new_image,
                        os.path.join(output_dir,
                                     '{}.nii.gz'.format(label_name)))

                print("done")
Example #41
0
def nifti4pycortex(nifti_file):
    """Load nifti file for pycortex visualization."""
    data = nib.load(nifti_file).get_data()
    ndata = np.rollaxis(data, 0, 3)
    ndata = np.rollaxis(ndata, 0, 2)
    return ndata
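
# A small illustration (hypothetical shapes) of the two np.rollaxis calls
# above, which reverse the first three axes so (x, y, z) becomes (z, y, x):
#
#   data = np.zeros((10, 20, 30))
#   ndata = np.rollaxis(data, 0, 3)   # shape (20, 30, 10)
#   ndata = np.rollaxis(ndata, 0, 2)  # shape (30, 20, 10)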
Example #42
0
"""
Plot Haxby masks
=================

Small script to plot the masks of the Haxby dataset.
"""
import numpy as np
import matplotlib.pyplot as plt

import nibabel

from nilearn import datasets
data = datasets.fetch_haxby()
bold_img = nibabel.load(data.func[0])
# Build the mean image because we have no anatomic data
fmri_data = bold_img.get_data().astype(float)
mean_img = fmri_data.mean(axis=-1)

z = 25

fig = plt.figure(figsize=(4, 5.4))
fig.subplots_adjust(bottom=0., top=1., left=0., right=1.)
plt.axis('off')

plt.imshow(mean_img[:, 4:58, z].T,
           cmap=plt.cm.gray,
           interpolation='nearest',
           origin='lower')

mask_vt = nibabel.load(data.mask_vt[0]).get_data()
plt.contour(mask_vt[:, 4:58, z].astype(bool).T,
Example #43
0
def test_check_orient_and_dims():
    """
    Test check_orient_and_dims functionality
    """
    # This test keeps a bak folder in its test_dir.
    # To re-run it, rm the data in test_dir and cp it back from bak.
    base_dir = str(Path(__file__).parent / "examples")
    test_dir = f"{base_dir}/003/test_out/test_check_orient_and_dims"

    # Anatomical: 1x1x1mm
    anat_LAS = f"{test_dir}/anat_LAS/sub-003_T1w_LAS.nii.gz"
    anat_RAS = f"{test_dir}/anat_RAS/sub-003_T1w_RAS.nii.gz"
    # Diffusion: 2x2x2mm
    dmri_LAS = f"{test_dir}/dmri_LAS/iso_eddy_corrected_data_denoised_LAS.nii.gz"
    dmri_RAS = f"{test_dir}/dmri_RAS/iso_eddy_corrected_data_denoised_RAS.nii.gz"
    bvecs_LAS = f"{test_dir}/dmri_LAS/bvec.orig.bvec"
    bvecs_RAS = f"{test_dir}/dmri_RAS/bvec.trans.bvec"

    anat_LAStoRAS = utils.check_orient_and_dims(anat_LAS,
                                                test_dir,
                                                '2mm',
                                                bvecs=None)
    anat_RAStoRAS = utils.check_orient_and_dims(anat_RAS,
                                                test_dir,
                                                '2mm',
                                                bvecs=None)
    dmri_LAStoRAS, bvecs_LAStoRAS = utils.check_orient_and_dims(
        dmri_LAS, test_dir, '1mm', bvecs=bvecs_LAS)
    dmri_RAStoRAS, bvecs_RAStoRAS = utils.check_orient_and_dims(
        dmri_RAS, test_dir, '1mm', bvecs=bvecs_RAS)

    anat_LAStoRAS = nib.load(anat_LAStoRAS)

    anat_RAStoRAS = nib.load(anat_RAStoRAS)

    dmri_LAStoRAS = nib.load(dmri_LAStoRAS)

    dmri_RAStoRAS = nib.load(dmri_RAStoRAS)

    # Assert that the output affines are identical.
    anat_check = np.allclose(anat_LAStoRAS.affine.astype('int'),
                             anat_RAStoRAS.affine.astype('int'))
    dmri_check = np.allclose(dmri_LAStoRAS.affine.astype('int'),
                             dmri_RAStoRAS.affine.astype('int'))

    # Assert that voxel dimensions in outputs are correct.
    anat_LAStoRAS_dims = anat_LAStoRAS.header.get_zooms()
    anat_RAStoRAS_dims = anat_RAStoRAS.header.get_zooms()
    dmri_LAStoRAS_dims = dmri_LAStoRAS.header.get_zooms()
    dmri_RAStoRAS_dims = dmri_RAStoRAS.header.get_zooms()

    anat_LAStoRAS_success = True
    anat_RAStoRAS_success = True
    dmri_LAStoRAS_success = True
    dmri_RAStoRAS_success = True

    for anat_LAStoRAS_dim in anat_LAStoRAS_dims[:3]:
        if anat_LAStoRAS_dim != 2:
            anat_LAStoRAS_success = False

    for anat_RAStoRAS_dim in anat_RAStoRAS_dims[:3]:
        if anat_RAStoRAS_dim != 2:
            anat_RAStoRAS_success = False

    for dmri_LAStoRAS_dim in dmri_LAStoRAS_dims[:3]:
        if dmri_LAStoRAS_dim != 1:
            dmri_LAStoRAS_success = False

    print(dmri_RAStoRAS_dims)
    for dmri_RAStoRAS_dim in dmri_RAStoRAS_dims[:3]:
        if dmri_RAStoRAS_dim != 1:
            dmri_RAStoRAS_success = False

    # Check affines
    assert anat_check
    assert dmri_check
    # Check voxel dimensions
    assert anat_LAStoRAS_success
    assert anat_RAStoRAS_success
    assert dmri_LAStoRAS_success
    assert dmri_RAStoRAS_success
Example #44
0
def _tpm2roi(
    in_tpm,
    in_mask,
    mask_erosion_mm=None,
    erosion_mm=None,
    mask_erosion_prop=None,
    erosion_prop=None,
    pthres=0.95,
    newpath=None,
):
    """
    Generate a mask from a tissue probability map
    """
    tpm_img = nb.load(in_tpm)
    roi_mask = (tpm_img.get_fdata() >= pthres).astype(np.uint8)

    eroded_mask_file = None
    erode_in = (mask_erosion_mm is not None
                and mask_erosion_mm > 0) or (mask_erosion_prop is not None
                                             and mask_erosion_prop < 1)
    if erode_in:
        eroded_mask_file = fname_presuffix(in_mask,
                                           suffix="_eroded",
                                           newpath=newpath)
        mask_img = nb.load(in_mask)
        mask_data = np.asanyarray(mask_img.dataobj).astype(np.uint8)
        if mask_erosion_mm:
            iter_n = max(
                int(mask_erosion_mm / max(mask_img.header.get_zooms())), 1)
            mask_data = nd.binary_erosion(mask_data, iterations=iter_n)
        else:
            orig_vol = np.sum(mask_data > 0)
            while np.sum(mask_data > 0) / orig_vol > mask_erosion_prop:
                mask_data = nd.binary_erosion(mask_data, iterations=1)

        # Store mask
        eroded = nb.Nifti1Image(mask_data, mask_img.affine, mask_img.header)
        eroded.set_data_dtype(np.uint8)
        eroded.to_filename(eroded_mask_file)

        # Mask TPM data (no effect if not eroded)
        roi_mask[~mask_data] = 0

    # shrinking
    erode_out = (erosion_mm is not None
                 and erosion_mm > 0) or (erosion_prop is not None
                                         and erosion_prop < 1)
    if erode_out:
        if erosion_mm:
            iter_n = max(int(erosion_mm / max(tpm_img.header.get_zooms())), 1)
            roi_mask = nd.binary_erosion(roi_mask, iterations=iter_n)
        else:
            orig_vol = np.sum(roi_mask > 0)
            while np.sum(roi_mask > 0) / orig_vol > erosion_prop:
                roi_mask = nd.binary_erosion(roi_mask, iterations=1)

    # Create image to resample
    roi_fname = fname_presuffix(in_tpm, suffix="_roi", newpath=newpath)
    roi_img = nb.Nifti1Image(roi_mask, tpm_img.affine, tpm_img.header)
    roi_img.set_data_dtype(np.uint8)
    roi_img.to_filename(roi_fname)
    return roi_fname, eroded_mask_file or in_mask
Example #45
0
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs])
    assert_outputs_exist(parser, args, args.frf_file)

    vol = nib.load(args.input)
    data = vol.get_data()

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    check_b0_threshold(args, bvals.min())
    gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())

    if args.min_fa_thresh < 0.4:
        logging.warning(
            'Minimal FA threshold ({}) seems really small. Make sure it '
            'makes sense for this dataset.'.format(args.min_fa_thresh))

    if args.mask:
        mask = nib.load(args.mask).get_data().astype(bool)
        data = applymask(data, mask)

    if args.mask_wm:
        wm_mask = nib.load(args.mask_wm).get_data().astype('bool')
    else:
        wm_mask = np.ones_like(data[..., 0], dtype=bool)
        logging.warning(
            'No white matter mask specified! mask_data will be used instead, '
            'if it has been supplied. \nBe *VERY* careful about the '
            'estimation of the fiber response function to ensure no invalid '
            'voxel was used.')

    data_in_wm = applymask(data, wm_mask)

    fa_thresh = args.fa_thresh
    # Iteratively trying to fit at least 300 voxels. Lower the FA threshold
    # when it doesn't work. Fail if the fa threshold is smaller than
    # the min_threshold.
    # We use an epsilon since the -= 0.05 might incur numerical imprecision.
    nvox = 0
    while nvox < args.min_nvox and fa_thresh >= args.min_fa_thresh - 0.00001:
        response, ratio, nvox = auto_response(gtab, data_in_wm,
                                              roi_center=args.roi_center,
                                              roi_radius=args.roi_radius,
                                              fa_thr=fa_thresh,
                                              return_number_of_voxels=True)

        logging.debug(
            'Number of indices is %s with threshold of %s', nvox, fa_thresh)
        fa_thresh -= 0.05

    if nvox < args.min_nvox:
        raise ValueError(
            "Could not find at least {} voxels with sufficient FA "
            "to estimate the FRF!".format(args.min_nvox))

    logging.debug("Found %i voxels with FA threshold %f for FRF estimation",
                  nvox, fa_thresh + 0.05)
    logging.debug("FRF eigenvalues: %s", str(response[0]))
    logging.debug("Ratio for smallest to largest eigen value is %f", ratio)
    logging.debug("Mean of the b=0 signal for voxels used for FRF: %f",
                  response[1])

    full_response = np.array([response[0][0], response[0][1],
                              response[0][2], response[1]])

    np.savetxt(args.frf_file, full_response)
Example #46
0
def _get_zooms(in_file):
    import nibabel as nb
    return tuple(nb.load(in_file).header.get_zooms()[:3])
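
# Hypothetical usage sketch: _get_zooms returns the voxel sizes of the first
# three axes, e.g. _get_zooms('sub-01_T1w.nii.gz') -> (1.0, 1.0, 1.0) for a
# 1 mm isotropic image.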

Example #47
0
label_vol_path = sys.argv[1]
label = sys.argv[2]
icc = int(sys.argv[3])

try:
    os.mkdir("output")
except OSError:
    pass

lut = []
meas = []

# Load mask
cc_parc_img = nib.load(label_vol_path)
cc_parc_data = cc_parc_img.get_data()
# Arrange the array to be in RAS+ space
ornt = nib.orientations.axcodes2ornt(nib.aff2axcodes(cc_parc_img.affine))
cc_parc_data = nib.orientations.apply_orientation(cc_parc_data, ornt)

vxl_size = cc_parc_img.header.get_zooms()

roi = {}
roi["label"] = label

meas = print_volume(roi, cc_parc_data, vxl_size[0] * vxl_size[1] * vxl_size[2])

with open('/output/measurement.json', 'w') as f:
    json.dump(meas, f, indent=4, separators=(',', ': '))
Example #48
0
def data_load(dirOutput, data, row, bin_tresh, colourchannel_approach,
              penalisation_approach):
    print('--\nNow processing data: ' + data)
    ''' Create output folder(s) '''
    dirOutData = dirOutput + '/' + data
    dirOutDataCom = dirOutput + '/' + data + '/IAM_combined_python/'
    dirOutDataFin = dirOutput + '/' + data + '/IAM_GPU_nifti_python/'
    dirOutDataPatch = dirOutput + '/' + data + '/IAM_combined_python/Patch/'

    if not os.path.exists(dirOutData):
        try:
            os.makedirs(dirOutData)
            os.makedirs(dirOutDataCom)
            os.makedirs(dirOutDataFin)
            os.makedirs(dirOutDataPatch)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

    try:
        if penalisation_approach:
            os.makedirs(dirOutput + '/' + data +
                        '/IAM_combined_python/Alg1_Result/')
            os.makedirs(dirOutput + '/' + data +
                        '/IAM_combined_python/Alg1_Result/Image/')
            os.makedirs(dirOutput + '/' + data +
                        '/IAM_combined_python/Alg1_Result/Data/')
            os.makedirs(dirOutput + '/' + data +
                        '/IAM_combined_python/Alg2_Result/')
            os.makedirs(dirOutput + '/' + data +
                        '/IAM_combined_python/Alg2_Result/Image/')
            os.makedirs(dirOutput + '/' + data +
                        '/IAM_combined_python/Alg2_Result/Data/')
        if colourchannel_approach:
            os.makedirs(dirOutput + '/' + data +
                        '/IAM_combined_python/ColorChannel_Result/')
            os.makedirs(dirOutput + '/' + data +
                        '/IAM_combined_python/ColorChannel_Result/Image/')
            os.makedirs(dirOutput + '/' + data +
                        '/IAM_combined_python/ColorChannel_Result/Data/')

    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    #niiDir = input_dir + '/' + data + '/'
    FLAIR_nii = nib.load(row[1])  #nib.load(niiDir + 'FLAIR.nii.gz')  #
    T1w_nii = nib.load(row[2])  # nib.load(niiDir + 'T1W.nii.gz')  #
    T2w_nii = nib.load(row[3])
    icv_nii = nib.load(row[4])  #nib.load(niiDir + 'ICV.nii.gz')
    csf_nii = nib.load(row[5])  #nib.load(niiDir + 'CSF.nii.gz')  #
    GT_nii = nib.load(row[6])  #nib.load(niiDir + 'WMH_label.nii.gz')  #

    FLAIR_data = np.squeeze(FLAIR_nii.get_data())
    icv_data = np.squeeze(icv_nii.get_data())
    csf_data = np.squeeze(csf_nii.get_data())
    T1w_data = np.squeeze(T1w_nii.get_data())
    T2w_data = np.squeeze(T2w_nii.get_data())
    gt_data = np.squeeze(GT_nii.get_data())
    ''' Make sure that all brain masks are binary masks, not probability '''
    icv_data[icv_data > bin_tresh] = 1
    icv_data[icv_data <= bin_tresh] = 0
    csf_data[csf_data > bin_tresh] = 1
    csf_data[csf_data <= bin_tresh] = 0
    gt_data[gt_data > bin_tresh] = 1
    gt_data[gt_data <= bin_tresh] = 0
    ''' Read and open NAWM data if available '''
    #nawm_nii = nib.load(niiDir + 'NAWM.nii.gz')
    #print(niiDir)
    #nawm_data = np.squeeze(nawm_nii.get_data())
    #print('NAWM:', len(nawm_data > 0))
    del csf_nii, icv_nii, T1w_nii, GT_nii  # Free memory

    return [FLAIR_data, T1w_data,
            T2w_data], icv_data, csf_data, gt_data, FLAIR_nii
Example #49
0
import nibabel as nib
import skimage.io as io
import numpy as np

img = nib.load(r'data\test\image\Case196.nii.gz')
img_arr = img.get_fdata()
img_arr = np.squeeze(img_arr)
# io.imshow needs a 2D image, so display the middle slice of the volume
io.imshow(img_arr[:, :, img_arr.shape[2] // 2])
io.show()
if __name__ == "__main__":
    base_image = "/dfsdata2/zhangyao_data/DB/Abdomen1K/Task100_FourAbdOrgans/imagesTr"
    base_label = "/dfsdata2/zhangyao_data/DB/Abdomen1K/Task100_FourAbdOrgans/labelsTr"
    out = "../data/nnUNet_raw_data/Task410_SpleenBase"
    cases = np.sort(os.listdir(base_label))
    cases = [x for x in cases if x.split("_")[0] == "spleen"]

    maybe_mkdir_p(out)
    maybe_mkdir_p(join(out, "imagesTr"))
    maybe_mkdir_p(join(out, "imagesTs"))
    maybe_mkdir_p(join(out, "labelsTr"))

    for c in cases:
        shutil.copy(join(base_image, "Spleen41", c[:-7] + "_0000.nii.gz"), join(out, "imagesTr", c[:-7] + "_0000.nii.gz"))
        # shutil.copy(join(base_label, c), join(out, "labelsTr", c))
        nii_volume = nib.load(join(base_label, c))
        seg_volume = nii_volume.get_fdata()
        seg_volume[seg_volume!=3] = 0
        seg_volume[seg_volume==3] = 1
        nib.save(nib.Nifti1Image(seg_volume, nii_volume.affine), join(out, "labelsTr", c))
        print(join(out, "imagesTr", c[:-7] + "_0000.nii.gz"))

    json_dict = {}
    json_dict['name'] = "SpleenBase"
    json_dict['description'] = ""
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = ""
    json_dict['licence'] = ""
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "CT",
Example #51
0
def bigmask_creation(subj, subjects_dir, masks_dir):
    import nibabel as nib
    import numpy as np

    #mangle_hemi = lambda x:'%s_%s'%(x[-9:-7],x[:-10])
    #demangle_hemi = lambda x:'%s-%s.nii.gz'%(x[3:],x[:2])

    #parc_orig = 'laus125'
    #parc = '%s_nonoverlapping' % parc_orig
    for parc in ('laus250_wmreg', ):

        #masks_dir=os.path.join(masks_top_lev_dir,parc)
        centroids_dir = os.path.join(masks_dir, parc)

        #copy parc_orig to parc if parc doesnt exist yet
        if not os.path.exists(masks_dir):
            raise ValueError('No such parcellation')
            #os.makedirs(masks_dir)
            #import shutil
            #shutil.copytree(centroids_dir,masks_dir,symlinks=True)

        mask_template = nib.load(
            os.path.join(centroids_dir, 'bankssts_1-lh.nii.gz'))

        mask_shape = mask_template.shape
        big_mask = np.zeros((mask_shape[0], mask_shape[1], mask_shape[2]),
                            dtype=int)
        binary_mask = big_mask.copy()

        print('building mask in diffusion space with shape {0}'.format(
            mask_shape))

        #create centroids
        print "collecting centroids"
        centroids = {}

        #make sure dirents are in alphabetical order
        dirents = []
        for hemi_path in ('lh.nii.gz', 'rh.nii.gz'):
            for dirent in sorted(os.listdir(centroids_dir)):
                if dirent.endswith(hemi_path):
                    dirents.append(dirent)

        for i, dirent in enumerate(dirents):
            print('DBG: collecting centroid for subsequent mask', i, dirent)
            try:
                r = nib.load(os.path.join(centroids_dir, dirent)).get_data()
                r[np.where(r)] = 1
                centroids.update({i: np.mean(np.where(r), axis=1)})
                big_mask += r * (i + 1)
                binary_mask += r
            except nib.spatialimages.ImageFileError:
                continue

        #binary_mask[binary_mask>0]=1
        print "all centroids collected"

        xi, yi, zi = np.where(binary_mask > 1)

        for x, y, z in zip(xi, yi, zi):
            #print x,y,z
            vox = np.array((x, y, z))
            closest_centroid = 0
            dist = np.inf
            nr_collisions = 2  #divisor starts at 2
            #ensuring that a random voxel will be chosen
            for i, centroid in centroids.items():
                #print vox,centroid
                cent_dist = np.linalg.norm(vox - centroid)
                if cent_dist < dist:
                    dist = cent_dist
                    closest_centroid = i + 1
                elif cent_dist == dist:
                    # break ties uniformly at random (divisor starts at 2)
                    if np.random.random() < 1. / nr_collisions:
                        closest_centroid = i + 1
                    nr_collisions += 1

            big_mask[x, y, z] = closest_centroid

        img = nib.Nifti1Image(big_mask, mask_template.affine,
                              mask_template.header)
        nib.save(img, os.path.join(masks_dir, 'bigmask_%s.nii.gz' % parc))

        print('%s bigmask saved' % parc)
Example #52
0
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_hdf5, args.in_labels],
                        args.force_labels_list)

    log_level = logging.WARNING
    if args.verbose:
        log_level = logging.INFO
    logging.basicConfig(level=log_level)
    coloredlogs.install(level=log_level)

    measures_to_compute = []
    measures_output_filename = []
    if args.volume:
        measures_to_compute.append('volume')
        measures_output_filename.append(args.volume)
    if args.streamline_count:
        measures_to_compute.append('streamline_count')
        measures_output_filename.append(args.streamline_count)
    if args.length:
        measures_to_compute.append('length')
        measures_output_filename.append(args.length)
    if args.similarity:
        measures_to_compute.append('similarity')
        measures_output_filename.append(args.similarity[1])

    dict_maps_out_name = {}
    if args.maps is not None:
        for in_folder, out_name in args.maps:
            measures_to_compute.append(in_folder)
            dict_maps_out_name[in_folder] = out_name
            measures_output_filename.append(out_name)

    dict_metrics_out_name = {}
    if args.metrics is not None:
        for in_name, out_name in args.metrics:
            # Verify that all metrics are compatible with each other
            if not is_header_compatible(args.metrics[0][0], in_name):
                raise IOError('Metrics {} and  {} do not share a compatible '
                              'header'.format(args.metrics[0][0], in_name))

            # This is necessary to support more than one map for weighting
            measures_to_compute.append((in_name, nib.load(in_name)))
            dict_metrics_out_name[in_name] = out_name
            measures_output_filename.append(out_name)

    assert_outputs_exist(parser, args, measures_output_filename)
    if not measures_to_compute:
        parser.error('No connectivity measures were selected, nothing '
                     'to compute.')

    logging.info('The following measures will be computed and saved: {}'.format(
        measures_output_filename))

    if args.include_dps:
        if not os.path.isdir(args.include_dps):
            os.makedirs(args.include_dps)
        logging.info('data_per_streamline weighting is activated.')

    img_labels = nib.load(args.in_labels)
    data_labels = get_data_as_label(img_labels)
    if not args.force_labels_list:
        labels_list = np.unique(data_labels)[1:].tolist()
    else:
        labels_list = np.loadtxt(args.force_labels_list,
                                 dtype=np.int16).tolist()

    comb_list = list(itertools.combinations(labels_list, r=2))
    if not args.no_self_connection:
        comb_list.extend(zip(labels_list, labels_list))
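    # itertools.combinations yields each unordered (i, j) pair exactly once;
    # the (i, i) pairs added above keep self-connections (the matrix diagonal)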

    nbr_cpu = validate_nbr_processes(parser, args, args.nbr_processes)
    measures_dict_list = []
    if nbr_cpu == 1:
        for comb in comb_list:
            measures_dict_list.append(
                _processing_wrapper([
                    args.in_hdf5, img_labels, comb, measures_to_compute,
                    args.similarity, args.density_weighting, args.include_dps
                ]))
    else:
        pool = multiprocessing.Pool(nbr_cpu)
        measures_dict_list = pool.map(
            _processing_wrapper,
            zip(itertools.repeat(args.in_hdf5), itertools.repeat(img_labels),
                comb_list, itertools.repeat(measures_to_compute),
                itertools.repeat(args.similarity),
                itertools.repeat(args.density_weighting),
                itertools.repeat(args.include_dps)))
        pool.close()
        pool.join()

    # Removing None entries (combinations that do not exist)
    # Fusing the multiprocessing output into a single dictionary
    measures_dict_list = [it for it in measures_dict_list if it is not None]
    measures_dict = measures_dict_list[0]
    for dix in measures_dict_list[1:]:
        measures_dict.update(dix)

    if args.no_self_connection:
        total_elem = len(labels_list)**2 - len(labels_list)
        results_elem = len(measures_dict.keys()) * 2 - len(labels_list)
    else:
        total_elem = len(labels_list)**2
        results_elem = len(measures_dict.keys()) * 2

    logging.info('Out of {} possible nodes, {} contain a value'.format(
        total_elem, results_elem))

    # Filling out all the matrices (symmetric) in the order of labels_list
    nbr_of_measures = len(list(measures_dict.values())[0])
    matrix = np.zeros((len(labels_list), len(labels_list), nbr_of_measures))
    for in_label, out_label in measures_dict:
        curr_node_dict = measures_dict[(in_label, out_label)]
        measures_ordering = list(curr_node_dict.keys())

        for i, measure in enumerate(curr_node_dict):
            in_pos = labels_list.index(in_label)
            out_pos = labels_list.index(out_label)

            matrix[in_pos, out_pos, i] = curr_node_dict[measure]
            matrix[out_pos, in_pos, i] = curr_node_dict[measure]

    # Saving the matrices separately with the specified name or dps
    for i, measure in enumerate(measures_ordering):
        if measure == 'volume':
            matrix_basename = args.volume
        elif measure == 'streamline_count':
            matrix_basename = args.streamline_count
        elif measure == 'length':
            matrix_basename = args.length
        elif measure == 'similarity':
            matrix_basename = args.similarity[1]
        elif measure in dict_metrics_out_name:
            matrix_basename = dict_metrics_out_name[measure]
        elif measure in dict_maps_out_name:
            matrix_basename = dict_maps_out_name[measure]
        else:
            matrix_basename = measure

        np.save(matrix_basename, matrix[:, :, i])
Example #53
0
def read_ants_stats(ants_stats_file,
                    ants_brainvols_file,
                    mri_file,
                    force_error=True):
    """
    Reads in an ANTS stats file along with associated mri_file (for voxel sizes) and converts to a measures dictionary with keys:
    ['structure':XX, 'items': [{'name': 'NVoxels', 'description': 'Number of voxels','value':XX, 'units':'unitless'},
                        {'name': 'Volume_mm3', 'description': ''Volume', 'value':XX, 'units':'mm^3'}]]
    :param ants_stats_file: path to ANTS segmentation output file named "antslabelstats"
    :param ants_brainvols_file: path to ANTS segmentation output for Bvol, Gvol, Wvol, and ThicknessSum (called antsbrainvols"
    :param mri_file: mri file to extract voxel sizes from
    :param freesurfer_lookup_table: Lookup table used to map 1st column of ants_stats_file label numbers to structure names
    :return: measures is a list of dictionaries as defined above
    """

    # fs_lookup_table = loadfreesurferlookuptable(freesurfer_lookup_table)

    # open stats file, brain vols file as pandas dataframes
    ants_stats = pd.read_csv(ants_stats_file)
    brain_vols = pd.read_csv(ants_brainvols_file)

    # load mri_file and extract voxel sizes
    img = nib.load(mri_file)
    vox_size = np.prod(list(img.header.get_zooms()))

    with open(cde_file, "r") as fp:
        ants_cde = json.load(fp)

    measures = []
    changed = False
    # iterate over columns in brain vols
    for key, j in brain_vols.T.iterrows():
        value = j.values[0]
        keytuple = ANTSDKT(
            structure=key if "vol" in key.lower() else "Brain",
            hemi=None,
            measure="Volume" if "vol" in key.lower() else key,
            unit="mm^3"
            if "vol" in key.lower() else "mm" if "Thickness" in key else None,
        )
        if str(keytuple) not in ants_cde:
            ants_cde["count"] += 1
            ants_cde[str(keytuple)] = {
                "id": f"{ants_cde['count']:0>6d}",
                "label": f"{key} ({keytuple.unit})",
            }
            if force_error:
                raise ValueError(
                    f"Key {keytuple} not found in ANTS data elements file")
            changed = True
        if "vol" in key.lower():
            measures.append(
                (f'{ants_cde[str(keytuple)]["id"]}', str(int(value))))
        else:
            measures.append((f'{ants_cde[str(keytuple)]["id"]}', str(value)))

    # iterate over rows of the ANTS label stats
    for row in ants_stats.iterrows():
        structure = None
        for key, val in row[1].items():
            if key == "Label":
                segid = int(val)
                structure = get_id_to_struct(segid)
                if structure is None:
                    raise ValueError(
                        f"{int(val):d} did not return any structure")
                continue
            if "VolumeInVoxels" not in key and "Area" not in key:
                continue
            hemi, measure, unit = get_details(key, structure)
            key_tuple = ANTSDKT(structure=structure,
                                hemi=hemi,
                                measure=measure,
                                unit=unit)
            label = f"{structure} {measure} ({unit})"
            if str(key_tuple) not in ants_cde:
                ants_cde["count"] += 1
                ants_cde[str(key_tuple)] = {
                    "id": f"{ants_cde['count']:0>6d}",
                    "structure_id": segid,
                    "label": label,
                }
                if force_error:
                    raise ValueError(
                        f"Key {key_tuple} not found in ANTS data elements file"
                    )
                changed = True
            # measures.append((f'{ants_cde[str(key_tuple)]["id"]}', str(val)))

            if "VolumeInVoxels" in key:
                measure = "Volume"
                unit = "mm^3"
                key_tuple = ANTSDKT(structure=structure,
                                    hemi=hemi,
                                    measure=measure,
                                    unit=unit)
                label = f"{structure} {measure} ({unit})"
                if str(key_tuple) not in ants_cde:
                    ants_cde["count"] += 1
                    ants_cde[str(key_tuple)] = {
                        "id": f"{ants_cde['count']:0>6d}",
                        "structure_id": segid,
                        "label": label,
                    }
                    if force_error:
                        raise ValueError(
                            f"Key {key_tuple} not found in ANTS data elements file"
                        )
                    changed = True
                measures.append(
                    (f'{ants_cde[str(key_tuple)]["id"]}', str(val * vox_size)))

    if changed:
        with open(cde_file, "w") as fp:
            json.dump(ants_cde, fp, indent=2)

    return measures
Example #54
0
def linearly_corrected_matrix_creation(subj,
                                       subjdir,
                                       nmr_dir,
                                       parclist_dir,
                                       masks_dir,
                                       shell=None,
                                       trk_stem='tracts20k'):
    import subprocess
    import numpy as np
    import nibabel as nib
    import os.path as op
    print('moving on to harmonic mean algorithm')

    parc = 'laus250'
    parc_stem = 'wmreg'

    tracts_stem = trk_stem if shell is None else '%s_sgsh_%s' % (trk_stem,
                                                                 shell)

    #extract surf array
    if not os.path.exists(
            os.path.join(subjdir, subj, 'label', 'surf_%s.table.npy' % parc)):
        os.environ['SUBJECTS_DIR'] = nmr_dir

        for hemi in ('lh', 'rh'):
            labels = []

            if not op.exists(
                    op.join(subjdir, subj, 'label', '%s.%s.annot' %
                            (hemi, parc))):
                label2annot_cmd = (
                    'mris_label2annot --s %s --h %s --ldir %s '
                    '--a %s --ctab %s --no-unknown' %
                    ('%s' % subj, hemi,
                     os.path.join(subjdir, subj, 'label', parc), parc,
                     os.path.join(parclist_dir, '%s_ctab_%s.txt' %
                                  (parc, hemi))))
                print(label2annot_cmd)
                w = subprocess.call(label2annot_cmd, shell=True)

                #mris_anatomical_stats is needed to get the surface area of each
                #label which is not the same as the number of vertices
            if not op.exists(
                    op.join(subjdir, subj, 'label', '%s.%s.table' %
                            (hemi, parc))):
                extract_sa_cmd = 'mris_anatomical_stats -a %s -f %s %s %s' % (
                    os.path.join(subjdir, subj, 'label', '%s.%s.annot' %
                                 (hemi, parc)),
                    os.path.join(subjdir, subj, 'label', '%s.%s.table' %
                                 (hemi, parc)), '%s' % subj, hemi)
                print(extract_sa_cmd)
                p = subprocess.call(extract_sa_cmd, shell=True)

        #build surf array
        lh_table = open(
            os.path.join(subjdir, subj, 'fsrecon', 'label',
                         'lh.%s.table' % parc))
        rh_table = open(
            os.path.join(subjdir, subj, 'fsrecon', 'label',
                         'rh.%s.table' % parc))

        labels_cmp_fname = op.join(parclist_dir, 'orders',
                                   '%s_cmp_all.txt' % parc)
        labels = open(labels_cmp_fname)

        a = []
        for ln in labels:
            a.append(ln.strip())

        surfArray = [0] * len(a)

        for i, r in enumerate(a):
            if r[:2] == 'rh':
                for line in rh_table:
                    tokens = line.split()
                    if tokens and tokens[0].startswith(r[3:]):
                        surfArray[i] = tokens[2]
            if r[:2] == 'lh':
                for line in lh_table:
                    tokens = line.split()
                    if tokens and tokens[0].startswith(r[3:]):
                        print(line)
                        surfArray[i] = tokens[2]
            rh_table.seek(0, 0)
            lh_table.seek(0, 0)
        np.save(
            os.path.join(subjdir, subj, 'fsrecon', 'label',
                         'surf_%s.table.npy' % parc), surfArray)

        for file in (lh_table, rh_table, labels):
            file.close()

    #calculate fiber lengths and then add values to matrix in
    #single iteration over track file
    roi_mask = nib.load(
        os.path.join(masks_dir,
                     'bigmask_%s_%s.nii.gz' % (parc, parc_stem))).get_data()

    nr_rois = int(np.max(roi_mask))
    connection_matrix = np.zeros((nr_rois, nr_rois))

    def euclidean_track_length(xyz_array):
        '''see docstring in connectomemapper util.length'''

        xyz = np.asarray(xyz_array)
        if xyz.shape[0] < 2:
            return 0
        dists = np.sqrt((np.diff(xyz, axis=0)**2).sum(axis=1))
        return np.sum(dists)
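
    # Worked example: for points (0,0,0) -> (3,4,0) -> (3,4,12) the segment
    # lengths are 5 and 12, so the euclidean track length is 5 + 12 = 17.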

    def fiber_endpoints(fiber, voxel_size, roi_mask, fov):
        '''
        returns a 2x1 matrix with indices of the masks containing the
        start and end points of this fiber.
    
        adapted from cmp.connectionmatrix.creatematrix
        '''
        f = fiber[0]

        start_xyz = np.array(f[0, :], dtype=int)
        end_xyz = np.array(f[-1, :], dtype=int)

        #account for the voxel size in the trk file which is
        #some random number based on the trk file format.
        #also account for the difference in FOV between the trk
        #file and the ROImask image which is independent.
        #print fov, roi_mask.shape, voxel_size

        # print voxel_size

        voxel_size = (2, 2, 2)
        # voxel_size = (1,1,2)

        voxel_size = voxel_size * fov / roi_mask.shape

        start_voxel = start_xyz // voxel_size
        end_voxel = end_xyz // voxel_size

        #DSI studio prints out voxel size as 1,1,1 no matter what (I think?)
        #start_voxel = start_xyz // (voxel_size*2)
        #end_voxel = end_xyz // (voxel_size*2)

        # print fov, roi_mask.shape
        # print start_xyz, end_xyz, voxel_size, start_voxel, end_voxel

        # dims_to_flip = (0,1,)
        # dims_to_flip = (1,2,)
        dims_to_flip = (1, )

        for i in dims_to_flip:
            start_voxel[i] = roi_mask.shape[i] - start_voxel[i] - 1
            end_voxel[i] = roi_mask.shape[i] - end_voxel[i] - 1

        # index the mask with integer voxel coordinates
        start_voxel = start_voxel.astype(int)
        end_voxel = end_voxel.astype(int)
        start_roi = roi_mask[start_voxel[0], start_voxel[1], start_voxel[2]]
        end_roi = roi_mask[end_voxel[0], end_voxel[1], end_voxel[2]]

        return int(start_roi) - 1, int(end_roi) - 1

    #TRACKVIS WAY

    low_cutoff, high_cutoff = 20, 500

    #if not os.path.exists(os.path.join(subjdir, subj, 'fiber_lengths.npy')):
    #    tracks_file = os.path.join(subjdir, subj, '%s.trk'%tracts_stem)
    #    streams, hdr = nib.trackvis.read(tracks_file, as_generator=True)
    #if int(hdr['n_count']) != nr_fibers:
    #    raise Exception('The number of fibers is is not 20000')
    #    voxel_size = np.array(hdr['voxel_size'], dtype=int)
    #    nr_fibers = hdr['dim']

    #low_cutoff, high_cutoff = -np.inf, np.inf

    #    for i,fib in enumerate(streams):
    #        fiber_length = euclidean_track_length(fib[0])
    #
    #        #apply fiber cutoffs -- length filtering as in CMP
    #        #spline filtering should be done earlier if needed
    #        if not low_cutoff < fiber_length < high_cutoff:
    #            continue
    #
    #        start_roi, end_roi = fiber_endpoints(fib, voxel_size, roi_mask,
    #            hdr['dim'])
    #
    #        if start_roi==-1 or end_roi==-1:
    #            continue
    #        connection_matrix[start_roi, end_roi] += 1./fiber_length
    #    #np.save(os.path.join(subjdir, subj, 'fiber_lengths.npy'), fiber_lengths)

    #TEXT FILE WAY
    text_file = os.path.join(subjdir, subj, '%s.txt' % tracts_stem)

    with open(text_file) as fd:
        for ln in fd:
            tract = np.reshape(np.array(ln.split(), dtype=float), (-1, 3))
            fiber_length = euclidean_track_length(tract)

            if not low_cutoff < fiber_length < high_cutoff:
                continue

            # get endpoints manually, without dealing with trackvis space
            start_voxel = np.floor(tract[0]).astype(int)
            end_voxel = np.floor(tract[-1]).astype(int)

            # flip dimensions
            dims_to_flip = (1, )

            for i in dims_to_flip:
                start_voxel[i] = roi_mask.shape[i] - start_voxel[i] - 1
                end_voxel[i] = roi_mask.shape[i] - end_voxel[i] - 1

            start_roi = int(roi_mask[start_voxel[0], start_voxel[1],
                                     start_voxel[2]]) - 1
            end_roi = int(roi_mask[end_voxel[0], end_voxel[1],
                                   end_voxel[2]]) - 1

            if start_roi == -1 or end_roi == -1:
                continue

            connection_matrix[start_roi, end_roi] += 1. / fiber_length

    connection_matrix = connection_matrix + connection_matrix.T
    print('finished evaluating fibers')

    #normalize by surface area

    roisl, roisr = [], []
    print "didn't find these ROIs"
    with open(os.path.join(parclist_dir, '%s_entries' % parc)) as fd:
        for ln in fd:
            for hemi, rois in zip(('lh', 'rh'), (roisl, roisr)):
                roi = '%s-%s' % (ln.strip(), hemi)
                if not os.path.exists(
                        os.path.join(masks_dir, '%s_%s' %
                                     (parc, parc_stem), '%s.nii.gz' % roi)):
                    print(masks_dir, '%s_%s' % (parc, parc_stem), roi)
                    continue
                rois.append(roi)
    rois = roisl + roisr

    roi_ord = []
    mangle_hemi = lambda s: '%s-%s' % (s[3:], s[:2])
    with open(os.path.join(parclist_dir, '%s_cmp.txt' % parc)) as fd:
        for ln in fd:
            roi_ord.append(mangle_hemi(ln.strip()))
    surfs_table = np.load(
        os.path.join(subjdir, subj, 'fsrecon', 'label',
                     'surf_%s.table.npy' % parc))

    sa = [int(surfs_table[roi_ord.index(roi)]) for roi in rois]

    normalization = (np.tile(sa,
                             (np.size(sa), 1)) + np.tile(sa,
                                                         (np.size(sa), 1)).T)
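    # normalization[i, j] == sa[i] + sa[j], so the scaling below applies a
    # 2 / (SA_i + SA_j) surface-area correction to each connection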

    print('finished normalizing')

    connection_matrix = connection_matrix * 2 / normalization

    #np.save( os.path.join(subjdir, subj, 'linear_harmonic_connections.npy'), connection_matrix)
    #np.save( os.path.join(subjdir, subj, 'linear_normalization.npy'), normalization)

    np.fill_diagonal(connection_matrix, 0)

    np.save(
        os.path.join(subjdir, subj,
                     'linear_harmonic_%s_alph_%s.npy' % (parc, trk_stem)),
        connection_matrix)
Example #55
0
def nib_load(file_name):
    proxy = nib.load(file_name)
    data = proxy.get_data().astype('float32')
    proxy.uncache()
    return data
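
# proxy.uncache() releases nibabel's cached array once the float32 copy has
# been made, keeping memory flat when nib_load is called over many volumes.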
Example #56
0
from nibabel.processing import resample_to_output
from tqdm import tqdm
import numpy as np

path_bis = "SpineWeb/SpineWeb_15_downsampled"
if not os.path.isdir(path_bis):
    os.mkdir(path_bis)

path_img = 'SpineWeb/SpineWeb_15'

for folder in tqdm(os.listdir(path_img)):
    if "sub" in folder:
        im_file = os.path.join(path_img, folder) + "/" + folder + "_CT.nii.gz"
        label_file = os.path.join(
            path_img, folder) + "/" + folder + "_segmentation.nii.gz"
        im = nib.load(im_file)
        seg_img = nib.load(label_file)

        im = resample_to_output(im, (1, 1, 3), order=3)
        seg_img = resample_to_output(seg_img, (1, 1, 3), order=0)

        affine = seg_img.affine
        # np.unique and boolean indexing need the voxel array, not the image
        seg = seg_img.get_fdata()
        un = np.unique(seg)
        # relabel against a frozen copy so earlier relabelings cannot collide
        orig_seg = seg.copy()
        for i in range(1, len(un)):
            seg[orig_seg == un[i]] = len(un) - i

        if folder == "sub-013":
            print("resampling 13")
            seg[seg < 4] = 0
            seg[seg > 0] = seg[seg > 0] - 3
        if folder == "sub-011":
Example #57
0
def nii2cmu(nifti_file,
            mask_file=None,
            smooth=None,
            zscore=False,
            zscore_by_rest=False,
            rest_starts=None,
            rest_ends=None):
    if zscore_by_rest:
        rest_starts = rest_starts.strip('[]')
        rest_starts = [int(s) for s in rest_starts.split(',')]
        rest_ends = rest_ends.strip('[]')
        rest_ends = [int(s) for s in rest_ends.split(',')]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            image = nib.load(nifti_file)
            mask = NiftiMasker(mask_strategy='background',
                               smoothing_fwhm=smooth,
                               standardize=False)
            if mask_file is None:
                mask.fit(nifti_file)
            else:
                mask.fit(mask_file)
        header = image.header
        sform = image.get_sform()
        voxel_size = header.get_zooms()
        voxel_activations = np.float64(mask.transform(nifti_file)).transpose()
        rest_activations = voxel_activations[:, rest_starts[0]:rest_ends[0]]
        for i in range(1, len(rest_starts)):
            rest_activations = np.hstack(
                (rest_activations,
                 voxel_activations[:, rest_starts[i]:rest_ends[i]]))
        standard_transform = sklearn.preprocessing.StandardScaler().fit(
            rest_activations.T)
        voxel_activations = standard_transform.transform(voxel_activations.T).T
        voxel_coordinates = np.array(np.nonzero(
            mask.mask_img_.dataobj)).transpose()
        voxel_coordinates = np.hstack(
            (voxel_coordinates, np.ones((voxel_coordinates.shape[0], 1))))
        voxel_locations = (voxel_coordinates @ sform.T)[:, :3]
    else:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            image = nib.load(nifti_file)
            mask = NiftiMasker(mask_strategy='background',
                               smoothing_fwhm=smooth,
                               standardize=zscore)
            if mask_file is None:
                mask.fit(nifti_file)
            else:
                mask.fit(mask_file)

        header = image.header
        sform = image.get_sform()
        voxel_size = header.get_zooms()
        voxel_activations = np.float64(mask.transform(nifti_file)).transpose()
        voxel_coordinates = np.array(np.nonzero(
            mask.mask_img_.dataobj)).transpose()
        voxel_coordinates = np.hstack(
            (voxel_coordinates, np.ones((voxel_coordinates.shape[0], 1))))
        voxel_locations = (voxel_coordinates @ sform.T)[:, :3]

    return {'data': voxel_activations, 'R': voxel_locations}
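
# Hypothetical call illustrating the returned fields ('data' is voxels x
# timepoints, 'R' holds voxel locations in scanner millimetres):
#
#   cmu = nii2cmu('func.nii.gz', mask_file=None, smooth=6., zscore=True)
#   activations, locations = cmu['data'], cmu['R']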
Example #58
0
        group_img,
        display_mode='yz',
        cut_coords=[-10, 10],
        colorbar=False,
        cmap='Spectral_r',
        threshold=90,
        vmin=90,
        vmax=300,
        #title=method,
        dim=1,
        annotate=False,
        draw_cross=False,
        black_bg=True,
        figure=fig,
    )
    img_tmp = nb.load(template_gm)
    img_tmp = new_img_like(img_tmp, (img_tmp.get_data() >= 0.05).astype('int'),
                           copy_header=True)
    display.add_contours(img_tmp, color='w')
    fig.savefig('%s/group_stdev_%s.svg' % (res_path, method))

    stdev_values.append([method, group_img.get_data()])

# Plot histograms of voxel distribution above threshold
sns.set_style('darkgrid')
plt.style.use('seaborn-colorblind')
threshold = 5
fig = plt.figure(figsize=(8, 4))
for m, t in stdev_values:
    sns.kdeplot(t[t > threshold],
                shade=True,
Example #59
0
def group_reproducibility_metrics(mask_images,
                                  contrast_images,
                                  variance_images,
                                  thresholds,
                                  ngroups,
                                  method,
                                  cluster_threshold=10,
                                  number_of_samples=10,
                                  sigma=6.,
                                  do_clusters=True,
                                  do_voxels=True,
                                  do_peaks=True,
                                  swap=False):
    """
    Main function to perform reproducibility analysis, including nifti1 io

    Parameters
    ----------
    thresholds: list or 1-d array,
               the thresholds to be tested

    Returns
    -------
    cluster_rep_results: dictionary,
                         results of cluster-level reproducibility analysis
    voxel_rep_results: dictionary,
                       results of voxel-level reproducibility analysis
    peak_rep_results: dictionary,
                      results of peak-level reproducibility analysis
    """
    from nibabel import load
    from ..mask import intersect_masks

    if len(variance_images) == 0 and method != 'crfx':
        raise ValueError('Variance images are necessary')

    nsubj = len(contrast_images)

    # compute the group mask
    affine = get_affine(load(mask_images[0]))
    mask = intersect_masks(mask_images, threshold=0) > 0
    domain = grid_domain_from_binary_array(mask, affine)

    # read the data
    group_con = []
    group_var = []
    for s in range(nsubj):
        group_con.append(load(contrast_images[s]).get_data()[mask])
        if len(variance_images) > 0:
            group_var.append(load(variance_images[s]).get_data()[mask])

    group_con = np.squeeze(np.array(group_con)).T
    group_con[np.isnan(group_con)] = 0
    if len(variance_images) > 0:
        group_var = np.squeeze(np.array(group_var)).T
        group_var[np.isnan(group_var)] = 0
        group_var = np.maximum(group_var, 1.e-15)

    # perform the analysis
    voxel_rep_results = {}
    cluster_rep_results = {}
    peak_rep_results = {}

    for ng in ngroups:
        if do_voxels:
            voxel_rep_results.update({ng: {}})
        if do_clusters:
            cluster_rep_results.update({ng: {}})
        if do_peaks:
            peak_rep_results.update({ng: {}})
        for th in thresholds:
            kappa = []
            cls = []
            pk = []
            kwargs = {'threshold': th, 'csize': cluster_threshold}

            for i in range(number_of_samples):
                if do_voxels:
                    kappa.append(
                        voxel_reproducibility(group_con, group_var, domain, ng,
                                              method, swap, **kwargs))
                if do_clusters:
                    cls.append(
                        cluster_reproducibility(group_con, group_var, domain,
                                                ng, sigma, method, swap,
                                                **kwargs))
                if do_peaks:
                    pk.append(
                        peak_reproducibility(group_con, group_var, domain, ng,
                                             sigma, method, swap, **kwargs))

            if do_voxels:
                voxel_rep_results[ng].update({th: np.array(kappa)})
            if do_clusters:
                cluster_rep_results[ng].update({th: np.array(cls)})
            if do_peaks:
                peak_rep_results[ng].update({th: np.array(pk)})

    return voxel_rep_results, cluster_rep_results, peak_rep_results
Example #60
0
img_dir = 'E:\\abhivanth\\normalized_input'

out_dir = 'in_patches_20_20_16'
# x_shape = 20
# y_shape = 20
# z_shape = 16

x_shape = 20
y_shape = 20
z_shape = 16

#def main():
fns = glob(os.path.join(img_dir, '*.nii*'))
for fn in fns:
    _, base, ext = split_filename(fn)
    img = nib.load(fn).get_data().astype(np.float32).squeeze()
    if img.ndim != 3:
        print(
            f'Only 3D data supported. File {base}{ext} has dimension {img.ndim}. Skipping.'
        )
        continue

    input_filename = fn

    # itk.imread returns the image directly; no reader object or Update() call
    image = itk.imread(input_filename, itk.F)

    # if (img.shape[0] % int(x_shape)) != 0:
    #     print(f'File {base}{ext} does not ensure equal split of input image along x axis')
    #     continue