Example 1
    def _run_interface(self, runtime):
        file1 = os.path.abspath(self.inputs.asl_file)
        # get the asl file and mo file
        aslcontext1 = file1.replace('_asl.nii.gz', '_aslcontext.tsv')
        aslcontext = pd.read_csv(aslcontext1, header=None)
        m0file = file1.replace('_asl.nii.gz', '_m0scan.nii.gz')

        # get control and label asl
        idasl = aslcontext[0].tolist()
        controllist = [i for i, vol in enumerate(idasl) if vol == 'control']
        labellist = [i for i, vol in enumerate(idasl) if vol == 'label']
        m0list = [i for i, vol in enumerate(idasl) if vol == 'm0scan']
        allasl = nb.load(self.inputs.in_file)
        mask = nb.load(self.inputs.in_mask).get_fdata()
        dataasl = allasl.get_fdata()
        if len(dataasl.shape) == 5:
            raise RuntimeError('Input image (%s) is 5D.' % self.inputs.in_file)
        control_img = dataasl[:, :, :, controllist]
        label_img = dataasl[:, :, :, labellist]

        # generate the CBF time series by subtracting label from control
        cbf_data = np.subtract(control_img, label_img)
        if self.inputs.dummy_vols != 0:
            cbf_data = np.delete(cbf_data, range(0, self.inputs.dummy_vols), axis=3)
            control_img = np.delete(control_img, range(0, self.inputs.dummy_vols), axis=3)

        # MO file
        if os.path.isfile(m0file):
            # load the raw m0 image, smooth it, and average over time
            m0data_smooth = smooth_image(nb.load(m0file),
                                         fwhm=self.inputs.fwhm).get_fdata()
            avg_control = mask * np.mean(m0data_smooth, axis=3)
        elif len(m0list) > 0:
            # if no m0file, check from asl data
            modata2 = dataasl[:, :, :, m0list]
            con2 = nb.Nifti1Image(modata2, allasl.affine, allasl.header)
            m0data_smooth = smooth_image(con2, fwhm=self.inputs.fwhm).get_fdata()
            avg_control = mask*np.mean(m0data_smooth, axis=3)
        else:
            # else use average control
            control_img = dataasl[:, :, :, controllist]
            con = nb.Nifti1Image(control_img, allasl.affine, allasl.header)
            control_img1 = smooth_image(con, fwhm=self.inputs.fwhm).get_fdata()
            avg_control = mask*np.mean(control_img1, axis=3)


        self._results['out_file'] = fname_presuffix(self.inputs.in_file,
                                                    suffix='_cbftimeseries', newpath=runtime.cwd)
        self._results['out_avg'] = fname_presuffix(self.inputs.in_file,
                                                   suffix='_avg_control', newpath=runtime.cwd)
        nb.Nifti1Image(
            cbf_data, allasl.affine, allasl.header).to_filename(
            self._results['out_file'])
        nb.Nifti1Image(
            avg_control, allasl.affine, allasl.header).to_filename(
            self._results['out_avg'])

        self.inputs.out_file = os.path.abspath(self._results['out_file'])
        self.inputs.out_avg = os.path.abspath(self._results['out_avg'])
        return runtime
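
The core of the interface above is a volume-wise control minus label subtraction. A minimal self-contained sketch of that step, using a synthetic interleaved control/label series (the data and dimensions here are made up for illustration):

import numpy as np
import nibabel as nb

# Synthetic 4D ASL series: 8 volumes, interleaved control/label.
rng = np.random.default_rng(0)
data = rng.normal(100, 5, size=(4, 4, 3, 8))
idasl = ['control', 'label'] * 4

control_idx = [i for i, vol in enumerate(idasl) if vol == 'control']
label_idx = [i for i, vol in enumerate(idasl) if vol == 'label']

# Perfusion-weighted series: control minus label, pair by pair.
cbf_ts = data[..., control_idx] - data[..., label_idx]
print(nb.Nifti1Image(cbf_ts, np.eye(4)).shape)  # (4, 4, 3, 4)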
Example 2
def epi_smooth(epi_fn, mask_fn, fwhm=None):
    '''
    Smooth an EPI image with a Gaussian kernel

    Inputs:
    epi_fn - Path to EPI file
    mask_fn - Path to brain mask file
    fwhm - int. Full width at half maximum (in mm) of the smoothing kernel

    Outputs:
    out_name - Path to the smoothed EPI file
    '''

    hdr = nb.load(epi_fn)

    brain_mask = nb.load(mask_fn).get_fdata().astype(bool)

    if not fwhm:
        fwhm = 0
        print('\n\n***WARNING***\nKernel FWHM not set!\n\n')

    print('Smoothing with kernel size: {}'.format(fwhm))

    smoothed_flat = nbproc.smooth_image(img = hdr, fwhm = fwhm).get_fdata()[brain_mask]
    smoothed_epi = np.zeros_like(hdr.get_fdata())
    smoothed_epi[brain_mask] = smoothed_flat

    out_path, in_fn = os.path.split(epi_fn)
    out_fn = in_fn.split('.')[0].split('bold')[0] + '_smoothed_{}mm.nii.gz'.format(fwhm)
    out_name = os.path.join(out_path, out_fn)

    nb.Nifti1Image(smoothed_epi, header = hdr.header, affine = hdr.affine).to_filename(out_name)

    return out_name
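
A hypothetical invocation (the file names are placeholders); the function writes the smoothed image next to the input and returns its path:

out_path = epi_smooth('sub-01_task-rest_bold.nii.gz',
                      'sub-01_brain_mask.nii.gz',
                      fwhm=6)
print(out_path)  # sub-01_task-rest__smoothed_6mm.nii.gz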
Example 3
def smooth_analyze(image, fwhm, output):

    from nibabel import processing as nibproc

    img = nib.load(image)
    smoothed = nibproc.smooth_image(img, fwhm, out_class=nib.AnalyzeImage)
    nib.save(smoothed, output)
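
smooth_image returns a Nifti1Image by default; out_class (used above) switches the output container. A quick self-contained check of that behaviour:

import numpy as np
import nibabel as nib
from nibabel.processing import smooth_image

img = nib.Nifti1Image(np.zeros((4, 4, 4)), np.eye(4))
out = smooth_image(img, 6, out_class=nib.AnalyzeImage)
print(type(out).__name__)  # AnalyzeImage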
Example 4
def cbf_qei(gm, wm, csf, img, thresh=0.7):
    def fun1(x, xdata):
        d1 = np.exp(-(x[0]) * np.power(xdata, x[1]))
        return d1

    def fun2(x, xdata):
        d1 = 1 - np.exp(-(x[0]) * np.power(xdata, x[1]))
        return d1

    x1 = [0.054, 0.9272]
    x2 = [2.8478, 0.5196]
    x4 = [3.0126, 2.4419]
    scbf = smooth_image(nib.load(img), fwhm=5).get_fdata()  # smooth the image
    #load prob maps
    gmm = nib.load(gm).get_fdata()
    wmm = nib.load(wm).get_fdata()
    ccf = nib.load(csf).get_fdata()
    pbcf = 2.5 * gmm + wmm  # GM is weighted 2.5 times WM
    msk = np.array((scbf != 0) & ~np.isnan(scbf)
                   & ~np.isnan(pbcf)).astype(int)

    gm1 = np.array(gmm > thresh)
    wm1 = np.array(wmm > thresh)
    cc1 = np.array(ccf > thresh)
    r1 = np.array([0, np.corrcoef(scbf[msk == 1], pbcf[msk == 1])[1, 0]]).max()

    V = ((np.sum(gm1)-1)*np.var(scbf[gm1 > 0])
         + (np.sum(wm1)-1)*np.var(scbf[wm1 > 0])
         + (np.sum(cc1)-1)*np.var(scbf[cc1 > 0])) / (np.sum(gm1 > 0)
                                                     + np.sum(wm1 > 0)
                                                     + np.sum(cc1 > 0) - 3)

    negGM = np.sum(scbf[gm1] < 0) / (np.sum(gm1))
    GMCBF = np.mean(scbf[gm1])
    CV = V / np.abs(GMCBF)
    Q = [fun1(x1, CV), fun1(x2, negGM), fun2(x4, r1)]
    return gmean(Q)
Example 5
    def image_to_data(self, set_option):
        #Data set option specification
        if set_option == 'Train':
            path = "data/set_train/train_"
            self.Number_of_images = 278
        if set_option == 'Test':
            path = "data/set_test/test_"
            self.Number_of_images = 138

        if self.dimension_option == '2D':
            [row_low, row_up] = [0, 176]
            [column_low, column_up] = [0, 208]
            test_image_change = self.test_image[row_low:row_up,
                                                column_low:column_up]
            [w, h] = test_image_change.shape
            self.image_dimension = w * h
            self.dim = [w, h]
        elif self.dimension_option == '3D':
            [row_low, row_up] = [20, 50]
            [column_low, column_up] = [20, 50]
            [width_low, width_up] = [20, 50]
            test_image_change = self.test_image[row_low:row_up,
                                                column_low:column_up,
                                                width_low:width_up]
            [w, h, l] = test_image_change.shape
            self.image_dimension = w * h * l
            self.dim = [w, h, l]

        image_data = np.zeros((self.image_dimension, self.Number_of_images))

        #Image loading and data processing
        for i in range(1, self.Number_of_images + 1):
            print("Current image is:", i)
            nib_image = nib.load(path + str(i) + ".nii")
            nib_image = processing.smooth_image(nib_image, self.fwhm)
            image = nib_image.get_fdata()
            if self.dimension_option == '2D':
                I = image[:, :, self.slice, 0]
                I = np.asarray(I, dtype=float)
                I = I[row_low:row_up, column_low:column_up]
                # scale data
                #I = I / np.max(I)
                # Image processing
                #I = prewitt(I)  # Edge detection
                #I = gaussian(I, sigma=self.gauss)  # Gaussian blurring of the edges
                I = I / np.max(I)
            elif self.dimension_option == '3D':
                I = image[:, :, :, 0]
                I = I[row_low:row_up, column_low:column_up, width_low:width_up]
                #I = ndimage.prewitt(I, axis=-1)
                I = np.asarray(I, dtype=float)
                I = I / np.max(I)
            Iflat = I.flatten(order='C')  # Data flattening
            image_data[:, i - 1] = Iflat
        #image_data=np.transpose(image_data)
        print('image to data: step complete')
        return image_data
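
The loop above flattens each smoothed, cropped volume into one column of a (voxels x images) feature matrix; the layout in isolation:

import numpy as np

n_images, shape = 5, (4, 4, 3)
image_data = np.zeros((np.prod(shape), n_images))
for i in range(n_images):
    vol = np.random.rand(*shape)   # stand-in for a smoothed, cropped volume
    vol = vol / np.max(vol)        # per-image intensity scaling
    image_data[:, i] = vol.flatten(order='C')
print(image_data.shape)  # (48, 5)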
Example 6
def test_spatial_axes_check():
    for fname in MINC_3DS + OTHER_IMGS:
        img = nib.load(pjoin(DATA_DIR, fname))
        s_img = smooth_image(img, 0)
        assert_array_equal(img.dataobj, s_img.dataobj)
        out = resample_from_to(img, img, mode='nearest')
        assert_almost_equal(img.dataobj, out.dataobj)
        if len(img.shape) > 3:
            continue
        # Resample to output does not raise an error
        out = resample_to_output(img, voxel_sizes(img.affine))
    for fname in MINC_4DS:
        img = nib.load(pjoin(DATA_DIR, fname))
        with pytest.raises(ValueError):
            smooth_image(img, 0)
        with pytest.raises(ValueError):
            resample_from_to(img, img, mode='nearest')
        with pytest.raises(ValueError):
            resample_to_output(img, voxel_sizes(img.affine))
Example 7
def nib_smooth(file_mri, data, fwhm, tag, save_path):
    nii_file = nibabel.Nifti1Image(data, file_mri.affine, file_mri.header)
    smoothed = processing.smooth_image(nii_file, fwhm=fwhm, mode='nearest')
    smoothed_data = maxmin_norm(np.asanyarray(smoothed.dataobj))
    smoothed_file = nibabel.Nifti1Image(smoothed_data, file_mri.affine,
                                        file_mri.header)
    #     print(np.amax(smoothed_file.get_fdata()))
    nibabel.save(smoothed_file,
                 save_path + "fwhm_" + str(fwhm) + "_" + tag + ".nii")
    print("fwhm_" + str(fwhm) + "_" + tag + ".nii")
Example 8
def main():
    parser = argparse.ArgumentParser(
        description='''This is a beta script for Partial Volume Correction in PET/MRI system. ''',
        epilog="""All's well that ends well.""")

    parser.add_argument('--nameDataset', metavar='', type=str, default="hybrid",
                        help='Name for the dataset needed to be sliced.(hybrid)<str>')

    args = parser.parse_args()
    name_dataset = args.nameDataset
    nii_list = glob.glob("./data/"+name_dataset+"/*.nii")+glob.glob("./data/"+name_dataset+"/*.nii.gz")
    nii_list.sort()
    n_channel = 3

    for nii_path in nii_list:
        print("@"*60)
        print(nii_path)
        nii_file = nib.load(nii_path)
        nii_name = os.path.basename(nii_path)
        nii_name = nii_name[:nii_name.find(".")]
        nii_header = nii_file.header
        nii_affine = nii_file.affine
        nii_data = np.asanyarray(nii_file.dataobj)
        nii_data_norm = maxmin_norm(nii_data)
        nii_smooth = processing.smooth_image(nii_file, fwhm=3, mode='nearest')
        nii_smooth_zoom = zoom(np.asanyarray(nii_smooth.dataobj), zoom=(1/2, 1/2, 1))
        nii_smooth_zoom_norm = maxmin_norm(nii_smooth_zoom)
        print("nii_data_norm", nii_data_norm.shape)
        print("nii_smooth_zoom_norm", nii_smooth_zoom_norm.shape)
        # nii_smooth_norm = maxmin_norm(np.asanyarray(nii_smooth.dataobj)) * 255

        dx, dy, dz = nii_data.shape
        save_path_X = "./z1_2x/"+name_dataset+"/"
        save_path_Y = "./z1_2x/"+name_dataset+"/"
        for path in [save_path_X, save_path_Y]:
            if not os.path.exists(path):
                os.makedirs(path)

        for package in [[nii_data_norm, save_path_Y, "_Y"], [nii_smooth_zoom_norm, save_path_X, "_X"]]:
            data = package[0]
            savepath = package[1]
            suffix = package[2]

            index = create_index(data, n_channel)
            img = np.zeros((data.shape[0], data.shape[1], n_channel))
            for idx_z in range(dz):
                for idx_c in range(n_channel):
                    # img[:, :, idx_c] = zoom(nii_data[:, :, int(index[idx_z, idx_c])], zoom=resize_f)
                    img[:, :, idx_c] = data[:, :, int(index[idx_z, idx_c])]
                name2save = savepath+nii_name+"_{0:03d}".format(idx_z)+suffix+".npy"
                np.save(name2save, img)
            print("#"*20)
            print("Last:", savepath+nii_name+"_{0:03d}".format(idx_z)+suffix+".npy")
            print(str(idx_z)+" images have been saved.")
Example 9
def downsample_HCPWuMinnContrast_dataset(dataset_path,
                                         dataset_info,
                                         in_postfix,
                                         out_postfix,
                                         voxel_scale=None):
    dimensions = dataset_info['dimensions']
    modalities = dataset_info['modalities']
    path = dataset_info['path']
    pattern = dataset_info['general_pattern']
    modality_categories = dataset_info['modality_categories']
    # in_postfix = dataset_info['postfix'][2] # raw input data name
    # out_postfix = dataset_info['postfix'][0] # processed output data name

    subject_lib = dataset_info['training_subjects'] + dataset_info[
        'test_subjects']
    num_volumes = dataset_info['num_volumes'][0] + dataset_info['num_volumes'][
        1]

    if voxel_scale is None:
        downsample_scale = dataset_info['downsample_scale']
        voxel_scale = [1, 1,
                       downsample_scale]  # downsample on an axial direction

    # in_data = np.zeros((num_volumes, modalities) + dimensions)
    # out_data = np.zeros((num_volumes, modalities) + dimensions)

    for img_idx in range(num_volumes):
        for mod_idx in range(modalities):
            in_filename = os.path.join(dataset_path, path, pattern).format(
                subject_lib[img_idx], modality_categories[mod_idx], in_postfix)
            out_filename = os.path.join(dataset_path, path, pattern).format(
                subject_lib[img_idx], modality_categories[mod_idx],
                out_postfix)
            print('Processing \'' + in_filename + '\'')
            data = nib.load(in_filename)  # load raw data
            fwhm = np.array(data.header.get_zooms()[:3]) * [
                0, 0, voxel_scale[2]
            ]  # FWHM of the Gaussian filter matches the axial scale factor
            i_affine = np.dot(data.affine,
                              np.diag(voxel_scale + [1]))  # affine rescaling
            i_shape = np.array(
                data.shape) // voxel_scale  # downsampled shape of output
            data = smooth_image(data, fwhm)  # smoothed by FWHM
            data = resample_from_to(data, (i_shape, i_affine))  # resize
            nib.save(data, out_filename)
            print('Save to \'' + out_filename + '\'')

    return True
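
The smooth-then-resample pattern above maps directly onto nibabel.processing. A minimal standalone sketch of a 2x axial downsample on a synthetic image:

import numpy as np
import nibabel as nib
from nibabel.processing import smooth_image, resample_from_to

img = nib.Nifti1Image(np.random.rand(8, 8, 8), np.eye(4))
scale = [1, 1, 2]  # downsample along z only

# Anti-alias: pre-smooth along z with FWHM matched to the scale change.
fwhm = np.array(img.header.get_zooms()[:3]) * [0, 0, scale[2]]
smoothed = smooth_image(img, fwhm)

# Target grid: halved z extent, z voxel size doubled in the affine.
new_shape = np.array(img.shape) // scale
new_affine = img.affine @ np.diag(scale + [1])
down = resample_from_to(smoothed, (new_shape, new_affine))
print(down.shape)  # (8, 8, 4)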
Example 10
def cbf_qei(gm, wm, csf, img, thresh=0.7):
    """
    Quality evaluation index of CBF base on Sudipto Dolui work 
    Dolui S., Wolf R. & Nabavizadeh S., David W., Detre, J. (2017). 
    Automated Quality Evaluation Index for 2D ASL CBF Maps. ISMR 2017

    """
    def fun1(x, xdata):
        d1 = np.exp(-(x[0])*np.power(xdata, x[1]))
        return d1

    def fun2(x, xdata):
        d1 = 1-np.exp(-(x[0])*np.power(xdata, x[1]))
        return d1

    x1 = [0.054, 0.9272]
    x2 = [2.8478, 0.5196]
    x4 = [3.0126, 2.4419]
    scbf = smooth_image(nb.load(img), fwhm=5).get_fdata()
    if len(scbf.shape) > 3:
        scbf = scbf[:, :, :, 0]
    # load prob maps
    gmm = nb.load(gm).get_fdata()
    wmm = nb.load(wm).get_fdata()
    ccf = nb.load(csf).get_fdata()
    if len(gmm.shape) > 3:
        gmm = gmm[:, :, :, 0]
        wmm = wmm[:, :, :, 0]
        ccf = ccf[:, :, :, 0]
    pbcf = 2.5*gmm+wmm  # GM is weighted 2.5 times WM
    msk = np.array((scbf != 0) & ~np.isnan(scbf) & ~np.isnan(pbcf)).astype(int)

    gm1 = np.array(gmm > thresh)
    wm1 = np.array(wmm > thresh)
    cc1 = np.array(ccf > thresh)
    r1 = np.array([0, np.corrcoef(scbf[msk == 1], pbcf[msk == 1])[1, 0]]).max()

    V = ((np.sum(gm1)-1)*np.var(scbf[gm1 > 0])+(np.sum(wm1)-1)*np.var(scbf[wm1 > 0])
         + (np.sum(cc1)-1) * np.var(scbf[cc1 > 0]))/(np.sum(gm1 > 0)+np.sum(wm1 > 0)
                                                     + np.sum(cc1 > 0)-3)

    negGM = np.sum(scbf[gm1] < 0)/(np.sum(gm1))
    GMCBF = np.mean(scbf[gm1])
    CV = V/np.abs(GMCBF)
    Q = [fun1(x1, CV), fun1(x2, negGM), fun2(x4, r1)]
    return gmean(Q)
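
The QEI is the geometric mean of three calibrated quality terms (spatial variability, negative GM fraction, structural correlation). The final combination in isolation, with illustrative input values:

import numpy as np
from scipy.stats import gmean

def fun1(x, v):  # decreasing quality term
    return np.exp(-x[0] * np.power(v, x[1]))

def fun2(x, v):  # increasing quality term
    return 1 - np.exp(-x[0] * np.power(v, x[1]))

x1, x2, x4 = [0.054, 0.9272], [2.8478, 0.5196], [3.0126, 2.4419]
CV, negGM, r1 = 0.3, 0.05, 0.8  # illustrative inputs
print(gmean([fun1(x1, CV), fun1(x2, negGM), fun2(x4, r1)]))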
Example 11
def test_spatial_axes_check():
    for fname in MINC_3DS + OTHER_IMGS:
        img = nib.load(pjoin(DATA_DIR, fname))
        s_img = smooth_image(img, 0)
        assert_array_equal(img.dataobj, s_img.dataobj)
        out = resample_from_to(img, img, mode='nearest')
        assert_almost_equal(img.dataobj, out.dataobj)
        if len(img.shape) > 3:
            continue
        # Resample to output does not raise an error
        out = resample_to_output(img, voxel_sizes(img.affine))
    for fname in MINC_4DS:
        img = nib.load(pjoin(DATA_DIR, fname))
        assert_raises(ValueError, smooth_image, img, 0)
        assert_raises(ValueError, resample_from_to, img, img, mode='nearest')
        assert_raises(ValueError,
                      resample_to_output, img, voxel_sizes(img.affine))
Example 12
def image_smooth(image_fn, fwhm=None, output_dir=None):

    subj = os.path.split(output_dir)[-1]

    print('Smoothing {} with kernel size: {}'.format(subj, fwhm))

    im_name = os.path.split(image_fn)[-1].split('.')[0]

    im_hdr = nb.load(image_fn)

    smoothed = nbproc.smooth_image(img=im_hdr, fwhm=fwhm)

    output_fn = os.path.join(output_dir,
                             im_name + '_smoothed_' + str(fwhm) + '_mm.nii.gz')

    nb.Nifti1Image(smoothed.get_fdata(),
                   affine=im_hdr.affine,
                   header=im_hdr.header).to_filename(output_fn)

    return output_fn
Example 13
def process_data_DL(mat_data, name_dataset, mat_tag, mat_key, mat_name,
                    tmpl_header, tmpl_affine):

    if mat_key == "recon":
        print("Data dim:", mat_data.shape)
        save_name = "./data/" + name_dataset + "/" + mat_tag + "_" + mat_key + "/" + os.path.basename(
            mat_name)[:20] + ".nii"
        save_file = nib.Nifti1Image(mat_data,
                                    affine=tmpl_affine,
                                    header=tmpl_header)
        nib.save(save_file, save_name)
        print(save_name)

    if mat_key == "t1":
        px, py, pz = mat_data.shape
        qx, qy, qz = (256, 256, 56)
        zoom_data = zoom(mat_data, (qx / px, qy / py, qz / pz))
        standard_pet = np.zeros((256, 256, 89))
        standard_pet[:, :, 17:73] = zoom_data
        standard_pet[standard_pet < 0] = 0

        print("Old dim:", mat_data.shape)
        print("New dim:", standard_pet.shape)

        save_file = nib.Nifti1Image(standard_pet,
                                    affine=tmpl_affine,
                                    header=tmpl_header)
        save_name = "./data/" + name_dataset + "/" + mat_tag + "_gt/" + os.path.basename(
            mat_name)[:20] + ".nii"
        nib.save(save_file, save_name)
        print(save_name)

        smoothed_file = processing.smooth_image(save_file,
                                                fwhm=3,
                                                mode='nearest')
        save_name = "./data/" + name_dataset + "/" + mat_tag + "_f3/" + os.path.basename(
            mat_name)[:20] + ".nii"
        nib.save(smoothed_file, save_name)
        print(save_name)
Example 14
    def image_to_data(self, set_option):
        #Data set option specification
        if set_option == 'Train':
            path = "data/set_train/train_"
            self.Number_of_images = 278
        if set_option == 'Test':
            path = "data/set_test/test_"
            self.Number_of_images = 138
        #set output dimensions by the number of images
        test_image_size = np.zeros(self.dim)
        print(test_image_size.shape)
        [w, l] = test_image_size[20:160, 1:207].shape
        self.image_dimension = w * l
        print(w, l)
        image_data = np.zeros((self.image_dimension, self.Number_of_images))
        #Image loading and data processing
        for i in range(1, self.Number_of_images + 1):
            print("Current image is:", i)
            nib_image = nib.load(path + str(i) + ".nii")
            nib_image_smoothed = processing.smooth_image(nib_image, self.fwhm)
            image = nib_image_smoothed.get_fdata()
            if self.dimension_option == '2D':
                I = image[:, :, self.slice, 0]
                I = np.asarray(I, dtype=float)
            elif self.dimension_option == '3D':
                I = image[:, :, :, 0]
                I = np.asarray(I, dtype=float)
                #scale data
            I = I[20:160, 1:207]
            I = I / np.max(I)
            #Image processing
            I = prewitt(I)  #Edge detection
            I = gaussian(I, sigma=self.gauss)  #Gaussian blurring of the edges
            Iflat = I.flatten(order='C')  # Data flattening
            image_data[:, i - 1] = Iflat
        #image_data=np.transpose(image_data)
        print('image to data: step complete')
        return image_data
Example 15
def gen_reference(in_img, fwhm=5, newpath=None):
    """Generate a reference volume for a GE scan with few volumes."""
    import os
    import numpy as np
    import nibabel as nb
    from pathlib import Path
    from nibabel.processing import smooth_image
    from nipype.utils.filemanip import fname_presuffix
    newpath = Path(newpath or ".")
    ss = check_img(in_img)  # check_img is defined elsewhere in this module
    img = nb.load(in_img)
    if ss == 0:
        ref_data = img.get_fdata()
    else:
        ref_data = np.mean(img.get_fdata(), axis=3)
    new_file = nb.Nifti1Image(dataobj=ref_data,
                              header=img.header,
                              affine=img.affine)

    new_file = smooth_image(new_file, fwhm=fwhm)
    out_file = fname_presuffix('aslref',
                               suffix="_reference.nii.gz",
                               newpath=str(newpath.absolute()))
    new_file.to_filename(out_file)
    return out_file
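
The reference computation above is just a temporal mean followed by smoothing; stripped to essentials:

import numpy as np
import nibabel as nb
from nibabel.processing import smooth_image

img4d = nb.Nifti1Image(np.random.rand(4, 4, 3, 6), np.eye(4))
ref = nb.Nifti1Image(img4d.get_fdata().mean(axis=3), img4d.affine)
ref = smooth_image(ref, fwhm=5)
print(ref.shape)  # (4, 4, 3)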
Example 16
    def __init__(self,
                 img_filename,
                 mask_filename,
                 noise_mask_filename=None,
                 atlas_filename=None,
                 segmentation_filename=None,
                 atlas_thr=100,
                 fwhm=0,
                 asl=False):
        #load imaging data
        self._img_filename = img_filename
        self._image_hd = nib.load(img_filename)
        if fwhm > 0:
            self._image_hd = processing.smooth_image(self._image_hd, fwhm)

        if asl:
            img_data = self._image_hd.get_fdata()
            volumes = img_data.shape[3]
            even = img_data[:, :, :, 0:volumes:2]
            odd = img_data[:, :, :, 1:volumes:2]
            self._image_volume = (even + odd) / 2

        else:
            self._image_volume = self._image_hd.get_fdata()

        self._mask_hd = nib.load(mask_filename)
        self._mask_volume = self._mask_hd.get_fdata()
        self._dims = self._image_volume.shape
        self._mask_v = self._mask_volume.reshape(self._dims[0] *
                                                 self._dims[1] * self._dims[2])
        self._image_mat = self._image_volume.reshape(
            self._dims[0] * self._dims[1] * self._dims[2], self._dims[3])

        # if seg is specified, use grey matter mask instead
        if segmentation_filename is not None:
            self._segmentation_filename = segmentation_filename
            self._segmentation_hd = nib.load(segmentation_filename)
            self._segmentation_volume = self._segmentation_hd.get_fdata()
            self._mask_v = self._segmentation_volume[:, :, :, 2].reshape(
                self._dims[0] * self._dims[1] * self._dims[2])

        self._image_mat_in_mask = self._image_mat[
            self._mask_v[:] > 0, :].astype(float)
        self._dims_mat = self._image_mat_in_mask.shape
        # demean and normalise
        self._image_mat_in_mask = self._image_mat_in_mask - self._image_mat_in_mask.mean(
            axis=1)[:, None]
        self._image_mat_in_mask_normalised = self._image_mat_in_mask / self._image_mat_in_mask.std(
            axis=1)[:, None]
        where_are_NaNs = np.isnan(self._image_mat_in_mask_normalised)
        self._image_mat_in_mask_normalised[where_are_NaNs] = 0

        if atlas_filename is not None:
            self._atlas_filename = atlas_filename
            self._atlas_hd = nib.load(atlas_filename)
            self._atlas_volume = self._atlas_hd.get_fdata()
            self._atlas_v = self._atlas_volume.reshape(
                self._dims[0] * self._dims[1] * self._dims[2])
            self._atlas_values_unique = np.unique(self._atlas_v)
            self._atlas_thr = atlas_thr
            self._atlas_values_unique = self._atlas_values_unique[
                self._atlas_values_unique >= self._atlas_thr]
            self._atlas_indices_dict = {}
            self._atlas_v_reduced = self._mask_v[self._mask_v > 0]
            self._image_mat_in_mask_normalised_atlas = np.zeros(
                (len(self._atlas_values_unique), self._dims[3]))
            self._image_mat_in_mask_atlas = np.zeros(
                (len(self._atlas_values_unique), self._dims[3]))
            # compute atlas-wise scan
            for index, mask_val in enumerate(self._atlas_values_unique):
                self._atlas_indices_dict[mask_val] = indices(
                    self._atlas_v_reduced, lambda x: x == mask_val)
                self._image_mat_in_mask_atlas[
                    index, :] = self._image_mat_in_mask[
                        self._atlas_indices_dict[mask_val], :].mean(axis=0)

            self._image_mat_in_mask_normalised_atlas = self._image_mat_in_mask_atlas - self._image_mat_in_mask_atlas.mean(
                axis=1)[:, None]
            self._image_mat_in_mask_normalised_atlas = self._image_mat_in_mask_normalised_atlas / self._image_mat_in_mask_normalised_atlas.std(
                axis=1)[:, None]
            where_are_NaNs = np.isnan(self._image_mat_in_mask_normalised_atlas)
            self._image_mat_in_mask_normalised_atlas[where_are_NaNs] = 0
            self._corr = np.corrcoef(self._image_mat_in_mask_normalised_atlas)

        if noise_mask_filename is not None:  # create volume, do temporal PCA and regress
            self._noise_mask_filename = noise_mask_filename
            self._noise_mask_hd = nib.load(noise_mask_filename)
            self._noise_mask_volume = self._noise_mask_hd.get_fdata()
            self._noise_mask_v = self._noise_mask_volume.reshape(
                self._dims[0] * self._dims[1] * self._dims[2])
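
The demean/normalise step above z-scores each voxel's time series and zeroes the NaNs that constant voxels produce; the same guard in isolation:

import numpy as np

mat = np.random.rand(10, 50)             # voxels x timepoints
mat[0, :] = 1.0                           # constant voxel -> std 0 -> NaNs
mat = mat - mat.mean(axis=1)[:, None]
norm = mat / mat.std(axis=1)[:, None]     # emits a divide warning for row 0
norm[np.isnan(norm)] = 0                  # zero out constant voxels
print(np.isfinite(norm).all())  # True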
Example 17
def test_smooth_image():
    # Test image smoothing
    data = np.arange(24).reshape((2, 3, 4))
    aff = np.diag([-4, 5, 6, 1])
    img = Nifti1Image(data, aff)
    # Zero smoothing is no-op
    out_img = smooth_image(img, 0)
    assert_array_equal(out_img.affine, img.affine)
    assert_array_equal(out_img.shape, img.shape)
    assert_array_equal(out_img.dataobj, data)
    # Isotropic smoothing
    sd = fwhm2sigma(np.true_divide(8, [4, 5, 6]))
    exp_out = spnd.gaussian_filter(data, sd, mode='nearest')
    assert_array_equal(smooth_image(img, 8).dataobj, exp_out)
    assert_array_equal(smooth_image(img, [8, 8, 8]).dataobj, exp_out)
    with pytest.raises(ValueError):
        smooth_image(img, [8, 8])
    # Not isotropic
    mixed_sd = fwhm2sigma(np.true_divide([8, 7, 6], [4, 5, 6]))
    exp_out = spnd.gaussian_filter(data, mixed_sd, mode='nearest')
    assert_array_equal(smooth_image(img, [8, 7, 6]).dataobj, exp_out)
    # In 2D
    img_2d = Nifti1Image(data[0], aff)
    exp_out = spnd.gaussian_filter(data[0], sd[:2], mode='nearest')
    assert_array_equal(smooth_image(img_2d, 8).dataobj, exp_out)
    assert_array_equal(smooth_image(img_2d, [8, 8]).dataobj, exp_out)
    with pytest.raises(ValueError):
        smooth_image(img_2d, [8, 8, 8])
    # Isotropic in 4D has zero for last dimension in scalar case
    data_4d = np.arange(24 * 5).reshape((2, 3, 4, 5))
    img_4d = Nifti1Image(data_4d, aff)
    exp_out = spnd.gaussian_filter(data_4d, list(sd) + [0], mode='nearest')
    assert_array_equal(smooth_image(img_4d, 8).dataobj, exp_out)
    # But raises error for vector case
    with pytest.raises(ValueError):
        smooth_image(img_4d, [8, 8, 8])
    # mode, cval
    exp_out = spnd.gaussian_filter(data, sd, mode='constant')
    assert_array_equal(smooth_image(img, 8, mode='constant').dataobj, exp_out)
    exp_out = spnd.gaussian_filter(data, sd, mode='constant', cval=99)
    assert_array_equal(smooth_image(img, 8, mode='constant', cval=99).dataobj,
                       exp_out)
    # out_class
    img_ni1 = Nifti2Image(data, np.eye(4))
    img_ni2 = Nifti2Image(data, np.eye(4))
    # Default is Nifti1Image
    assert smooth_image(img_ni2, 0).__class__ == Nifti1Image
    # Can be overridden
    assert smooth_image(img_ni1, 0, out_class=Nifti2Image).__class__ == Nifti2Image
    # None specifies out_class from input
    assert smooth_image(img_ni2, 0, out_class=None).__class__ == Nifti2Image
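
The sd values in this test come from nibabel's FWHM/sigma conversion, sigma = FWHM / sqrt(8 ln 2); a quick check of the round trip:

import numpy as np
from nibabel.processing import fwhm2sigma, sigma2fwhm

assert np.isclose(fwhm2sigma(8.0), 8.0 / np.sqrt(8 * np.log(2)))
assert np.isclose(sigma2fwhm(fwhm2sigma(8.0)), 8.0)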
Example 18
def test_smooth_image():
    # Test image smoothing
    data = np.arange(24).reshape((2, 3, 4))
    aff = np.diag([-4, 5, 6, 1])
    img = Nifti1Image(data, aff)
    # Zero smoothing is no-op
    out_img = smooth_image(img, 0)
    assert_array_equal(out_img.affine, img.affine)
    assert_array_equal(out_img.shape, img.shape)
    assert_array_equal(out_img.dataobj, data)
    # Isotropic smoothing
    sd = fwhm2sigma(np.true_divide(8, [4, 5, 6]))
    exp_out = spnd.gaussian_filter(data, sd, mode='nearest')
    assert_array_equal(smooth_image(img, 8).dataobj, exp_out)
    assert_array_equal(smooth_image(img, [8, 8, 8]).dataobj, exp_out)
    assert_raises(ValueError, smooth_image, img, [8, 8])
    # Not isotropic
    mixed_sd = fwhm2sigma(np.true_divide([8, 7, 6], [4, 5, 6]))
    exp_out = spnd.gaussian_filter(data, mixed_sd, mode='nearest')
    assert_array_equal(smooth_image(img, [8, 7, 6]).dataobj, exp_out)
    # In 2D
    img_2d = Nifti1Image(data[0], aff)
    exp_out = spnd.gaussian_filter(data[0], sd[:2], mode='nearest')
    assert_array_equal(smooth_image(img_2d, 8).dataobj, exp_out)
    assert_array_equal(smooth_image(img_2d, [8, 8]).dataobj, exp_out)
    assert_raises(ValueError, smooth_image, img_2d, [8, 8, 8])
    # Isotropic in 4D has zero for last dimension in scalar case
    data_4d = np.arange(24 * 5).reshape((2, 3, 4, 5))
    img_4d = Nifti1Image(data_4d, aff)
    exp_out = spnd.gaussian_filter(data_4d, list(sd) + [0], mode='nearest')
    assert_array_equal(smooth_image(img_4d, 8).dataobj, exp_out)
    # But raises error for vector case
    assert_raises(ValueError, smooth_image, img_4d, [8, 8, 8])
    # mode, cval
    exp_out = spnd.gaussian_filter(data, sd, mode='constant')
    assert_array_equal(smooth_image(img, 8, mode='constant').dataobj, exp_out)
    exp_out = spnd.gaussian_filter(data, sd, mode='constant', cval=99)
    assert_array_equal(smooth_image(img, 8, mode='constant', cval=99).dataobj,
                       exp_out)
    # out_class
    img_ni1 = Nifti2Image(data, np.eye(4))
    img_ni2 = Nifti2Image(data, np.eye(4))
    # Default is Nifti1Image
    assert_equal(
        smooth_image(img_ni2, 0).__class__,
        Nifti1Image)
    # Can be overridden
    assert_equal(
        smooth_image(img_ni1, 0, out_class=Nifti2Image).__class__,
        Nifti2Image)
    # None specifies out_class from input
    assert_equal(
        smooth_image(img_ni2, 0, out_class=None).__class__,
        Nifti2Image)
Example 19
def binary_volume_mask_from_surface_vertices(
    left_surface_file,
    right_surface_file,
    nifti_file,
    outfile,
    thickness=5,
    fwhm=2,
    cifti_file=os.path.join(_main_dir,
                            'data/templates/cifti/ones.dscalar.nii')):
    """
    This function generates a binary nifti volume mask (in the space of the
    nifti file) from a surface mesh file (.gii) by smoothing and thresholding
    the smoothed nifti (the threshold aims to make it nearly as thick as
    thickness value in mm)
    """
    # load the sample nifti file
    nifti = nib.load(nifti_file)

    # load the brain models from a sample cifti file
    cifti = nib.load(cifti_file)
    brain_models = [x for x in cifti.header.get_index_map(1).brain_models]

    # load surface files
    left_surface = nib.load(left_surface_file)
    right_surface = nib.load(right_surface_file)

    # get the list of surface vertices that have value in the cifti file (valid cortical surfaces)
    surfacexyz = np.vstack([
        left_surface.darrays[0].data[brain_models[0].vertex_indices],
        right_surface.darrays[0].data[brain_models[1].vertex_indices]
    ])

    # find the closest voxel to the vertex
    voxels_ijk = np.round(
        nib.affines.apply_affine(np.linalg.inv(nifti.affine), surfacexyz))

    # create an integer weighted mask from the closest voxel list
    data = np.zeros(nifti.shape, dtype=int)
    for (i, j, k) in voxels_ijk.astype(int):
        try:
            data[i, j, k] += 1
        except IndexError:
            print((i, j, k))

    # save the projections of vertices to the volume as a nifti image
    vertex_projection_mask = nib.nifti1.Nifti1Image(data, nifti.affine,
                                                    nifti.header, nifti.extra)

    # smooth the image in volume space
    smoothed_projection_mask = nibprocessing.smooth_image(
        vertex_projection_mask, fwhm)

    # threshold the smoothed data and binarise
    threshold = _gaussian_smoothing_cut_value(_fwhm2sigma(fwhm), thickness / 2,
                                              2)
    data = smoothed_projection_mask.get_fdata()
    # data = (data>threshold)*data
    data = (data > threshold) * 1.

    # save the final thresholded mask
    binarised_thresholded_projection_mask = nib.nifti1.Nifti1Image(
        data, nifti.affine, nifti.header, nifti.extra)

    nib.save(binarised_thresholded_projection_mask, outfile)
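
The vertex-to-voxel step relies on applying the inverse affine to world coordinates; the coordinate mapping in isolation:

import numpy as np
import nibabel as nib

affine = np.diag([2.0, 2.0, 2.0, 1.0])   # 2 mm isotropic grid (hypothetical)
xyz = np.array([[10.0, -4.0, 6.0]])      # world-space mm coordinates
ijk = nib.affines.apply_affine(np.linalg.inv(affine), xyz)
print(np.round(ijk))  # [[ 5. -2.  3.]]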
Example 20
    def __init__(self, img_filename, mask_filename, noise_mask_filename=None, atlas_filename=None, segmentation_filename=None, atlas_thr=100, fwhm=0, asl=False):
        #load imaging data
        self._img_filename = img_filename
        self._image_hd = nib.load(img_filename)
        if fwhm>0:
            self._image_hd = processing.smooth_image(self._image_hd, fwhm)

        if asl:
            img_data = self._image_hd.get_fdata()
            volumes = img_data.shape[3]
            even = img_data[:,:,:,0:volumes:2]
            odd = img_data[:,:,:,1:volumes:2]
            self._image_volume = (even + odd)/2

        else:
            self._image_volume = self._image_hd.get_fdata()


        self._mask_hd = nib.load(mask_filename)
        self._mask_volume = self._mask_hd.get_fdata()
        self._dims = self._image_volume.shape
        self._mask_v = self._mask_volume.reshape(self._dims[0]*self._dims[1]*self._dims[2])
        self._image_mat = self._image_volume.reshape(self._dims[0]*self._dims[1]*self._dims[2],self._dims[3])


        # if seg is specified, use grey matter mask instead
        if segmentation_filename is not None:
            self._segmentation_filename = segmentation_filename
            self._segmentation_hd = nib.load(segmentation_filename)
            self._segmentation_volume = self._segmentation_hd.get_fdata()
            self._mask_v = self._segmentation_volume[:,:,:,2].reshape(self._dims[0]*self._dims[1]*self._dims[2])

        self._image_mat_in_mask = self._image_mat[self._mask_v[:]>0,:].astype(float)
        self._dims_mat = self._image_mat_in_mask.shape
        # demean and normalise
        self._image_mat_in_mask = self._image_mat_in_mask-self._image_mat_in_mask.mean(axis=1)[:,None]
        self._image_mat_in_mask_normalised=self._image_mat_in_mask/self._image_mat_in_mask.std(axis=1)[:,None]
        where_are_NaNs = np.isnan(self._image_mat_in_mask_normalised)
        self._image_mat_in_mask_normalised[where_are_NaNs] = 0

        if atlas_filename is not None:
            self._atlas_filename = atlas_filename
            self._atlas_hd = nib.load(atlas_filename)
            self._atlas_volume = self._atlas_hd.get_fdata()
            self._atlas_v = self._atlas_volume.reshape(self._dims[0]*self._dims[1]*self._dims[2])
            self._atlas_values_unique = np.unique(self._atlas_v)
            self._atlas_thr = atlas_thr
            self._atlas_values_unique=self._atlas_values_unique[self._atlas_values_unique >= self._atlas_thr]
            self._atlas_indices_dict = {}
            self._atlas_v_reduced = self._mask_v[self._mask_v>0]
            self._image_mat_in_mask_normalised_atlas = np.zeros((len(self._atlas_values_unique), self._dims[3]))
            self._image_mat_in_mask_atlas = np.zeros((len(self._atlas_values_unique), self._dims[3]))
            # compute atlas-wise scan
            for index, mask_val in enumerate(self._atlas_values_unique):
                self._atlas_indices_dict[mask_val] = indices(self._atlas_v_reduced, lambda x: x == mask_val)
                self._image_mat_in_mask_atlas[index,:] = self._image_mat_in_mask[self._atlas_indices_dict[mask_val],:].mean(axis=0)

            self._image_mat_in_mask_normalised_atlas=self._image_mat_in_mask_atlas-self._image_mat_in_mask_atlas.mean(axis=1)[:,None]
            self._image_mat_in_mask_normalised_atlas=self._image_mat_in_mask_normalised_atlas/self._image_mat_in_mask_normalised_atlas.std(axis=1)[:,None]
            where_are_NaNs = np.isnan(self._image_mat_in_mask_normalised_atlas)
            self._image_mat_in_mask_normalised_atlas[where_are_NaNs] = 0
            self._corr = np.corrcoef(self._image_mat_in_mask_normalised_atlas)


        if noise_mask_filename is not None: # create volume, do temporal PCA and regress
            self._noise_mask_filename = noise_mask_filename
            self._noise_mask_hd = nib.load(noise_mask_filename)
            self._noise_mask_volume = self._noise_mask_hd.get_fdata()
            self._noise_mask_v = self._noise_mask_volume.reshape(self._dims[0]*self._dims[1]*self._dims[2])
Example 21
def connectivity_fmri(time_series: str,
                      seed: str,
                      target: str,
                      log_file: str,
                      participant_id: str,
                      out: str,
                      confounds: str = None,
                      sep: str = None,
                      usecols: list = None,
                      apply_arctanh: bool = True,
                      apply_pca: bool = False,
                      pca_components: float = 0.95,
                      apply_bandpass: bool = False,
                      bandpass_band: tuple = (0.01, 0.08),
                      bandpass_tr: float = None,
                      apply_smoothing: bool = False,
                      smoothing_fwhm: float = 0.,
                      apply_low_variance_threshold: bool = True,
                      low_variance_in_seed: float = 0.05,
                      low_variance_in_target: float = 0.1,
                      low_variance_behavior: str = 'zero',
                      compress_output: bool = True) -> None:
    """ Compute a connectivity matrix from functional data.

    Processing steps:
      [1] Apply smoothing to time-series data (optional),
      [2] Apply ROI and target mask to the time-series,
      [3] apply nuisance signal regression on the roi- and target-masked
          time-series separately (optional),
      [4] apply band-pass filtering on the roi- and target-masked
          time-series separately (optional),
      [5] compute the correlation between the roi-masked time-series and
          target-masked time-series,
      [6] apply an arctanh transformation (np.arctanh) on the
          connectivity matrix (optional),
      [7] apply principal component analysis on the connectivity
          matrix (optional)

    Parameters
    ----------
    time_series: str
        Path to the time-series nifti image
    seed: str
        Path to the seed mask nifti image
    target: str
        Path to the target mask nifti image
    participant_id: str
        Unique identifier of the participant currently being processed
    out: str
        Output filename for the connectivity matrix (.npy)
    log_file : str
        Output filename for the log file (.log)
    low_variance_in_seed : float
        Percentage of low-variance voxels (tolerance of
        np.finfo(np.float32).eps) allowed to be within the seed
    low_variance_in_target : float
        Percentage of low-variance voxels (tolerance of
        np.finfo(np.float32).eps) allowed to be within the target
    smoothing_fwhm: float, optional
        Smoothing kernel FWHM in millimeters. Only applied when
        apply_smoothing is True.
    confounds : str, optional
        Path to a tabular confounds file.
    sep : str, optional
        Separator used for reading the confounds file (e.g., .tsv has
        '\t', .csv has ',' or ';')
    usecols : list, optional
        List containing the names of all columns that will be used
        for nuisance signal regression. If a confounds file is given,
        but this argument is not, all columns will be used.
        Wildcards are allowed (e.g., 'motion-*' will include all
        columns that start with 'motion-')
    bandpass_band: tuple, optional
        Band-pass filter cutoffs as (high_pass, low_pass). Filtering
        also requires bandpass_tr; if either cutoff or the TR is
        missing, band-pass filtering is skipped.
    apply_arctanh: bool, optional
        Apply np.arctanh to the connectivity matrix. This is
        applied by default.
    pca_components: float, optional
        Used as the n_components value of sklearn.decomposition.PCA,
        which is applied to the connectivity matrix when apply_pca
        is True. All other parameters are kept to sklearn defaults.
        Applying this returns the ROI-voxels by principal components
        as a connectivity matrix.
    compress_output: bool, optional
        Compress the output connectivity matrices using numpy savez_compressed
        to reduce the size on disk.
    """

    time_series = nib.load(time_series)
    seed_img = nib.load(seed)
    target_img = nib.load(target)

    if not img_is_4d(time_series):
        raise DimensionError(4, len(time_series.shape))

    if apply_smoothing:
        time_series = smooth_image(time_series, fwhm=smoothing_fwhm)

    seed_series = get_masked_series(time_series, seed_img)
    target_series = get_masked_series(time_series, target_img)
    del time_series
    gc.collect()

    # Identify low-variance voxels and log them

    in_seed = find_low_variance_voxels(data=seed_series)
    in_target = find_low_variance_voxels(data=target_series)
    bad_seed = in_seed.size / np.count_nonzero(
        seed_img.get_fdata()) > low_variance_in_seed
    bad_target = in_target.size / np.count_nonzero(
        target_img.get_fdata()) > low_variance_in_target

    pd.DataFrame(
        data=[[participant_id, in_seed, in_target, bad_seed or bad_target]],
        columns=[
            'participant_id', 'low_variance_in_seed', 'low_variance_in_target',
            'low_variance_excluded'
        ]).to_csv(log_file, sep='\t', index=False)

    # If the participant has data exceeding the seed- or target low
    # variance threshold, output an empty file
    if apply_low_variance_threshold:
        if bad_seed or bad_target:
            np.savez(out, connectivity=np.array([]))
            return

    # Nuisance Signal Regression
    if confounds is not None:
        # Fix separator if needed
        if sep is None:
            ext = os.path.splitext(confounds)[-1]
            separators = {'.tsv': '\t', '.csv': ','}
            if ext in separators.keys():
                sep = separators[ext]

        # Check if usecols contains wildcards to extend upon the header
        if usecols is not None:
            usecols = set(usecols)
            header = pd.read_csv(confounds, sep=sep, header=None,
                                 nrows=1).values.tolist()[0]
            usecols = [
                x for x in header if any(fnmatch(x, p) for p in usecols)
            ]

        confounds = pd.read_csv(confounds, sep=sep, usecols=usecols).values
        seed_series = nuisance_signal_regression(seed_series,
                                                 confounds=confounds,
                                                 demean=False)
        target_series = nuisance_signal_regression(target_series,
                                                   confounds=confounds,
                                                   demean=False)

    # Apply band-pass filter if high_pass, low_pass, and tr are defined
    if apply_bandpass:
        high_pass, low_pass = bandpass_band
        if all([low_pass, high_pass, bandpass_tr]):
            seed_series = fft_filter(seed_series,
                                     low_pass=low_pass,
                                     high_pass=high_pass,
                                     tr=bandpass_tr)
            target_series = fft_filter(target_series,
                                       low_pass=low_pass,
                                       high_pass=high_pass,
                                       tr=bandpass_tr)

    connectivity = seed_based_correlation(x=seed_series,
                                          y=target_series,
                                          standardize=True)

    if apply_arctanh:
        # Values at 1 or -1 causing atanh inf's, here we set them slightly
        # below 1 or above -1.
        connectivity[connectivity >= 1] = np.nextafter(np.float32(1.),
                                                       np.float32(-1))
        connectivity[connectivity <= -1] = np.nextafter(
            np.float32(-1.), np.float32(1))
        connectivity = np.arctanh(connectivity)

    if apply_pca:
        connectivity = detrend(connectivity, axis=1, type='constant')
        pca = PCA(n_components=pca_components)
        connectivity = pca.fit_transform(connectivity)

    # Ensure float32
    connectivity = connectivity.astype(np.float32)

    if compress_output:
        np.savez_compressed(out, connectivity=connectivity)
    else:
        np.savez(out, connectivity=connectivity)
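
Step [5] in the docstring above, the seed-to-target correlation, reduces to a dot product of z-scored time series; seed_based_correlation is project code, but the underlying math is:

import numpy as np

t, n_seed, n_target = 100, 10, 50
rng = np.random.default_rng(1)
x = rng.normal(size=(t, n_seed))    # seed time series (time x voxels)
y = rng.normal(size=(t, n_target))  # target time series

# z-score over time, then correlate: r = X^T Y / t
zx = (x - x.mean(0)) / x.std(0)
zy = (y - y.mean(0)) / y.std(0)
r = zx.T @ zy / t
print(r.shape)  # (10, 50) seed-by-target connectivity matrix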
Example 22
def volume_4D_realign(img_path,
                      reference=0,
                      smooth_fwhm=0,
                      jitter=0.0,
                      prefix='r',
                      drop_vols=0,
                      guess_params=np.zeros(6),
                      mean_ref=np.zeros((64, 64, 30))):
    """ Realign volumes obtained from a 4D Nifti1Image file

    Input
    ----------
    img_path: string
        filepath of the 4D .nii image for which volumes will be realigned

    reference: int
        int with value 0 (default) or 1, indicating to use the first (0) or middle (1) volume as the reference

    smooth_fwhm: int
        value (in mm) of the FWHM used to smooth the image data before realignment

    jitter: float
        amount of jitter optional to add random noise for realignment

    prefix: string
        short string (default = 'r') added as the prefix for the returned 4D nii image

    drop_vols: int
        index of the volume before which all volumes will be dropped (default = 0)

    mean_ref: array shape (I x J x K)
        Mean functional volume (if provided this will be the reference)

    Output
    -------
    realigned_img: .nii file
        Nifti1Image containing the realigned volumes

    realign_params: array shape (volumes x 6)
        2D numpy array containing the 6 rigid body transformation values for each volume
    """
    #img_dir, img_name = os.path.split(img_path)

    img = nib.load(img_path)
    data = img.get_fdata()

    # Dropping volumes, specified by input of drop_vols
    data = data[..., drop_vols:]

    # smooth image, if designated
    if smooth_fwhm > 0:
        fwhm = smooth_fwhm
        img_smooth = proc.smooth_image(img, fwhm)
        img_optimize = img_smooth
        data_optimize = img_smooth.get_fdata()
    else:
        img_optimize = img
        data_optimize = data

    n_vols = data.shape[-1]
    print('Number of volumes to realign:', n_vols)

    # set whether the reference should be the first volume or middle volume
    if reference == 0:
        ref_vol = data_optimize[..., 0]
    elif reference == 1:
        mid_index = int(n_vols / 2)
        mid_vol = data_optimize[..., mid_index]
        ref_vol = mid_vol

    if np.sum(mean_ref) > 0:
        ref_vol = mean_ref

    # array to which the realignment parameters for each of the 6 rigid body parameters
    #  will be added for each volume
    realign_params = np.zeros((n_vols, 6))
    realigned_data = np.zeros((data.shape))
    for i in range(n_vols):
        # Use either zeros (default) or inputted parameters to guess the starting point for volume realignment
        if len(guess_params.shape) > 1:
            guess_params_vol = guess_params[i, :] * (-1)
        else:
            guess_params_vol = guess_params

        # getting best parameters for the i-th volume
        best_params = optimize_map_vol(ref_vol,
                                       data_optimize[..., i],
                                       img_optimize.affine,
                                       guess_params_vol,
                                       jitter=jitter)

        # resampling using params determined above
        resampled_vol = apply_coord_mapping(best_params, ref_vol, data[..., i],
                                            img.affine)

        # add 6 rigid body parameters to array
        #params = np.append(trans_params, rot_params)
        realign_params[i, :] = best_params

        print('Realigned volume:', i)

        # place new realigned vol in new 4d realigned data array
        realigned_data[..., i] = resampled_vol

    # build the realigned image from the realigned data array
    realigned_img = nib.Nifti1Image(realigned_data, img.affine)

    return realigned_img, realign_params
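
A hypothetical invocation of the realignment routine above (optimize_map_vol and apply_coord_mapping come from the surrounding project, and the file name is a placeholder):

# Realign to the middle volume after 5 mm smoothing, dropping two dummies.
realigned_img, realign_params = volume_4D_realign('func_4d.nii',
                                                  reference=1,
                                                  smooth_fwhm=5,
                                                  drop_vols=2)
print(realign_params.shape)  # (n_vols, 6)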
Example 23
def connectivity_rsfmri(input: dict, output: dict, params: dict,
                        log: list) -> None:
    """ Compute a connectivity matrix from functional data.

    Processing steps:
      [1] Apply smoothing to time-series data (optional),
      [2] Apply ROI and target mask to the time-series,
      [3] apply nuisance signal regression on the roi- and target-masked
          time-series separately (optional),
      [4] apply band-pass filtering on the roi- and target-masked
          time-series separately (optional),
      [5] compute the correlation between the roi-masked time-series and
          target-masked time-series,
      [6] apply an arctanh transformation (np.arctanh) on the
          connectivity matrix (optional),
      [7] apply principal component analysis on the connectivity
          matrix (optional)

    Parameters
    ----------
    input : dict
        Input files, allowed: {time_series, seed, target, confounds}
    output : dict
        Output files, allowed {connectivity}
    params : dict
        Parameters, allowed {arctanh_transform, pca_transform, bandpass,
        low_variance_error, compress, confounds, smoothing_fwhm}. For more
        information, see the CBPtools documentation on readthedocs.io under
        the parameters section for 'time_series_proc'.
    log : list
        Log files
    """

    # input, output, params
    time_series_file = input.get('time_series')
    seed_img_file = input.get('seed_mask')
    target_img_file = input.get('target_mask')
    confounds_file = input.get('confounds', None)
    connectivity_file = output.get('connectivity')
    log_file = log[0]
    smoothing = params.get('smoothing', False)
    lv_correction = params.get('low_variance_correction', False)
    lv_in_seed = params.get('low_variance_in_seed', None)
    lv_in_target = params.get('low_variance_in_target', None)
    confounds_sep = params.get('confounds_delimiter', None)
    confounds_cols = params.get('confounds_columns', None)
    bandpass = params.get('bandpass', False)
    pca_transform = params.get('pca_transform', False)
    arctanh_transform = params.get('arctanh_transform', False)
    compress = params.get('compress', False)

    # Set up logging
    logger = get_logger('connectivity_rsfmri', log_file)

    # Load input data
    time_series = nib.load(time_series_file)
    seed_img = nib.load(seed_img_file)
    target_img = nib.load(target_img_file)

    if not img_is_4d(time_series):
        logger.error('%s has incompatible dimensionality: Expected dimension '
                     'is %sD but a %sD image was provided' %
                     (time_series_file, 4, len(time_series.shape)))
        np.savez(connectivity_file, connectivity=np.array([]))
        return

    if smoothing:
        logger.info('applying smoothing (fwhm=%s) to %s' %
                    (smoothing, time_series_file))
        time_series = smooth_image(time_series, fwhm=smoothing)

    seed_series = get_masked_series(time_series, seed_img)
    target_series = get_masked_series(time_series, target_img)
    del time_series
    gc.collect()

    # Identify low-variance voxels and log them
    in_seed = find_low_variance_voxels(data=seed_series)
    in_target = find_low_variance_voxels(data=target_series)
    bad_seed = in_seed.size / np.count_nonzero(seed_img.get_fdata())
    bad_seed = bad_seed > lv_in_seed
    bad_target = in_target.size / np.count_nonzero(target_img.get_fdata())
    bad_target = bad_target > lv_in_target

    if in_seed.size > 0:
        logger.warning('%s low variance seed voxels found in %s' %
                       (in_seed.size, time_series_file))

    if in_target.size > 0:
        logger.warning('%s low variance target voxels found in %s' %
                       (in_target.size, time_series_file))

    if lv_correction:
        if bad_seed:
            logger.error('number of low variance voxels in seed exceeds the '
                         'threshold (%s) for %s' %
                         (lv_in_seed, time_series_file))

        if bad_target:
            logger.error('number of low variance voxels in target exceeds the '
                         'threshold (%s) for %s' %
                         (lv_in_target, time_series_file))

        if bad_seed or bad_target:
            np.savez(connectivity_file, connectivity=np.array([]))
            return

    if in_target.size > 0 or in_seed.size > 0:
        # setting to 0 is done in the seed_based_correlation() method
        logger.warning('low variance voxels will be set to zero')

    # Nuisance Signal Regression
    if confounds_file:
        if confounds_sep is None:
            # Fix delimiter
            ext = os.path.splitext(confounds_file)[-1]
            separators = {'.tsv': '\t', '.csv': ','}
            if ext in separators.keys():
                confounds_sep = separators[ext]

        # Check if usecols contains wildcards to extend upon the header
        if confounds_cols:
            confounds_cols = set(confounds_cols)
            header = pd.read_csv(confounds_file,
                                 sep=confounds_sep,
                                 header=None,
                                 nrows=1)
            header = header.values.tolist()[0]
            confounds_cols = [
                x for x in header if any(
                    fnmatch(x, p) for p in confounds_cols)
            ]
        else:
            confounds_cols = None  # enforce none (e.g. if confound_cols = [])

        confounds = pd.read_csv(confounds_file,
                                sep=confounds_sep,
                                usecols=confounds_cols)

        if confounds_cols is None:
            logger.info('%s nuisance signal regression using all columns' %
                        time_series_file)
        else:
            logger.info('%s nuisance signal regression using: %s' %
                        (time_series_file,
                         str(confounds_cols).strip('[]').replace('\'', '')))

        confounds = confounds.values
        seed_series = nuisance_signal_regression(seed_series,
                                                 confounds=confounds,
                                                 demean=False)
        target_series = nuisance_signal_regression(target_series,
                                                   confounds=confounds,
                                                   demean=False)

    # Apply band-pass filter if high_pass, low_pass, and tr are defined
    if bandpass:
        (high_pass, low_pass), tr = bandpass
        logger.info('%s band-pass filtering (high-pass=%s, '
                    'low-pass=%s, tr=%s)' %
                    (time_series_file, high_pass, low_pass, tr))
        seed_series = fft_filter(seed_series, low_pass, high_pass, tr)
        target_series = fft_filter(target_series, low_pass, high_pass, tr)

    # Compute connectivity matrix
    r = seed_based_correlation(seed_series, target_series, True)

    # Clamp correlations just inside (-1, 1) so that, e.g., arctanh stays finite
    r[r >= 1] = np.nextafter(np.float32(1.), np.float32(-1))
    r[r <= -1] = np.nextafter(np.float32(-1.), np.float32(1))

    if arctanh_transform:
        logger.info('%s applying arctanh transform' % connectivity_file)
        r = np.arctanh(r)

    if pca_transform:
        logger.info('%s applying PCA transform (n_components=%s)' %
                    (connectivity_file, pca_transform))
        r = detrend(r, axis=1, type='constant')
        pca = PCA(n_components=pca_transform)
        r = pca.fit_transform(r)

    # Ensure float32
    r = r.astype(np.float32)

    # Save output
    if compress:
        np.savez_compressed(connectivity_file, connectivity=r)
    else:
        np.savez(connectivity_file, connectivity=r)
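
The example above leans on several helpers that are not shown. For orientation, here is a minimal NumPy sketch of what a least-squares confound regression in the spirit of nuisance_signal_regression could look like; this is an assumption about its behavior (data assumed time x voxels), not the library's actual code.

import numpy as np

def nuisance_signal_regression(data, confounds, demean=True):
    # sketch: regress the confound time series out of the data by
    # ordinary least squares and return the residuals (assumed behavior)
    if demean:
        confounds = confounds - confounds.mean(axis=0)
    # include an intercept column so the fit absorbs the mean
    design = np.column_stack([confounds, np.ones(len(confounds))])
    beta, *_ = np.linalg.lstsq(design, data, rcond=None)
    return data - design @ beta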
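In the same hedged spirit, sketches of the band-pass and correlation steps, assuming time on axis 0: fft_filter keeps frequencies between high_pass and low_pass (in Hz), and seed_based_correlation returns Pearson correlations between every seed and target voxel. The real implementations may differ.

def fft_filter(data, low_pass, high_pass, tr):
    # sketch: zero all FFT bins outside the [high_pass, low_pass] Hz band
    n = data.shape[0]
    freqs = np.fft.rfftfreq(n, d=tr)
    spectrum = np.fft.rfft(data, axis=0)
    keep = (freqs >= high_pass) & (freqs <= low_pass)
    spectrum[~keep] = 0
    return np.fft.irfft(spectrum, n=n, axis=0)

def seed_based_correlation(x, y, standardize=True):
    # sketch: Pearson correlation of every seed column in x with every
    # target column in y; returns a (seed voxels x target voxels) matrix
    if standardize:
        x = (x - x.mean(axis=0)) / x.std(axis=0)
        y = (y - y.mean(axis=0)) / y.std(axis=0)
    return (x.T @ y) / x.shape[0]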
Example no. 24
# compute relative CBF: each masked voxel divided by the mean CBF within the mask

cbf = img1.get_fdata()
cbf1 = cbf[logmask] / np.mean(cbf[logmask])
img2 = np.zeros(shape=[img1.shape[0], img1.shape[1], img1.shape[2]])
img2[logmask] = cbf1
img_rel = nib.Nifti1Image(dataobj=img2, affine=img1.affine, header=img1.header)
out3 = out + 'R.nii.gz'
nib.save(img_rel, out3)

if gm and wm and csf:
    img_3 = nib.Nifti1Image(dataobj=cbf,
                            affine=img1.affine,
                            header=img1.header)
    scbf = smooth_image(img_3, fwhm=5)
    cbf = scbf.get_fdata()
    out3 = out + '_QEI.txt'

    gm0 = nib.load(gm).get_fdata()
    # tissue probability maps may be 4D; if so, keep the last volume of each
    if len(gm0.shape) == 4:
        gmm = gm0[..., -1]
        wm0 = nib.load(wm).get_fdata()
        wmm = wm0[..., -1]
        cm0 = nib.load(csf).get_fdata()
        ccf = cm0[..., -1]
    else:
        gmm = gm0
        wmm = nib.load(wm).get_fdata()
        ccf = nib.load(csf).get_fdata()
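
The example stops before the QEI computation itself. Purely to illustrate the per-tissue summary it sets up, here is a hedged sketch that averages the smoothed CBF within thresholded tissue maps; the helper name and the 0.9 threshold are assumptions for the sketch, not part of the original.

import numpy as np

def mean_cbf_by_tissue(cbf, gmm, wmm, ccf, thresh=0.9):
    # hypothetical helper: mean CBF within each thresholded tissue map
    return {
        'gm': float(np.mean(cbf[gmm > thresh])),
        'wm': float(np.mean(cbf[wmm > thresh])),
        'csf': float(np.mean(cbf[ccf > thresh])),
    }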
Example no. 25
    def _run_interface(self, runtime):
        file1 = os.path.abspath(self.inputs.in_file)
        # check whether the M0 metadata names a file, a number, or a flag
        m0num = 1
        m0file = []
        aslfile_linkedM0 = []

        m0_meta = self.inputs.in_metadata['M0']
        if m0_meta not in ("True", "False") and not isinstance(m0_meta, int):
            # M0 is a path to a separate m0scan, relative to the BIDS root
            m0file = os.path.abspath(self.inputs.bids_dir + '/' + m0_meta)
            m0file_metadata = readjson(m0file.replace('nii.gz', 'json'))
            aslfile_linkedM0 = os.path.abspath(
                self.inputs.bids_dir + '/' + m0file_metadata['IntendedFor'])
        elif isinstance(m0_meta, int):
            m0num = int(m0_meta)
        else:
            print('no M0 file or numerical M0; the average control will be '
                  'used. For deltam, an M0 is required for CBF quantification')
        
        aslcontext1 = file1.replace('_asl.nii.gz', '_aslcontext.tsv')
        aslcontext = pd.read_csv(aslcontext1, sep='\t')

        idasl = aslcontext['volume_type'].tolist()

        # volume indices for each volume_type: control, label, m0scan, deltam
        controllist = [i for i, vt in enumerate(idasl) if vt == 'control']
        labellist = [i for i, vt in enumerate(idasl) if vt == 'label']
        m0list = [i for i, vt in enumerate(idasl) if vt == 'm0scan']
        deltamlist = [i for i, vt in enumerate(idasl) if vt == 'deltam']

        
        allasl = nb.load(self.inputs.asl_file)
        mask = nb.load(self.inputs.in_mask).get_fdata()
        dataasl = allasl.get_fdata()

        if len(dataasl.shape) == 5:
            raise RuntimeError('Input image (%s) is 5D.' % self.inputs.asl_file)

        if len(deltamlist) > 0:
            cbf_data = dataasl[:, :, :, deltamlist]
        elif len(labellist) > 0:
            control_img = dataasl[:, :, :, controllist]
            label_img = dataasl[:, :, :, labellist]
            cbf_data = np.subtract(control_img, label_img)
        else:
            raise RuntimeError('no valid deltam or control/label volumes found.')
      
        
        if self.inputs.dummy_vols != 0:
            cbf_data = np.delete(cbf_data, range(0, self.inputs.dummy_vols), axis=3)

        # M0 file
        if m0file or aslfile_linkedM0:
            # use the m0scan named in the metadata (or the one linked via
            # IntendedFor) and register it to the asl file
            m0file = m0file or aslfile_linkedM0

            newm0 = fname_presuffix(self.inputs.asl_file, suffix='_m0file')
            newm0 = regmotoasl(asl=self.inputs.asl_file, m0file=m0file,
                               m02asl=newm0)
            m0data_smooth = smooth_image(nb.load(newm0),
                                         fwhm=self.inputs.fwhm).get_data()
            if len(m0data_smooth.shape) > 3:
                avg_control = mask * np.mean(m0data_smooth, axis=3)
            else:
                avg_control = mask * m0data_smooth

        elif len(m0list) > 0 and m0_meta == "True":
            # no separate m0 file; take the m0scan volumes from the asl data
            modata2 = dataasl[:, :, :, m0list]
            con2 = nb.Nifti1Image(modata2, allasl.affine, allasl.header)
            m0data_smooth = smooth_image(con2, fwhm=self.inputs.fwhm).get_data()
            if len(m0data_smooth.shape) > 3:
                avg_control = mask * np.mean(m0data_smooth, axis=3)
            else:
                avg_control = mask * m0data_smooth
        elif len(controllist) > 0:
            # else use the average control image
            control_img = dataasl[:, :, :, controllist]
            con = nb.Nifti1Image(control_img, allasl.affine, allasl.header)
            control_img1 = smooth_image(con, fwhm=self.inputs.fwhm).get_data()
            avg_control = mask * np.mean(control_img1, axis=3)
        else:
            # fall back to the numerical M0 value from the metadata
            avg_control = mask * np.mean(np.ones_like(cbf_data), axis=3)
            avg_control = m0num * avg_control


        self._results['out_file'] = fname_presuffix(self.inputs.in_file,
                                                    suffix='_cbftimeseries', newpath=runtime.cwd)
        self._results['out_avg'] = fname_presuffix(self.inputs.in_file,
                                                   suffix='_avg_control', newpath=runtime.cwd)
        nb.Nifti1Image(
            np.divide(cbf_data, m0num), allasl.affine, allasl.header).to_filename(
            self._results['out_file'])
        nb.Nifti1Image(
            avg_control, allasl.affine, allasl.header).to_filename(
            self._results['out_avg'])

        self.inputs.out_file = os.path.abspath(self._results['out_file'])
        self.inputs.out_avg = os.path.abspath(self._results['out_avg'])
        return runtime
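
regmotoasl is not shown in this example. Below is a hedged stand-in built on nibabel's header-based resampling; the real helper presumably performs an actual motion or affine registration, which this sketch does not attempt.

import nibabel as nb
from nibabel.processing import resample_from_to

def regmotoasl(asl, m0file, m02asl):
    # sketch: resample the m0 image onto the asl grid (grid alignment
    # only, no motion or affine estimation)
    asl_img = nb.load(asl)
    m0_img = nb.load(m0file)
    if m0_img.ndim == 4:
        # collapse multiple m0 volumes to their mean before resampling
        m0_img = nb.Nifti1Image(m0_img.get_fdata().mean(axis=3),
                                m0_img.affine, m0_img.header)
    resampled = resample_from_to(m0_img, (asl_img.shape[:3], asl_img.affine))
    resampled.to_filename(m02asl)
    return m02asl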
Example no. 26
#############################################
#       Data loading and preprocessing      #
#############################################

import numpy as np
import nibabel as nib
from nibabel import processing

Targets = np.genfromtxt("data/targets.csv")

# crop bounds (voxel indices) for each axis
x_low, x_up = 50, 120
y_low, y_up = 80, 150
z_low, z_up = 50, 100

Data = []
for i in range(1, 279):
    print('Train image', i, 'processed')
    imagefile = nib.load("data/set_train/train_" + str(i) + ".nii")
    imagefile = processing.smooth_image(imagefile, 1)  # 1 mm FWHM Gaussian
    image = imagefile.get_fdata()
    I = image[:, :, :, 0]
    I = I[x_low:x_up, y_low:y_up, z_low:z_up]  # crop all three axes
    I = np.asarray(I, dtype=float)
    I = I / np.max(I)  # scale intensities to [0, 1]
    imagefile.uncache()
    I = I.flatten(order='C')
    Data.append(np.asarray(I))


X_train = Data
y_train = Targets

print(np.asarray(X_train).shape)
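
The snippet ends before any model is fit. As a hedged continuation, not part of the original, a minimal scikit-learn ridge regression over the flattened voxel features might look like this:

from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score

X = np.asarray(X_train)  # (n_images, n_voxels)
model = Ridge(alpha=1.0)
scores = cross_val_score(model, X, y_train, cv=5)
print('CV R^2 per fold:', scores)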