Example #1
def test_reconst_ivim():

    with TemporaryDirectory() as out_dir:
        bvals = np.array([
            0., 10., 20., 30., 40., 60., 80., 100., 120., 140., 160., 180.,
            200., 300., 400., 500., 600., 700., 800., 900., 1000.
        ])
        N = len(bvals)
        bvecs = generate_bvecs(N)
        temp_bval_path = pjoin(out_dir, "temp.bval")
        np.savetxt(temp_bval_path, bvals)
        temp_bvec_path = pjoin(out_dir, "temp.bvec")
        np.savetxt(temp_bvec_path, bvecs)

        gtab = gradient_table(bvals, bvecs)

        S0, f, D_star, D = 1000.0, 0.132, 0.00885, 0.000921

        mevals = np.array(([D_star, D_star, D_star], [D, D, D]))
        # This gives an isotropic signal.
        data = multi_tensor(gtab,
                            mevals,
                            snr=None,
                            S0=S0,
                            fractions=[f * 100, 100 * (1 - f)])
        # Single voxel data
        data_single = data[0]
        temp_affine = np.eye(4)

        data_multi = np.zeros((2, 2, 1, len(gtab.bvals)), dtype=int)
        data_multi[0, 0, 0] = data_multi[0, 1, 0] = data_multi[
            1, 0, 0] = data_multi[1, 1, 0] = data_single
        data_path = pjoin(out_dir, 'tmp_data.nii.gz')
        save_nifti(data_path, data_multi, temp_affine)

        mask = np.ones_like(data_multi[..., 0], dtype=np.uint8)
        mask_path = pjoin(out_dir, 'tmp_mask.nii.gz')
        save_nifti(mask_path, mask, temp_affine)

        ivim_flow = ReconstIvimFlow()

        args = [data_path, temp_bval_path, temp_bvec_path, mask_path]

        ivim_flow.run(*args, out_dir=out_dir)

        S0_path = ivim_flow.last_generated_outputs['out_S0_predicted']
        S0_data = load_nifti_data(S0_path)
        assert_equal(S0_data.shape, data_multi.shape[:-1])

        f_path = ivim_flow.last_generated_outputs['out_perfusion_fraction']
        f_data = load_nifti_data(f_path)
        assert_equal(f_data.shape, data_multi.shape[:-1])

        D_star_path = ivim_flow.last_generated_outputs['out_D_star']
        D_star_data = load_nifti_data(D_star_path)
        assert_equal(D_star_data.shape, data_multi.shape[:-1])

        D_path = ivim_flow.last_generated_outputs['out_D']
        D_data = load_nifti_data(D_path)
        assert_equal(D_data.shape, data_multi.shape[:-1])
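For reference, the values above (S0, f, D_star, D) are the parameters of the standard bi-exponential IVIM signal model, which the test approximates with two isotropic tensor compartments. A minimal NumPy sketch of that model (illustrative only, not part of the test):

import numpy as np

def ivim_signal(bvals, S0, f, D_star, D):
    # S(b) = S0 * (f * exp(-b * D*) + (1 - f) * exp(-b * D))
    return S0 * (f * np.exp(-bvals * D_star) + (1 - f) * np.exp(-bvals * D))

signal = ivim_signal(np.array([0., 100., 1000.]), 1000.0, 0.132, 0.00885, 0.000921)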
Example #2
    def run(self,
            input_files,
            vol_idx=0,
            out_dir='',
            out_split='split.nii.gz'):
        """ Splits the input 4D file and extracts the required 3D volume.

        Parameters
        ----------
        input_files : variable string
            Any number of Nifti1 files
        vol_idx : int, optional
            Index of the 3D volume to extract (default 0).
        out_dir : string, optional
            Output directory. (default current directory)
        out_split : string, optional
            Name of the resulting split volume

        """
        io_it = self.get_io_iterator()
        for fpath, osplit in io_it:
            logging.info('Splitting {0}'.format(fpath))
            data, affine, image = load_nifti(fpath, return_img=True)

            if vol_idx == 0:
                logging.info('Splitting and extracting 1st b0')

            split_vol = data[..., vol_idx]
            save_nifti(osplit, split_vol, affine, image.header)

            logging.info('Split volume saved as {0}'.format(osplit))
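A hypothetical programmatic call to this workflow, for illustration (the import path and file names are assumptions, not taken from the example):

from dipy.workflows.io import SplitFlow

split_flow = SplitFlow()
split_flow.run('dwi_4d.nii.gz', vol_idx=0, out_dir='out')
print(split_flow.last_generated_outputs['out_split'])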
Example #3
def test_bundle_shape_analysis_flow():

    with TemporaryDirectory() as dirpath:
        data_path = get_fnames('fornix')
        fornix = load_tractogram(data_path, 'same',
                                 bbox_valid_check=False).streamlines

        f = Streamlines(fornix)

        mb = os.path.join(dirpath, "model_bundles")
        sub = os.path.join(dirpath, "subjects")

        os.mkdir(mb)
        sft = StatefulTractogram(f, data_path, Space.RASMM)
        save_tractogram(sft,
                        os.path.join(mb, "temp.trk"),
                        bbox_valid_check=False)

        os.mkdir(sub)

        os.mkdir(os.path.join(sub, "patient"))

        os.mkdir(os.path.join(sub, "control"))

        p = os.path.join(sub, "patient", "10001")
        os.mkdir(p)

        c = os.path.join(sub, "control", "20002")
        os.mkdir(c)

        for pre in [p, c]:

            os.mkdir(os.path.join(pre, "rec_bundles"))

            sft = StatefulTractogram(f, data_path, Space.RASMM)
            save_tractogram(sft,
                            os.path.join(pre, "rec_bundles", "temp.trk"),
                            bbox_valid_check=False)
            os.mkdir(os.path.join(pre, "org_bundles"))

            sft = StatefulTractogram(f, data_path, Space.RASMM)
            save_tractogram(sft,
                            os.path.join(pre, "org_bundles", "temp.trk"),
                            bbox_valid_check=False)
            os.mkdir(os.path.join(pre, "anatomical_measures"))

            fa = np.random.rand(255, 255, 255)

            save_nifti(os.path.join(pre, "anatomical_measures", "fa.nii.gz"),
                       fa,
                       affine=np.eye(4))

        out_dir = os.path.join(dirpath, "output")
        os.mkdir(out_dir)

        sm_flow = BundleShapeAnalysis()

        sm_flow.run(sub, out_dir=out_dir)

        assert_true(os.path.exists(os.path.join(out_dir, "temp.npy")))
Example #4
    def run(self,
            input_files,
            lb,
            ub=np.inf,
            out_dir='',
            out_mask='mask.nii.gz'):
        """ Workflow for creating a binary mask

        Parameters
        ----------
        input_files : string
           Path to image to be masked.
        lb : float
            Lower bound value.
        ub : float, optional
            Upper bound value (default Inf)
        out_dir : string, optional
           Output directory (default input file directory)
        out_mask : string, optional
           Name of the masked file (default 'mask.nii.gz')
        """
        if lb >= ub:
            logging.error('The upper bound (ub) should be greater'
                          ' than the lower bound (lb).')
            return

        io_it = self.get_io_iterator()

        for input_path, out_mask_path in io_it:
            logging.info('Creating mask of {0}'.format(input_path))
            data, affine = load_nifti(input_path)
            mask = np.bitwise_and(data > lb, data < ub)
            save_nifti(out_mask_path, mask.astype(np.ubyte), affine)
            logging.info('Mask saved at {0}'.format(out_mask_path))
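A hedged usage sketch for the workflow above (the import path, file name and threshold values are assumptions for illustration):

from dipy.workflows.mask import MaskFlow

mask_flow = MaskFlow()
mask_flow.run('fa.nii.gz', 0.2, ub=1.0, out_dir='out')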
Example #5
    def run(self, input_files, lb, ub=np.inf, out_dir='',
            out_mask='mask.nii.gz'):

        """ Workflow for creating a binary mask

        Parameters
        ----------
        input_files : string
           Path to image to be masked.
        lb : float
            Lower bound value.
        ub : float, optional
            Upper bound value (default Inf)
        out_dir : string, optional
           Output directory (default input file directory)
        out_mask : string, optional
           Name of the masked file (default 'mask.nii.gz')
        """
        if lb >= ub:
            logging.error('The upper bound (ub) should be greater'
                          ' than the lower bound (lb).')
            return

        io_it = self.get_io_iterator()

        for input_path, out_mask_path in io_it:
            logging.info('Creating mask of {0}'.format(input_path))
            data, affine = load_nifti(input_path)
            mask = np.bitwise_and(data > lb, data < ub)
            save_nifti(out_mask_path, mask.astype(np.ubyte), affine)
            logging.info('Mask saved at {0}'.format(out_mask_path))
Example #6
def peaks_to_niftis(pam,
                    fname_shm,
                    fname_dirs,
                    fname_values,
                    fname_indices,
                    fname_gfa,
                    reshape_dirs=False):
        """ Save SH, directions, indices and values of peaks to Nifti.
        """

        save_nifti(fname_shm, pam.shm_coeff.astype(np.float32), pam.affine)

        if reshape_dirs:
            pam_dirs = reshape_peaks_for_visualization(pam)
        else:
            pam_dirs = pam.peak_dirs.astype(np.float32)

        save_nifti(fname_dirs, pam_dirs, pam.affine)

        save_nifti(fname_values, pam.peak_values.astype(np.float32),
                   pam.affine)

        save_nifti(fname_indices, pam.peak_indices, pam.affine)

        save_nifti(fname_gfa, pam.gfa, pam.affine)
Example #7
def vessel_pop(DSC):
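    # NOTE: this function relies on module-level globals defined elsewhere in
    # the original script: CZ4 (a NIfTI path) and path (an output directory).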
    # load DSC and get numbers of label
    cord_data, cord_affine = load_nifti(DSC)
    label_array, label_num = label(cord_data)
    print('cluster numbers from DSC are {}'.format(label_num))
    while label_num > 1:
        # While more than one label remains: save the labeled segmentation,
        # extract the vessel component, mask it against CZ4, and subtract
        # that vessel volume from CZ4.
        label_mask = DSC[:-7] + '_label.nii.gz'
        save_nifti(label_mask, label_array, cord_affine)
        vessel = DSC[:-7] + '_vessel.nii.gz'
        call(['fslmaths', label_mask, '-thr', '2', vessel])
        CZ4_vessel = CZ4[:-7] + '_vessel.nii.gz'
        call(['fslmaths', CZ4, '-mas', vessel, CZ4_vessel])
        CZ4_no_vessel = CZ4[:-7] + '_no_vessel.nii.gz'
        call(['fslmaths', CZ4, '-sub', CZ4_vessel, CZ4_no_vessel])
        # run deepseg again based on CZ4_no_vessel
        qc = os.path.join(path, 'qc')
        call([
            'sct_deepseg_sc', '-i', CZ4_no_vessel, '-c', 't2', '-ofolder',
            path, '-qc', qc
        ])
        call(['rm', '-r', qc])
        re_DSC = CZ4_no_vessel[:-7] + '_seg.nii.gz'
        data, affine = load_nifti(re_DSC)
        label_array, label_num = label(data)
        DSC = re_DSC
        print(DSC)
        print('final label_num is {}'.format(label_num))
        call(['rm', label_mask, vessel, CZ4_vessel])
    return DSC
Example #8
def test_mppca_flow():
    with TemporaryDirectory() as out_dir:
        S0 = 100 + 2 * np.random.standard_normal((22, 23, 30, 20))
        data_path = os.path.join(out_dir, "random_noise.nii.gz")
        save_nifti(data_path, S0, np.eye(4))

        mppca_flow = MPPCAFlow()
        mppca_flow.run(data_path, out_dir=out_dir)
        assert_true(os.path.isfile(
                mppca_flow.last_generated_outputs['out_denoised']))
        assert_false(os.path.isfile(
                mppca_flow.last_generated_outputs['out_sigma']))

        mppca_flow._force_overwrite = True
        mppca_flow.run(data_path, return_sigma=True, pca_method='svd',
                       out_dir=out_dir)
        assert_true(os.path.isfile(
                mppca_flow.last_generated_outputs['out_denoised']))
        assert_true(os.path.isfile(
                mppca_flow.last_generated_outputs['out_sigma']))

        denoised_path = mppca_flow.last_generated_outputs['out_denoised']
        denoised_data = load_nifti_data(denoised_path)
        assert_greater(denoised_data.min(), S0.min())
        assert_less(denoised_data.max(), S0.max())
        npt.assert_equal(np.round(denoised_data.mean()), 100)
Example #9
def test_gibbs_flow():
    def generate_slice():
        Nori = 32
        image = np.zeros((6 * Nori, 6 * Nori))
        image[Nori: 2 * Nori, Nori: 2 * Nori] = 1
        image[Nori: 2 * Nori, 4 * Nori: 5 * Nori] = 1
        image[2 * Nori: 3 * Nori, Nori: 3 * Nori] = 1
        image[3 * Nori: 4 * Nori, 2 * Nori: 3 * Nori] = 2
        image[3 * Nori: 4 * Nori, 4 * Nori: 5 * Nori] = 1
        image[4 * Nori: 5 * Nori, 3 * Nori: 5 * Nori] = 3

        # Corrupt image with gibbs ringing
        c = np.fft.fft2(image)
        c = np.fft.fftshift(c)
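        # Keep only the central 96 of the 192 k-space samples; truncating the
        # high frequencies is what introduces the ringing artifact.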
        c_crop = c[48:144, 48:144]
        image_gibbs = abs(np.fft.ifft2(c_crop)/4)
        return image_gibbs

    with TemporaryDirectory() as out_dir:
        image4d = np.zeros((96, 96, 2, 2))
        image4d[:, :, 0, 0] = generate_slice()
        image4d[:, :, 1, 0] = generate_slice()
        image4d[:, :, 0, 1] = generate_slice()
        image4d[:, :, 1, 1] = generate_slice()
        data_path = os.path.join(out_dir, "random_noise.nii.gz")
        save_nifti(data_path, image4d, np.eye(4))

        gibbs_flow = GibbsRingingFlow()
        gibbs_flow.run(data_path, out_dir=out_dir)
        assert_true(os.path.isfile(
                gibbs_flow.last_generated_outputs['out_unring']))
Example #10
def get_FA_MD():
    time0 = time.time()

    data, affine = load_nifti('normalized_pDWI.nii.gz')
    bvals, bvecs = read_bvals_bvecs('DWI.bval', 'DWI.bvec')
    gtab = gradient_table(bvals, bvecs)
    #head_mask = load_nifti_data(data_path + '/' + brain_mask)

    print(data.shape)
    print('begin modeling!, time:', time.time() - time0)

    tenmodel = dti.TensorModel(gtab)
    tenfit = tenmodel.fit(data)

    from dipy.reconst.dti import fractional_anisotropy
    print('begin calculating FA!, time:', time.time() - time0)

    FA = fractional_anisotropy(tenfit.evals)

    FA[np.isnan(FA)] = 0
    #FA = FA * head_mask
    save_nifti('FA.nii.gz', FA.astype(np.float32), affine)

    # print('begin calculating MD!, time:', time.time() - time0)
    MD1 = dti.mean_diffusivity(tenfit.evals)
    #MD1 = MD1*head_mask
    save_nifti('MD.nii.gz', MD1.astype(np.float32), affine)

    print('Over!, time:', time.time() - time0)

    return FA, MD1
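For reference, a minimal NumPy sketch of the quantity that fractional_anisotropy computes from the three tensor eigenvalues (this mirrors the textbook FA definition, not necessarily DIPY's exact implementation):

import numpy as np

def fa_from_evals(ev1, ev2, ev3):
    # FA = sqrt(0.5 * ((l1-l2)^2 + (l2-l3)^2 + (l3-l1)^2) / (l1^2 + l2^2 + l3^2))
    num = (ev1 - ev2) ** 2 + (ev2 - ev3) ** 2 + (ev3 - ev1) ** 2
    den = ev1 ** 2 + ev2 ** 2 + ev3 ** 2
    # den is zero in background voxels, which is why the code above zeroes NaNs
    return np.sqrt(0.5 * num / den)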
Example #11
def test_bundle_analysis_population_flow():

    with TemporaryDirectory() as dirpath:

        streams, hdr = nib.trackvis.read(get_fnames('fornix'))
        fornix = [s[0] for s in streams]

        f = Streamlines(fornix)

        mb = os.path.join(dirpath, "model_bundles")
        sub = os.path.join(dirpath, "subjects")

        os.mkdir(mb)
        save_trk(os.path.join(mb, "temp.trk"), f, affine=np.eye(4))

        os.mkdir(sub)

        os.mkdir(os.path.join(sub, "patient"))

        os.mkdir(os.path.join(sub, "control"))

        p = os.path.join(sub, "patient", "10001")
        os.mkdir(p)

        c = os.path.join(sub, "control", "20002")
        os.mkdir(c)

        for pre in [p, c]:

            os.mkdir(os.path.join(pre, "rec_bundles"))

            save_trk(os.path.join(pre, "rec_bundles", "temp.trk"), f,
                     affine=np.eye(4))

            os.mkdir(os.path.join(pre, "org_bundles"))

            save_trk(os.path.join(pre, "org_bundles", "temp.trk"), f,
                     affine=np.eye(4))
            os.mkdir(os.path.join(pre, "measures"))

            fa = np.random.rand(255, 255, 255)

            save_nifti(os.path.join(pre, "measures", "fa.nii.gz"),
                       fa, affine=np.eye(4))

        out_dir = os.path.join(dirpath, "output")
        os.mkdir(out_dir)

        ba_flow = BundleAnalysisPopulationFlow()

        ba_flow.run(mb, sub, out_dir=out_dir)

        assert_true(os.path.exists(os.path.join(out_dir, 'fa.h5')))

        dft = pd.read_hdf(os.path.join(out_dir, 'fa.h5'))

        assert_true(dft.bundle.unique() == "temp")

        assert_true(set(dft.subject.unique()) == set(['10001', '20002']))
Example #12
def downsample(subject_folder):
    print('Down-sampling in ', subject_folder)
    # load 4D volume
    data_folder = subject_folder + 'T1w/Diffusion/'
    low_res_folder = subject_folder + 'T1w/Diffusion_low_res/'

    # make a folder to save new data into
    try:
        Path(low_res_folder).mkdir(parents=True, exist_ok=True)
    except OSError:
        print('Could not create output dir. Aborting...')
        return

    # load bvals and make binary mask (True for b = 1000)
    with open(data_folder + 'bvals') as f:
        bvals = [int(x) for x in next(f).split()]
    mask_b1000 = [i == 1000 for i in bvals]

    bvals = np.asarray(bvals)[mask_b1000]
    bvals_low_res_file = low_res_folder + 'bvals'
    if Path(bvals_low_res_file).exists():
        remove(bvals_low_res_file)
    with open(bvals_low_res_file, 'x') as f:
        f.write(' '.join(map(str, bvals)))

    # load bvecs
    bvecs_low_res_file = low_res_folder + 'bvecs'
    if Path(bvecs_low_res_file).exists():
        remove(bvecs_low_res_file)
    new_file = open(bvecs_low_res_file, 'x')
    with open(data_folder + 'bvecs') as f:
        for line in f:
            # read line and mask it
            new_coords = np.asarray([float(x)
                                     for x in line.split()])[mask_b1000]
            new_file.write(' '.join(map(str, new_coords)) + '\n')
    new_file.close()

    img = nib.load(data_folder + 'data.nii.gz')
    affine = img.affine
    zooms = img.header.get_zooms()[:3]
    data = np.asarray(img.dataobj)
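    # Move the volume (b) axis to the front so the b=1000 boolean mask can
    # select volumes, then restore the original (x, y, z, b) axis order.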
    data = np.einsum('xyzb->bxyz', data)
    data = data[mask_b1000]
    data = np.einsum('bxyz->xyzb', data)

    new_zooms = (2.5, 2.5, 2.5)
    new_data, new_affine = reslice(data, affine, zooms, new_zooms)
    print('Down-sampled to shape ', new_data.shape)
    save_nifti(low_res_folder + 'data.nii.gz', new_data, new_affine)

    mask_img = nib.load(data_folder + 'nodif_brain_mask.nii.gz')
    mask = np.asarray(mask_img.dataobj)
    mask_zooms = mask_img.header.get_zooms()[:3]
    mask_affine = mask_img.affine
    new_mask, new_mask_affine = reslice(mask, mask_affine, mask_zooms,
                                        new_zooms)
    save_nifti(low_res_folder + 'nodif_brain_mask.nii.gz', new_mask,
               new_mask_affine)
Example #13
    def run(self,
            input_files,
            new_vox_size,
            order=1,
            mode='constant',
            cval=0,
            num_processes=1,
            out_dir='',
            out_resliced='resliced.nii.gz'):
        """Reslice data with new voxel resolution defined by ``new_vox_sz``

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        new_vox_size : variable float
            New voxel size.
        order : int, optional
            Order of interpolation, from 0 to 5, for resampling/reslicing:
            0 is nearest-neighbour interpolation, 1 is trilinear, etc. Use 0
            if you do not want any smoothing (default 1).
        mode : string, optional
            Points outside the boundaries of the input are filled according
            to the given mode 'constant', 'nearest', 'reflect' or 'wrap'
            (default 'constant')
        cval : float, optional
            Value used for points outside the boundaries of the input if
            mode='constant' (default 0)
        num_processes : int, optional
            Split the calculation to a pool of children processes. This only
            applies to 4D `data` arrays. If a positive integer then it defines
            the size of the multiprocessing pool that will be used. If 0, then
            the size of the pool will equal the number of cores available.
            (default 1)
        out_dir : string, optional
            Output directory (default input file directory)
        out_resliced : string, optional
            Name of the resliced dataset to be saved
            (default 'resliced.nii.gz')
        """

        io_it = self.get_io_iterator()

        for inputfile, outpfile in io_it:

            data, affine, vox_sz = load_nifti(inputfile, return_voxsize=True)
            logging.info('Processing {0}'.format(inputfile))
            new_data, new_affine = reslice(data,
                                           affine,
                                           vox_sz,
                                           new_vox_size,
                                           order,
                                           mode=mode,
                                           cval=cval,
                                           num_processes=num_processes)
            save_nifti(outpfile, new_data, new_affine)
            logging.info('Resliced file saved in {0}'.format(outpfile))
Example #14
def median_mask_make(inpath, outpath, outpathmask=None, median_radius=4,
                     numpass=4, binary_dilation=None):

    if outpathmask is None:
        outpathmask = outpath.replace(".nii", "_mask.nii")
    data, affine = load_nifti(inpath)
    data = np.squeeze(data)
    data_masked, mask = median_otsu(data, median_radius=median_radius, numpass=numpass, dilate=binary_dilation)
    save_nifti(outpath, data_masked.astype(np.float32), affine)
    save_nifti(outpathmask, mask.astype(np.float32), affine)
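A hypothetical call, with illustrative paths only:

median_mask_make('dwi.nii.gz', 'dwi_masked.nii.gz', median_radius=4, numpass=4)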
Example #15
    def run(self, input_files, sigma=0, patch_radius=1, block_radius=5,
            rician=True, out_dir='', out_denoised='dwi_nlmeans.nii.gz'):
        """Workflow wrapping the nlmeans denoising method.

        It applies nlmeans denoise on each file found by 'globing'
        ``input_files`` and saves the results in a directory specified by
        ``out_dir``.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        sigma : float, optional
            Sigma parameter to pass to the nlmeans algorithm
            (default: auto estimation).
        patch_radius : int, optional
            patch size is ``2 x patch_radius + 1``. Default is 1.
        block_radius : int, optional
            block size is ``2 x block_radius + 1``. Default is 5.
        rician : bool, optional
            If True the noise is estimated as Rician, otherwise Gaussian noise
            is assumed.
        out_dir : string, optional
            Output directory (default input file directory)
        out_denoised : string, optional
            Name of the resulting denoised volume (default: dwi_nlmeans.nii.gz)

        References
        ----------
        .. [Descoteaux08] Descoteaux, Maxime and Wiest-Daesslé, Nicolas and
        Prima, Sylvain and Barillot, Christian and Deriche, Rachid.
        Impact of Rician Adapted Non-Local Means Filtering on
        HARDI, MICCAI 2008

        """
        io_it = self.get_io_iterator()
        for fpath, odenoised in io_it:
            if self._skip:
                shutil.copy(fpath, odenoised)
                logging.warning('Denoising skipped for now.')
            else:
                logging.info('Denoising %s', fpath)
                data, affine, image = load_nifti(fpath, return_img=True)

                if sigma == 0:
                    logging.info('Estimating sigma')
                    sigma = estimate_sigma(data)
                    logging.debug('Found sigma {0}'.format(sigma))

                denoised_data = nlmeans(data, sigma=sigma,
                                        patch_radius=patch_radius,
                                        block_radius=block_radius,
                                        rician=rician)
                save_nifti(odenoised, denoised_data, affine, image.header)

                logging.info('Denoised volume saved as %s', odenoised)
Example #16
    def run(self, input_files, bvalues_files, bvectors_files, b0_threshold=50,
            bvecs_tol=0.01, out_dir='', out_moved='moved.nii.gz',
            out_affine='affine.txt'):
        """
        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        bvalues_files : string
            Path to the bvalues files. This path may contain wildcards to use
            multiple bvalues files at once.
        bvectors_files : string
            Path to the bvectors files. This path may contain wildcards to use
            multiple bvectors files at once.
        b0_threshold : float, optional
            Threshold used to find b0 volumes.
        bvecs_tol : float, optional
            Threshold used to check that norm(bvec) = 1 +/- bvecs_tol, i.e.
            that the b-vectors are unit vectors.
        out_dir : string, optional
            Directory to save the transformed image and the affine matrix
             (default current directory).
        out_moved : string, optional
            Name for the saved transformed image.
        out_affine : string, optional
            Name for the saved affine matrix.
        """

        io_it = self.get_io_iterator()

        for dwi, bval, bvec, omoved, oaffine in io_it:

            # Load the data from the input files and store into objects.
            logging.info('Loading {0}'.format(dwi))
            data, affine = load_nifti(dwi)

            bvals, bvecs = read_bvals_bvecs(bval, bvec)

            if b0_threshold < bvals.min():
                warn("b0_threshold (value: {0}) is too low, increase your "
                     "b0_threshold. It should be higher than the lowest "
                     "b0 value ({1}).".format(b0_threshold, bvals.min()))
            gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold,
                                  atol=bvecs_tol)

            reg_img, reg_affines = motion_correction(data=data, gtab=gtab,
                                                     affine=affine)

            # Saving the corrected image file
            save_nifti(omoved, reg_img.get_fdata(), affine)
            # Write the affine matrix array to disk
            with open(oaffine, 'w') as outfile:
                outfile.write('# Array shape: {0}\n'.format(reg_affines.shape))
                for affine_slice in reg_affines:
                    np.savetxt(outfile, affine_slice, fmt='%-7.2f')
                    outfile.write('# New slice\n')
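A hedged sketch of how the stacked affines written above could be read back (it assumes each per-volume matrix is 4x4, which the example does not state explicitly):

import numpy as np

flat = np.loadtxt('affine.txt')        # lines starting with '#' are skipped
reg_affines = flat.reshape(-1, 4, 4)   # one 4x4 matrix per volume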
Example #17
    def run(self, input_files, bval_files, model='ridge', verbose=False,
            out_dir='', out_denoised='dwi_patch2self.nii.gz'):
        """Workflow for Patch2Self denoising method.

        It applies patch2self denoising on each file found by 'globing'
        ``input_file`` and ``bval_file``. It saves the results in a directory
        specified by ``out_dir``.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        bval_files : string
            bval file associated with the diffusion data.
        model : string, or initialized linear model object.
            This will determine the algorithm used to solve the set of linear
            equations underlying this model. If it is a string it needs to be
            one of the following: {'ols', 'ridge', 'lasso'}. Otherwise,
            it can be an object that inherits from
            `dipy.optimize.SKLearnLinearSolver` or an object with a similar
            interface from Scikit-Learn:
            `sklearn.linear_model.LinearRegression`,
            `sklearn.linear_model.Lasso` or `sklearn.linear_model.Ridge`
            and other objects that inherit from `sklearn.base.RegressorMixin`.
            Default: 'ridge'.
        verbose : bool, optional
            Show progress of Patch2Self and time taken.
        out_dir : string, optional
            Output directory (default current directory)
        out_denoised : string, optional
            Name of the resulting denoised volume
            (default: dwi_patch2self.nii.gz)

        References
        ----------
        .. [Fadnavis20] S. Fadnavis, J. Batson, E. Garyfallidis, Patch2Self:
                    Denoising Diffusion MRI with Self-supervised Learning,
                    Advances in Neural Information Processing Systems 33 (2020)

        """
        io_it = self.get_io_iterator()
        for fpath, bvalpath, odenoised in io_it:
            if self._skip:
                shutil.copy(fpath, odenoised)
                logging.warning('Denoising skipped for now.')
            else:
                logging.info('Denoising %s', fpath)
                data, affine, image = load_nifti(fpath, return_img=True)
                bvals = np.loadtxt(bvalpath)

                denoised_data = patch2self(data, bvals, model=model,
                                           verbose=verbose)
                save_nifti(odenoised, denoised_data, affine, image.header)

                logging.info('Denoised volumes saved as %s', odenoised)
Example #18
    def run(self, input_files, patch_radius=2, pca_method='eig',
            return_sigma=False, out_dir='', out_denoised='dwi_mppca.nii.gz',
            out_sigma='dwi_sigma.nii.gz'):
        r"""Workflow wrapping Marcenko-Pastur PCA denoising method.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        patch_radius : variable int, optional
            The radius of the local patch to be taken around each voxel (in
            voxels) For example, for a patch radius with value 2, and assuming
            the input image is a 3D image, the denoising will take place in
            blocks of 5x5x5 voxels.
        pca_method : string, optional
            Use either eigenvalue decomposition ('eig') or singular value
            decomposition ('svd') for principal component analysis. The default
            method is 'eig' which is faster. However, occasionally 'svd' might
            be more accurate.
        return_sigma : bool, optional
            If true, a noise standard deviation estimate based on the
            Marcenko-Pastur distribution is returned [2]_.
        out_dir : string, optional
            Output directory. (default current directory)
        out_denoised : string, optional
            Name of the resulting denoised volume.
        out_sigma : string, optional
            Name of the resulting sigma volume.

        References
        ----------
        .. [1] Veraart J, Novikov DS, Christiaens D, Ades-aron B, Sijbers,
        Fieremans E, 2016. Denoising of Diffusion MRI using random matrix
        theory. Neuroimage 142:394-406.
        doi: 10.1016/j.neuroimage.2016.08.016

        .. [2] Veraart J, Fieremans E, Novikov DS. 2016. Diffusion MRI noise
        mapping using random matrix theory. Magnetic Resonance in Medicine.
        doi: 10.1002/mrm.26059.

        """
        io_it = self.get_io_iterator()
        for dwi, odenoised, osigma in io_it:
            logging.info('Denoising %s', dwi)
            data, affine, image = load_nifti(dwi, return_img=True)

            denoised_data, sigma = mppca(data, patch_radius=patch_radius,
                                         pca_method=pca_method,
                                         return_sigma=True)

            save_nifti(odenoised, denoised_data, affine, image.header)
            logging.info('Denoised volume saved as %s', odenoised)
            if return_sigma:
                save_nifti(osigma, sigma, affine, image.header)
                logging.info('Sigma volume saved as %s', osigma)
Example #19
def test_ba():

    with TemporaryDirectory() as dirpath:
        data_path = get_fnames('fornix')
        fornix = load_tractogram(data_path, 'same',
                                 bbox_valid_check=False).streamlines

        f = Streamlines(fornix)

        mb = os.path.join(dirpath, "model_bundles")

        os.mkdir(mb)

        sft = StatefulTractogram(f, data_path, Space.RASMM)
        save_tractogram(sft,
                        os.path.join(mb, "temp.trk"),
                        bbox_valid_check=False)

        rb = os.path.join(dirpath, "rec_bundles")
        os.mkdir(rb)

        sft = StatefulTractogram(f, data_path, Space.RASMM)
        save_tractogram(sft,
                        os.path.join(rb, "temp.trk"),
                        bbox_valid_check=False)

        ob = os.path.join(dirpath, "org_bundles")
        os.mkdir(ob)

        sft = StatefulTractogram(f, data_path, Space.RASMM)
        save_tractogram(sft,
                        os.path.join(ob, "temp.trk"),
                        bbox_valid_check=False)

        dt = os.path.join(dirpath, "dti_measures")
        os.mkdir(dt)

        fa = np.random.rand(255, 255, 255)

        save_nifti(os.path.join(dt, "fa.nii.gz"), fa, affine=np.eye(4))

        out_dir = os.path.join(dirpath, "output")
        os.mkdir(out_dir)

        bundle_analysis(mb,
                        rb,
                        ob,
                        dt,
                        group="patient",
                        subject="10001",
                        no_disks=100,
                        out_dir=out_dir)

        assert_true(os.path.exists(os.path.join(out_dir, 'fa.h5')))
Example #20
    def run(self, static_image_file, moving_image_files, affine_matrix_file,
            out_dir='', out_file='transformed.nii.gz'):

        """
        Parameters
        ----------
        static_image_file : string
            Path of the static image file.

        moving_image_files : string
            Path of the moving image(s). It can be a single image or a
            folder containing multiple images.

        affine_matrix_file : string
            The text file containing the affine matrix for transformation.

        out_dir : string, optional
            Directory to save the transformed files (default '').

        out_file : string, optional
            Name of the transformed file (default 'transformed.nii.gz').
            It is recommended to use the flag --mix-names to prevent the
            output files from being overwritten.

        """
        io = self.get_io_iterator()

        for static_image_file, moving_image_file, affine_matrix_file, \
                out_file in io:

            # Loading the image data from the input files into object.
            static_image, static_grid2world = load_nifti(static_image_file)

            moving_image, moving_grid2world = load_nifti(moving_image_file)

            # Doing a sanity check for validating the dimensions of the input
            # images.
            ImageRegistrationFlow.check_dimensions(static_image, moving_image)

            # Loading the affine matrix.
            affine_matrix = np.loadtxt(affine_matrix_file)

            # Setting up the affine transformation object.
            img_transformation = AffineMap(
                affine=affine_matrix,
                domain_grid_shape=static_image.shape,
                domain_grid2world=static_grid2world,
                codomain_grid_shape=moving_image.shape,
                codomain_grid2world=moving_grid2world)

            # Transforming the image.
            transformed = img_transformation.transform(moving_image)

            save_nifti(out_file, transformed, affine=static_grid2world)
Example #21
    def run(self,
            input_files,
            slice_axis=2,
            n_points=3,
            num_processes=1,
            out_dir='',
            out_unring='dwi_unrig.nii.gz'):
        r"""Workflow for applying Gibbs Ringing method.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        slice_axis : int, optional
            Data axis corresponding to the number of acquired slices.
            Could be (0, 1, or 2): for example, a value of 2 would mean the
            third axis.
        n_points : int, optional
            Number of neighbour points to access local TV (see note).
        num_processes : int or None, optional
            Split the calculation to a pool of children processes. Only
            applies to 3D or 4D `data` arrays. Default is 1. If < 0 the maximal
            number of cores minus |num_processes + 1| is used (enter -1 to use
            as many cores as possible). 0 raises an error.
        out_dir : string, optional
            Output directory. (default current directory)
        out_unring : string, optional
            Name of the resulting denoised volume.

        References
        ----------
        .. [1] Neto Henriques, R., 2018. Advanced Methods for Diffusion MRI
        Data Analysis and their Application to the Healthy Ageing Brain
        (Doctoral thesis). https://doi.org/10.17863/CAM.29356

        .. [2] Kellner E, Dhital B, Kiselev VG, Reisert M. Gibbs-ringing
        artifact removal based on local subvoxel-shifts. Magn Reson Med. 2016
        doi: 10.1002/mrm.26054.

        """
        io_it = self.get_io_iterator()
        for dwi, ounring in io_it:
            logging.info('Unringing %s', dwi)
            data, affine, image = load_nifti(dwi, return_img=True)

            unring_data = gibbs_removal(data,
                                        slice_axis=slice_axis,
                                        n_points=n_points,
                                        num_processes=num_processes)

            save_nifti(ounring, unring_data, affine, image.header)
            logging.info('Denoised volume saved as %s', ounring)
Example #22
    def run(self,
            input_files,
            slice_axis=2,
            n_points=3,
            num_threads=1,
            out_dir='',
            out_unring='dwi_unrig.nii.gz'):
        r"""Workflow for applying Gibbs Ringing method.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        slice_axis : int, optional
            Data axis corresponding to the number of acquired slices.
            Default is set to the third axis(2). Could be (0, 1, or 2).
        n_points : int, optional
            Number of neighbour points to access local TV (see note).
            Default is set to 3.
        num_threads : int or None, optional
            Number of threads. Only applies to 3D or 4D `data` arrays. If None
            then all available threads will be used. Otherwise, must be a
            positive integer.
            Default is set to 1.
        out_dir : string, optional
            Output directory (default input file directory)
        out_unring : string, optional
            Name of the resulting denoised volume (default: dwi_unrig.nii.gz)

        References
        ----------
        .. [1] Neto Henriques, R., 2018. Advanced Methods for Diffusion MRI
        Data Analysis and their Application to the Healthy Ageing Brain
        (Doctoral thesis). https://doi.org/10.17863/CAM.29356

        .. [2] Kellner E, Dhital B, Kiselev VG, Reisert M. Gibbs-ringing
        artifact removal based on local subvoxel-shifts. Magn Reson Med. 2016
        doi: 10.1002/mrm.26054.

        """
        io_it = self.get_io_iterator()
        for dwi, ounring in io_it:
            logging.info('Unringing %s', dwi)
            data, affine, image = load_nifti(dwi, return_img=True)

            unring_data = gibbs_removal(data,
                                        slice_axis=slice_axis,
                                        n_points=n_points,
                                        num_threads=num_threads)

            save_nifti(ounring, unring_data, affine, image.header)
            logging.info('Denoised volume saved as %s', ounring)
Example #23
def reconst_flow_core(flow, extra_args=[], extra_kwargs={}):
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_fnames('small_25')
        volume, affine = load_nifti(data_path)
        mask = np.ones_like(volume[:, :, :, 0], dtype=np.uint8)
        mask_path = join(out_dir, 'tmp_mask.nii.gz')
        save_nifti(mask_path, mask, affine)

        dti_flow = flow()

        args = [data_path, bval_path, bvec_path, mask_path]
        args.extend(extra_args)
        kwargs = dict(out_dir=out_dir)
        kwargs.update(extra_kwargs)

        dti_flow.run(*args, **kwargs)

        fa_path = dti_flow.last_generated_outputs['out_fa']
        fa_data = load_nifti_data(fa_path)
        assert_equal(fa_data.shape, volume.shape[:-1])

        tensor_path = dti_flow.last_generated_outputs['out_tensor']
        tensor_data = load_nifti_data(tensor_path)
        # By default, the tensor data is 5D, with six tensor elements on the
        # last dimension, unless nifti_tensor is set to False:
        if extra_kwargs.get('nifti_tensor', True):
            assert_equal(tensor_data.shape[-1], 6)
            assert_equal(tensor_data.shape[:-2], volume.shape[:-1])
        else:
            assert_equal(tensor_data.shape[-1], 6)
            assert_equal(tensor_data.shape[:-1], volume.shape[:-1])

        for out_name in ['out_ga', 'out_md', 'out_ad', 'out_rd', 'out_mode']:
            out_path = dti_flow.last_generated_outputs[out_name]
            out_data = load_nifti_data(out_path)
            assert_equal(out_data.shape, volume.shape[:-1])

        rgb_path = dti_flow.last_generated_outputs['out_rgb']
        rgb_data = load_nifti_data(rgb_path)
        assert_equal(rgb_data.shape[-1], 3)
        assert_equal(rgb_data.shape[:-1], volume.shape[:-1])

        evecs_path = dti_flow.last_generated_outputs['out_evec']
        evecs_data = load_nifti_data(evecs_path)
        assert_equal(evecs_data.shape[-2:], tuple((3, 3)))
        assert_equal(evecs_data.shape[:-2], volume.shape[:-1])

        evals_path = dti_flow.last_generated_outputs['out_eval']
        evals_data = load_nifti_data(evals_path)
        assert_equal(evals_data.shape[-1], 3)
        assert_equal(evals_data.shape[:-1], volume.shape[:-1])
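A hedged example of driving this helper (ReconstDtiFlow is assumed to be the DIPY DTI workflow used elsewhere in the test suite):

reconst_flow_core(ReconstDtiFlow)
reconst_flow_core(ReconstDtiFlow, extra_kwargs={'nifti_tensor': False})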
Example #24
def re_sample(input_path_img, input_path_label, output_path_img, out_path_label, new_voxel_size):

    data, affine, voxel_size = load_nifti(input_path_img, return_voxsize=True)
    label, affine_label, voxel_size1 = load_nifti(input_path_label, return_voxsize=True)
    print('Before', data.shape, voxel_size)
    print('label Before', label.shape, voxel_size1)

    data2, affine2 = reslice(data, affine, voxel_size, new_voxel_size)
    print('After resample:', data2.shape, new_voxel_size)

    label2, affine_label2 = reslice(label, affine_label, voxel_size1, new_voxel_size)
    print('label After resample:', label2.shape, new_voxel_size)

    save_nifti(output_path_img, data2, affine2)
    save_nifti(out_path_label, label2, affine_label2)
Example #25
def downsample_segmentations(subject):
    print('Downsampling for ' + subject)

    folder = data_path + 'tractseg/' + subject + '/tracts/'
    for t in tracts:
        img = nib.load(folder + t + '.nii.gz')
        data = np.asarray(img.dataobj)
        zooms = img.header.get_zooms()[:3]
        affine = img.affine
        new_data, new_affine = reslice(data,
                                       affine,
                                       zooms,
                                       new_zooms=[2.5, 2.5, 2.5])
        save_nifti(folder + t + '_low_res.nii.gz', new_data, new_affine)
        print('\rSaved ' + t + '         ', end='')
    print('')
Example #26
    def run(self, input_files, new_vox_size, order=1, mode='constant', cval=0,
            num_processes=1, out_dir='', out_resliced='resliced.nii.gz'):
        """Reslice data with new voxel resolution defined by ``new_vox_sz``
    
        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        new_vox_size : variable float
            New voxel size.
        order : int, optional
            Order of interpolation, from 0 to 5, for resampling/reslicing:
            0 is nearest-neighbour interpolation, 1 is trilinear, etc. Use 0
            if you do not want any smoothing (default 1).
        mode : string, optional
            Points outside the boundaries of the input are filled according
            to the given mode 'constant', 'nearest', 'reflect' or 'wrap'
            (default 'constant')
        cval : float, optional
            Value used for points outside the boundaries of the input if
            mode='constant' (default 0)
        num_processes : int, optional
            Split the calculation to a pool of children processes. This only
            applies to 4D `data` arrays. If a positive integer then it defines
            the size of the multiprocessing pool that will be used. If 0, then
            the size of the pool will equal the number of cores available.
            (default 1)
        out_dir : string, optional
            Output directory (default input file directory)
        out_resliced : string, optional
            Name of the resliced dataset to be saved
            (default 'resliced.nii.gz')
        """
        
        io_it = self.get_io_iterator()

        for inputfile, outpfile in io_it:
            
            data, affine, vox_sz = load_nifti(inputfile, return_voxsize=True)
            logging.info('Processing {0}'.format(inputfile))
            new_data, new_affine = reslice(data, affine, vox_sz, new_vox_size,
                                           order, mode=mode, cval=cval,
                                           num_processes=num_processes)
            save_nifti(outpfile, new_data, new_affine)
            logging.info('Resliced file saved in {0}'.format(outpfile))
Example #27
def test_ba():

    with TemporaryDirectory() as dirpath:

        streams, hdr = nib.trackvis.read(get_fnames('fornix'))
        fornix = [s[0] for s in streams]

        f = Streamlines(fornix)

        mb = os.path.join(dirpath, "model_bundles")

        os.mkdir(mb)

        save_trk(os.path.join(mb, "temp.trk"), f, affine=np.eye(4))

        rb = os.path.join(dirpath, "rec_bundles")
        os.mkdir(rb)

        save_trk(os.path.join(rb, "temp.trk"), f, affine=np.eye(4))

        ob = os.path.join(dirpath, "org_bundles")
        os.mkdir(ob)

        save_trk(os.path.join(ob, "temp.trk"), f, affine=np.eye(4))

        dt = os.path.join(dirpath, "dti_measures")
        os.mkdir(dt)

        fa = np.random.rand(255, 255, 255)

        save_nifti(os.path.join(dt, "fa.nii.gz"), fa, affine=np.eye(4))

        out_dir = os.path.join(dirpath, "output")
        os.mkdir(out_dir)

        bundle_analysis(mb,
                        rb,
                        ob,
                        dt,
                        group="patient",
                        subject="10001",
                        no_disks=100,
                        out_dir=out_dir)

        assert_true(os.path.exists(os.path.join(out_dir, 'fa.h5')))
Example #28
def test_stats():
    with TemporaryDirectory() as out_dir:

        data_path, bval_path, bvec_path = get_fnames('small_101D')
        volume, affine = load_nifti(data_path)
        mask = np.ones_like(volume[:, :, :, 0], dtype=np.uint8)
        mask_path = join(out_dir, 'tmp_mask.nii.gz')
        save_nifti(mask_path, mask, affine)

        snr_flow = SNRinCCFlow(force=True)
        args = [data_path, bval_path, bvec_path, mask_path]
        snr_flow.run(*args, out_dir=out_dir)

        assert_true(os.path.exists(os.path.join(out_dir, 'product.json')))
        assert_true(
            os.stat(os.path.join(out_dir, 'product.json')).st_size != 0)
        assert_true(os.path.exists(os.path.join(out_dir, 'cc.nii.gz')))
        assert_true(os.stat(os.path.join(out_dir, 'cc.nii.gz')).st_size != 0)
        assert_true(os.path.exists(os.path.join(out_dir, 'mask_noise.nii.gz')))
        assert_true(
            os.stat(os.path.join(out_dir, 'mask_noise.nii.gz')).st_size != 0)

        snr_flow._force_overwrite = True
        snr_flow.run(*args, out_dir=out_dir)
        assert_true(os.path.exists(os.path.join(out_dir, 'product.json')))
        assert_true(
            os.stat(os.path.join(out_dir, 'product.json')).st_size != 0)
        assert_true(os.path.exists(os.path.join(out_dir, 'cc.nii.gz')))
        assert_true(os.stat(os.path.join(out_dir, 'cc.nii.gz')).st_size != 0)
        assert_true(os.path.exists(os.path.join(out_dir, 'mask_noise.nii.gz')))
        assert_true(
            os.stat(os.path.join(out_dir, 'mask_noise.nii.gz')).st_size != 0)

        snr_flow._force_overwrite = True
        snr_flow.run(*args,
                     bbox_threshold=(0.5, 1, 0, 0.15, 0, 0.2),
                     out_dir=out_dir)
        assert_true(os.path.exists(os.path.join(out_dir, 'product.json')))
        assert_true(
            os.stat(os.path.join(out_dir, 'product.json')).st_size != 0)
        assert_true(os.path.exists(os.path.join(out_dir, 'cc.nii.gz')))
        assert_true(os.stat(os.path.join(out_dir, 'cc.nii.gz')).st_size != 0)
        assert_true(os.path.exists(os.path.join(out_dir, 'mask_noise.nii.gz')))
        assert_true(
            os.stat(os.path.join(out_dir, 'mask_noise.nii.gz')).st_size != 0)
Example #29
def write_mapping(mapping, fname):
    """
    Write out a syn registration mapping to a nifti file

    Parameters
    ----------
    mapping : a DiffeomorphicMap object derived from :func:`syn_registration`
    fname : str
        Full path to the nifti file storing the mapping

    Notes
    -----
    The data in the file is organized with shape (X, Y, Z, 2, 3, 3), such
    that the forward mapping in each voxel is in `data[i, j, k, 0, :, :]` and
    the backward mapping in each voxel is in `data[i, j, k, 1, :, :]`.
    """
    mapping_data = np.array([mapping.forward.T, mapping.backward.T]).T
    save_nifti(fname, mapping_data, mapping.codomain_world2grid)
Example #30
def test_det_track():
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_data('small_64D')
        vol_img = nib.load(data_path)
        volume = vol_img.get_data()
        mask = np.ones_like(volume[:, :, :, 0])
        mask_img = nib.Nifti1Image(mask.astype(np.uint8), vol_img.affine)
        mask_path = join(out_dir, 'tmp_mask.nii.gz')
        nib.save(mask_img, mask_path)

        reconst_csd_flow = ReconstCSDFlow()
        reconst_csd_flow.run(data_path,
                             bval_path,
                             bvec_path,
                             mask_path,
                             out_dir=out_dir,
                             extract_pam_values=True)

        pam_path = reconst_csd_flow.last_generated_outputs['out_pam']
        gfa_path = reconst_csd_flow.last_generated_outputs['out_gfa']

        # Create seeding mask by thresholding the gfa
        mask_flow = MaskFlow()
        mask_flow.run(gfa_path, 0.8, out_dir=out_dir)
        seeds_path = mask_flow.last_generated_outputs['out_mask']

        # Put identity in gfa path to prevent impossible to use
        # local tracking because of affine containing shearing.
        gfa_img = nib.load(gfa_path)
        save_nifti(gfa_path, gfa_img.get_data(), np.eye(4), gfa_img.header)

        # Test tracking with pam no sh
        det_track_pam = DetTrackPAMFlow()
        assert_equal(det_track_pam.get_short_name(), 'det_track')
        det_track_pam.run(pam_path, gfa_path, seeds_path)
        tractogram_path = \
            det_track_pam.last_generated_outputs['out_tractogram']
        assert_false(is_tractogram_empty(tractogram_path))

        # Test tracking with pam with sh
        det_track_pam.run(pam_path, gfa_path, seeds_path, use_sh=True)
        tractogram_path = \
            det_track_pam.last_generated_outputs['out_tractogram']
        assert_false(is_tractogram_empty(tractogram_path))
Example #31
def predict(predict_sets, net, save_location):
    net.eval()

    for val_set in predict_sets:
        data = val_set['data']
        datashape = data.shape[1:]

        zeropad_shape = np.ceil(np.divide(datashape, 8)).astype(int) * 8

        p = zeropad_shape - datashape  # padding
        p_b = np.ceil(p / 2).astype(int)  # padding before image
        p_a = np.floor(p / 2).astype(int)  # padding after image

        data_pad = np.pad(data, ((0, 0), (p_b[0], p_a[0]), (p_b[1], p_a[1]),
                                 (p_b[2], p_a[2])),
                          mode='constant',
                          constant_values=((0, 0), (0, 0), (0, 0), (0, 0)))

        inputs = data_pad[:5, :, :, :]  # just use t1, t2, flair
        inputs = np.expand_dims(inputs, axis=0)
        inputs = torch.from_numpy(inputs)
        inputs = Variable(inputs).cuda()

        with torch.no_grad():
            outputs = net(inputs)

        # Bring predictions into correct shape and remove the zero-padded voxels
        predictions = outputs.data.max(1)[1].squeeze_(1).squeeze_(
            0).cpu().numpy()
        p_up = predictions.shape - p_a
        predictions = predictions[p_b[0]:p_up[0], p_b[1]:p_up[1],
                                  p_b[2]:p_up[2]]

        # Set all voxels that are outside of the brain mask to 0
        mask = (data[0, :, :, :] != 0) | (data[1, :, :, :] != 0) | (
            data[3, :, :, :] != 0) | (data[4, :, :, :] != 0)
        predictions = np.multiply(predictions, mask)

        pred_orig_shape = to_original_shape(predictions, val_set)
        save_nifti(opj(save_location, val_set['name'] + '_pred.nii.gz'),
                   pred_orig_shape, val_set['affine'])
        print("Step1: %s  \tprediction complete" % val_set['name'])

    return
Example #32
def test_ba():

    with TemporaryDirectory() as dirpath:

        streams, hdr = nib.trackvis.read(get_fnames('fornix'))
        fornix = [s[0] for s in streams]

        f = Streamlines(fornix)

        mb = os.path.join(dirpath, "model_bundles")

        os.mkdir(mb)

        save_trk(os.path.join(mb, "temp.trk"),
                 f, affine=np.eye(4))

        rb = os.path.join(dirpath, "rec_bundles")
        os.mkdir(rb)

        save_trk(os.path.join(rb, "temp.trk"), f,
                 affine=np.eye(4))

        ob = os.path.join(dirpath, "org_bundles")
        os.mkdir(ob)

        save_trk(os.path.join(ob, "temp.trk"), f,
                 affine=np.eye(4))

        dt = os.path.join(dirpath, "dti_measures")
        os.mkdir(dt)

        fa = np.random.rand(255, 255, 255)

        save_nifti(os.path.join(dt, "fa.nii.gz"),
                   fa, affine=np.eye(4))

        out_dir = os.path.join(dirpath, "output")
        os.mkdir(out_dir)

        bundle_analysis(mb, rb, ob, dt, group="patient", subject="10001",
                        no_disks=100, out_dir=out_dir)

        assert_true(os.path.exists(os.path.join(out_dir, 'fa.h5')))
Example #33
    def run(self,
            input_files,
            sigma=0,
            out_dir='',
            out_denoised='dwi_nlmeans.nii.gz'):
        """ Workflow wrapping the nlmeans denoising method.

        It applies nlmeans denoise on each file found by 'globing'
        ``input_files`` and saves the results in a directory specified by
        ``out_dir``.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        sigma : float, optional
            Sigma parameter to pass to the nlmeans algorithm
            (default: auto estimation).
        out_dir : string, optional
            Output directory (default input file directory)
        out_denoised : string, optional
            Name of the resulting denoised volume (default: dwi_nlmeans.nii.gz)
        """
        io_it = self.get_io_iterator()
        for fpath, odenoised in io_it:
            if self._skip:
                shutil.copy(fpath, odenoised)
                logging.warning('Denoising skipped for now.')
            else:
                logging.info('Denoising {0}'.format(fpath))
                data, affine, image = load_nifti(fpath, return_img=True)

                if sigma == 0:
                    logging.info('Estimating sigma')
                    sigma = estimate_sigma(data)
                    logging.debug('Found sigma {0}'.format(sigma))

                denoised_data = nlmeans(data, sigma)
                save_nifti(odenoised, denoised_data, affine, image.header)

                logging.info('Denoised volume saved as {0}'.format(odenoised))
Example #34
def make_tensorfit(data,
                   mask,
                   gtab,
                   affine,
                   subject,
                   outpath,
                   overwrite=False,
                   forcestart=False,
                   verbose=None):
    # Given DWI data, a mask, and other relevant information, compute the FA
    # and save it to outpath; if it already exists, simply return the FA.

    from dipy.reconst.dti import TensorModel
    outpathbmfa, exists, _ = check_for_fa(outpath, subject, getdata=False)
    if exists and not forcestart:
        fa = load_nifti(outpathbmfa)
        fa_array = fa[0]
        if verbose:
            txt = "FA already computed at " + outpathbmfa
            print(txt)
        return outpathbmfa, fa_array
    else:
        if verbose:
            print('Calculating the tensor model from bval/bvec values of ',
                  subject)
        tensor_model = TensorModel(gtab)

        t1 = time()
        if len(mask.shape) == 4:
            mask = mask[..., 0]
        tensor_fit = tensor_model.fit(data, mask)

        duration1 = time() - t1
        if verbose:
            print(subject + ' DTI duration %.3f' % (duration1, ))

        save_nifti(outpathbmfa, tensor_fit.fa, affine)
        if verbose:
            print('Saving subject ' + subject + ' at ' + outpathbmfa)

        return outpathbmfa, tensor_fit.fa
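A hypothetical call to this helper, assuming the DWI, mask and gradient files have already been loaded with DIPY's I/O utilities (all paths below are placeholders):

from dipy.io.image import load_nifti
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table

# Placeholder paths; in practice these come from the subject's acquisition.
data, affine = load_nifti('subj_10001_dwi.nii.gz')
mask, _ = load_nifti('subj_10001_mask.nii.gz')
bvals, bvecs = read_bvals_bvecs('subj_10001.bval', 'subj_10001.bvec')
gtab = gradient_table(bvals, bvecs)

fa_path, fa = make_tensorfit(data, mask, gtab, affine,
                             subject='10001', outpath='/tmp/fa_out',
                             verbose=True)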
Exemple #35
0
def test_det_track():
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_data('small_64D')
        vol_img = nib.load(data_path)
        volume = vol_img.get_data()
        mask = np.ones_like(volume[:, :, :, 0])
        mask_img = nib.Nifti1Image(mask.astype(np.uint8), vol_img.affine)
        mask_path = join(out_dir, 'tmp_mask.nii.gz')
        nib.save(mask_img, mask_path)

        reconst_csd_flow = ReconstCSDFlow()
        reconst_csd_flow.run(data_path, bval_path, bvec_path, mask_path,
                             out_dir=out_dir, extract_pam_values=True)

        pam_path = reconst_csd_flow.last_generated_outputs['out_pam']
        gfa_path = reconst_csd_flow.last_generated_outputs['out_gfa']

        # Create seeding mask by thresholding the gfa
        mask_flow = MaskFlow()
        mask_flow.run(gfa_path, 0.8, out_dir=out_dir)
        seeds_path = mask_flow.last_generated_outputs['out_mask']

        # Write an identity affine into the gfa file: local tracking cannot
        # be used when the affine contains shearing.
        gfa_img = nib.load(gfa_path)
        save_nifti(gfa_path, gfa_img.get_data(), np.eye(4), gfa_img.header)

        # Test tracking with pam no sh
        det_track_pam = DetTrackPAMFlow()
        assert_equal(det_track_pam.get_short_name(), 'det_track')
        det_track_pam.run(pam_path, gfa_path, seeds_path)
        tractogram_path = \
            det_track_pam.last_generated_outputs['out_tractogram']
        assert_false(is_tractogram_empty(tractogram_path))

        # Test tracking with pam with sh
        det_track_pam.run(pam_path, gfa_path, seeds_path, use_sh=True)
        tractogram_path = \
            det_track_pam.last_generated_outputs['out_tractogram']
        assert_false(is_tractogram_empty(tractogram_path))
Exemple #36
0
def convert_labelmask(atlas, converter, atlas_outpath=None, affine_labels=None):

    if isinstance(atlas, str):
        labels, affine_labels = load_nifti(atlas)
    else:
        if affine_labels is None:
            raise TypeError('An affine (affine_labels) must be provided when '
                            'passing a label array directly')
        else:
            labels = atlas

    labels_new = np.copy(labels)

    for i in range(np.shape(labels)[0]):
        for j in range(np.shape(labels)[1]):
            for k in range(np.shape(labels)[2]):
                try:
                    labels_new[i, j, k] = converter[labels[i, j, k]]
                except KeyError:
                    # Label not present in the converter; keep the original
                    # value.
                    pass

    save_nifti(atlas_outpath, labels_new, affine_labels)
    return labels_new
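The triple loop above scales poorly with atlas size; a vectorized sketch of the same relabeling (assuming ``converter`` maps non-negative integer labels to new integer labels) could be:

import numpy as np

def convert_labelmask_vectorized(labels, converter):
    # Build a lookup table covering every value up to the largest label;
    # labels missing from `converter` keep their original value.
    labels = np.asarray(labels, dtype=np.int64)
    lut = np.arange(labels.max() + 1, dtype=np.int64)
    for old, new in converter.items():
        if 0 <= old <= labels.max():
            lut[old] = new
    return lut[labels]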
Exemple #37
0
    def run(self, input_files, sigma=0, out_dir='',
            out_denoised='dwi_nlmeans.nii.gz'):
        """ Workflow wrapping the nlmeans denoising method.

        It applies nlmeans denoising on each file found by 'globbing'
        ``input_files`` and saves the results in a directory specified by
        ``out_dir``.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        sigma : float, optional
            Sigma parameter to pass to the nlmeans algorithm
            (default: auto estimation).
        out_dir : string, optional
            Output directory (default input file directory)
        out_denoised : string, optional
            Name of the resulting denoised volume (default: dwi_nlmeans.nii.gz)
        """
        io_it = self.get_io_iterator()
        for fpath, odenoised in io_it:
            if self._skip:
                shutil.copy(fpath, odenoised)
                logging.warning('Denoising skipped for now.')
            else:
                logging.info('Denoising {0}'.format(fpath))
                data, affine, image = load_nifti(fpath, return_img=True)

                if sigma == 0:
                    logging.info('Estimating sigma')
                    sigma = estimate_sigma(data)
                    logging.debug('Found sigma {0}'.format(sigma))

                denoised_data = nlmeans(data, sigma)
                save_nifti(odenoised, denoised_data, affine, image.header)

                logging.info('Denoised volume saved as {0}'.format(odenoised))
Exemple #38
0
    def run(self, static_img_file, moving_img_file, transform='affine',
            nbins=32, sampling_prop=None, metric='mi',
            level_iters=[10000, 1000, 100], sigmas=[3.0, 1.0, 0.0],
            factors=[4, 2, 1], progressive=True, save_metric=False,
            out_dir='', out_moved='moved.nii.gz', out_affine='affine.txt',
            out_quality='quality_metric.txt'):

        """
        Parameters
        ----------
        static_img_file : string
            Path to the static image file.

        moving_img_file : string
            Path to the moving image file.

        transform : string, optional
            com: center of mass, trans: translation, rigid: rigid body,
             affine: full affine including translation, rotation, shearing and
             scaling (default 'affine').

        nbins : int, optional
            Number of bins to discretize the joint and marginal PDF
             (default '32').

        sampling_prop : int, optional
            Number ([0-100]) of voxels for calculating the PDF.
             'None' implies all voxels (default 'None').

        metric : string, optional
            Similarity metric for gathering mutual information
             (default 'mi' , Mutual Information metric).

        level_iters : variable int, optional
            The number of iterations at each scale of the scale space.
             `level_iters[0]` corresponds to the coarsest scale and
             `level_iters[-1]` to the finest. By default, a 3-level scale
             space with an iterations sequence equal to [10000, 1000, 100]
             will be used.

        sigmas : variable floats, optional
            Custom smoothing parameter to build the scale space (one parameter
             for each scale). By default, the sequence of sigmas will be
             [3, 1, 0].

        factors : variable floats, optional
            Custom scale factors to build the scale space (one factor for each
             scale). By default, the sequence of factors will be [4, 2, 1].

        progressive : boolean, optional
            Enable/Disable the progressive registration (default 'True').

        save_metric : boolean, optional
            If true, the quality assessment metric is saved to
            'quality_metric.txt' (default 'False').

        out_dir : string, optional
            Directory to save the transformed image and the affine matrix
             (default '').

        out_moved : string, optional
            Name for the saved transformed image
             (default 'moved.nii.gz').

        out_affine : string, optional
            Name for the saved affine matrix
             (default 'affine.txt').

        out_quality : string, optional
            Name of the file containing the saved quality
             metric (default 'quality_metric.txt').
        """

        io_it = self.get_io_iterator()
        transform = transform.lower()

        for static_img, mov_img, moved_file, affine_matrix_file, \
                qual_val_file in io_it:

            # Load the data from the input files and store into objects.
            image = nib.load(static_img)
            static = np.array(image.get_data())
            static_grid2world = image.affine

            image = nib.load(mov_img)
            moving = np.array(image.get_data())
            moving_grid2world = image.affine

            self.check_dimensions(static, moving)

            if transform == 'com':
                moved_image, affine = self.center_of_mass(static,
                                                          static_grid2world,
                                                          moving,
                                                          moving_grid2world)
            else:

                params0 = None
                if metric != 'mi':
                    raise ValueError("Invalid similarity metric: Please"
                                     " provide a valid metric.")
                metric = MutualInformationMetric(nbins, sampling_prop)

                """
                Instantiating the registration class with the configurations.
                """

                affreg = AffineRegistration(metric=metric,
                                            level_iters=level_iters,
                                            sigmas=sigmas,
                                            factors=factors)

                if transform == 'trans':
                    moved_image, affine, \
                        xopt, fopt = self.translate(static,
                                                    static_grid2world,
                                                    moving,
                                                    moving_grid2world,
                                                    affreg,
                                                    params0)

                elif transform == 'rigid':
                    moved_image, affine, \
                        xopt, fopt = self.rigid(static,
                                                static_grid2world,
                                                moving,
                                                moving_grid2world,
                                                affreg,
                                                params0,
                                                progressive)

                elif transform == 'affine':
                    moved_image, affine, \
                        xopt, fopt = self.affine(static,
                                                 static_grid2world,
                                                 moving,
                                                 moving_grid2world,
                                                 affreg,
                                                 params0,
                                                 progressive)
                else:
                    raise ValueError('Invalid transformation:'
                                     ' Please see program\'s help'
                                     ' for allowed values of'
                                     ' transformation.')

                """
                Saving the moved image file and the affine matrix.
                """
                logging.info("Optimal parameters: {0}".format(str(xopt)))
                logging.info("Similarity metric: {0}".format(str(fopt)))

                if save_metric:
                    save_qa_metric(qual_val_file, xopt, fopt)

            save_nifti(moved_file, moved_image, static_grid2world)
            np.savetxt(affine_matrix_file, affine)
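For reference, a sketch of calling this workflow directly from Python, assuming the enclosing class is DIPY's ImageRegistrationFlow (all paths are hypothetical):

from dipy.workflows.align import ImageRegistrationFlow

flow = ImageRegistrationFlow()
flow.run('b0.nii.gz', 't1.nii.gz',          # static and moving images
         transform='affine',
         progressive=True,
         out_dir='reg_out',
         out_moved='moved.nii.gz',
         out_affine='affine.txt')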
Exemple #39
0
if rec_model == 'CSD':
    # Elef add CSD version and add MTMSCSD when it is ready.
    pass

pam = peaks_from_model(model, data, sphere,
                       relative_peak_threshold=.8,
                       min_separation_angle=45,
                       mask=mask, parallel=parallel)

ten_model = TensorModel(gtab)
fa = ten_model.fit(data, mask).fa
save_nifti(ffa, fa, affine)

save_peaks(fpam5, pam, affine)

show_odfs_and_fa(fa, pam, mask, None, sphere, ftmp='odf.mmap',
                 basis_type=None)

pve_csf, pve_gm, pve_wm = pve[..., 0], pve[..., 1], pve[..., 2]

cmc_classifier = CmcTissueClassifier.from_pve(
        pve_wm,
        pve_gm,
        pve_csf,
        step_size=step_size,
        average_voxel_size=np.average(vox_size))
Exemple #40
0
    def run(self, data_files, bvals_files, bvecs_files, mask_files,
            bbox_threshold=[0.6, 1, 0, 0.1, 0, 0.1], out_dir='',
            out_file='product.json', out_mask_cc='cc.nii.gz',
            out_mask_noise='mask_noise.nii.gz'):
        """Compute the signal-to-noise ratio in the corpus callosum.

        Parameters
        ----------
        data_files : string
            Path to the dwi.nii.gz file. This path may contain wildcards to
            process multiple inputs at once.
        bvals_files : string
            Path of bvals.
        bvecs_files : string
            Path of bvecs.
        mask_files : string
            Path of brain mask
        bbox_threshold : variable float, optional
            Threshold for the bounding box, with values separated by commas,
            e.g. [0.6,1,0,0.1,0,0.1]. (default (0.6, 1, 0, 0.1, 0, 0.1))
        out_dir : string, optional
            Where the resulting file will be saved. (default '')
        out_file : string, optional
            Name of the result file to be saved. (default 'product.json')
        out_mask_cc : string, optional
            Name of the CC mask volume to be saved (default 'cc.nii.gz')
        out_mask_noise : string, optional
            Name of the mask noise volume to be saved
            (default 'mask_noise.nii.gz')

        """
        io_it = self.get_io_iterator()

        for dwi_path, bvals_path, bvecs_path, mask_path, out_path, \
                cc_mask_path, mask_noise_path in io_it:
            data, affine = load_nifti(dwi_path)
            bvals, bvecs = read_bvals_bvecs(bvals_path, bvecs_path)
            gtab = gradient_table(bvals=bvals, bvecs=bvecs)

            logging.info('Computing brain mask...')
            _, calc_mask = median_otsu(data)

            mask, affine = load_nifti(mask_path)
            mask = np.array(calc_mask == mask.astype(bool)).astype(int)

            logging.info('Computing tensors...')
            tenmodel = TensorModel(gtab)
            tensorfit = tenmodel.fit(data, mask=mask)

            logging.info(
                'Computing worst-case/best-case SNR using the CC...')

            if np.ndim(data) == 4:
                CC_box = np.zeros_like(data[..., 0])
            elif np.ndim(data) == 3:
                CC_box = np.zeros_like(data)
            else:
                raise IOError('DWI data has invalid dimensions')

            mins, maxs = bounding_box(mask)
            mins = np.array(mins)
            maxs = np.array(maxs)
            diff = (maxs - mins) // 4
            bounds_min = mins + diff
            bounds_max = maxs - diff

            CC_box[bounds_min[0]:bounds_max[0],
                   bounds_min[1]:bounds_max[1],
                   bounds_min[2]:bounds_max[2]] = 1

            if len(bbox_threshold) != 6:
                raise IOError('bbox_threshold should have 6 float values')

            mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box,
                                                 bbox_threshold,
                                                 return_cfa=True)

            save_nifti(cc_mask_path, mask_cc_part.astype(np.uint8), affine)
            logging.info('CC mask saved as {0}'.format(cc_mask_path))

            mean_signal = np.mean(data[mask_cc_part], axis=0)
            mask_noise = binary_dilation(mask, iterations=10)
            mask_noise[..., :mask_noise.shape[-1]//2] = 1
            mask_noise = ~mask_noise

            save_nifti(mask_noise_path, mask_noise.astype(np.uint8), affine)
            logging.info('Mask noise saved as {0}'.format(mask_noise_path))

            noise_std = np.std(data[mask_noise, :])
            logging.info('Noise standard deviation sigma= ' + str(noise_std))

            idx = np.sum(gtab.bvecs, axis=-1) == 0
            gtab.bvecs[idx] = np.inf
            axis_X = np.argmin(
                np.sum((gtab.bvecs-np.array([1, 0, 0])) ** 2, axis=-1))
            axis_Y = np.argmin(
                np.sum((gtab.bvecs-np.array([0, 1, 0])) ** 2, axis=-1))
            axis_Z = np.argmin(
                np.sum((gtab.bvecs-np.array([0, 0, 1])) ** 2, axis=-1))

            SNR_output = []
            SNR_directions = []
            for direction in ['b0', axis_X, axis_Y, axis_Z]:
                if direction == 'b0':
                    SNR = mean_signal[0]/noise_std
                    logging.info("SNR for the b=0 image is :" + str(SNR))
                else:
                    SNR = mean_signal[direction]/noise_std
                    logging.info("SNR for direction " + str(direction) +
                                 " " + str(gtab.bvecs[direction]) + " is :" +
                                 str(SNR))
                    SNR_directions.append(direction)
                SNR_output.append(SNR)

            data = []
            data.append({
                        'data': str(SNR_output[0]) + ' ' + str(SNR_output[1]) +
                        ' ' + str(SNR_output[2]) + ' ' + str(SNR_output[3]),
                        'directions': 'b0' + ' ' + str(SNR_directions[0]) +
                        ' ' + str(SNR_directions[1]) + ' ' +
                        str(SNR_directions[2])
                        })

            with open(os.path.join(out_dir, out_path), 'w') as myfile:
                json.dump(data, myfile)
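A hypothetical invocation of this workflow, assuming the enclosing class is DIPY's SNRinCCFlow (all paths are placeholders):

from dipy.workflows.stats import SNRinCCFlow

flow = SNRinCCFlow()
flow.run('dwi.nii.gz', 'dwi.bval', 'dwi.bvec', 'brain_mask.nii.gz',
         bbox_threshold=[0.6, 1, 0, 0.1, 0, 0.1],
         out_dir='snr_out')
# The SNR values and directions are written to snr_out/product.json.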
Exemple #41
0
**Deterministic streamlines using EuDX (new framework)**

To learn more about this process you could start playing with the number of
seed points or, even better, specify seeds to be in specific regions of interest
in the brain.

Save the resulting streamlines in a Trackvis (.trk) format and FA as
Nifti (.nii.gz).
"""

save_trk(Tractogram(streamlines, affine_to_rasmm=img.affine),
         'det_streamlines.trk')

save_nifti('fa_map.nii.gz', fa, img.affine)

"""
In Windows if you get a runtime error about frozen executable please start
your script by adding your code above in a ``main`` function and use::

    if __name__ == '__main__':
        import multiprocessing
        multiprocessing.freeze_support()
        main()

References
----------

.. [Garyfallidis12] Garyfallidis E., "Towards an accurate brain tractography",
   PhD thesis, University of Cambridge, 2012.
Exemple #42
0
    def run(self, static_image_files, moving_image_files, transform_map_file,
            transform_type='affine', out_dir='',
            out_file='transformed.nii.gz'):
        """
        Parameters
        ----------
        static_image_files : string
            Path of the static image file.

        moving_image_files : string
            Path of the moving image(s). It can be a single image or a
            folder containing multiple images.

        transform_map_file : string
            For the affine case, it should be a text (*.txt) file containing
            the affine matrix. For the diffeomorphic case, it should be a
            NIfTI file containing the mapping displacement field in each
            voxel, with shape (x, y, z, 3, 2).

        transform_type : string, optional
            Select the transformation type to apply between 'affine' or
            'diffeomorphic'. (default affine)

        out_dir : string, optional
            Directory to save the transformed files (default '').

        out_file : string, optional
            Name of the transformed file (default 'transformed.nii.gz').
             It is recommended to use the flag --mix-names to
              prevent the output files from being overwritten.

        """
        if transform_type.lower() not in ['affine', 'diffeomorphic']:
            raise ValueError("Invalid transformation type: Please"
                             " provide a valid transform like 'affine'"
                             " or 'diffeomorphic'")

        io = self.get_io_iterator()

        for static_image_file, moving_image_file, transform_file, \
                out_file in io:

            # Loading the image data from the input files into object.
            static_image, static_grid2world = load_nifti(static_image_file)
            moving_image, moving_grid2world = load_nifti(moving_image_file)

            # Doing a sanity check for validating the dimensions of the input
            # images.
            check_dimensions(static_image, moving_image)

            if transform_type.lower() == 'affine':
                # Loading the affine matrix.
                affine_matrix = np.loadtxt(transform_file)

                # Setting up the affine transformation object.
                mapping = AffineMap(
                    affine=affine_matrix,
                    domain_grid_shape=static_image.shape,
                    domain_grid2world=static_grid2world,
                    codomain_grid_shape=moving_image.shape,
                    codomain_grid2world=moving_grid2world)

            elif transform_type.lower() == 'diffeomorphic':
                # Loading the diffeomorphic map.
                disp = nib.load(transform_file)

                mapping = DiffeomorphicMap(
                    3, disp.shape[:3],
                    disp_grid2world=np.linalg.inv(disp.affine),
                    domain_shape=static_image.shape,
                    domain_grid2world=static_grid2world,
                    codomain_shape=moving_image.shape,
                    codomain_grid2world=moving_grid2world)

                disp_data = disp.get_data()
                mapping.forward = disp_data[..., 0]
                mapping.backward = disp_data[..., 1]
                mapping.is_inverse = True

            # Transforming the image.
            transformed = mapping.transform(moving_image)

            save_nifti(out_file, transformed, affine=static_grid2world)
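A sketch of applying a previously computed affine to one or more moving images, assuming the enclosing class is DIPY's ApplyTransformFlow (hypothetical paths):

from dipy.workflows.align import ApplyTransformFlow

flow = ApplyTransformFlow()
flow.run('b0.nii.gz',          # static image
         't1_*.nii.gz',        # moving image(s); wildcards are allowed
         'affine.txt',         # transform saved by the registration workflow
         transform_type='affine',
         out_dir='transformed')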
Exemple #43
0
    def run(self, input_files, bvalues_files, bvectors_files, mask_files,
            b0_threshold=50.0, save_metrics=[],
            out_dir='', out_dt_tensor='dti_tensors.nii.gz', out_fa='fa.nii.gz',
            out_ga='ga.nii.gz', out_rgb='rgb.nii.gz', out_md='md.nii.gz',
            out_ad='ad.nii.gz', out_rd='rd.nii.gz', out_mode='mode.nii.gz',
            out_evec='evecs.nii.gz', out_eval='evals.nii.gz',
            out_dk_tensor="dki_tensors.nii.gz",
            out_mk="mk.nii.gz", out_ak="ak.nii.gz", out_rk="rk.nii.gz"):
        """ Workflow for Diffusion Kurtosis reconstruction and for computing
        DKI metrics. Performs a DKI reconstruction on the files by 'globing'
        ``input_files`` and saves the DKI metrics in a directory specified by
        ``out_dir``.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        bvalues_files : string
            Path to the bvalues files. This path may contain wildcards to use
            multiple bvalues files at once.
        bvectors_files : string
            Path to the bvectors files. This path may contain wildcards to use
            multiple bvectors files at once.
        mask_files : string
            Path to the input masks. This path may contain wildcards to use
            multiple masks at once. (default: No mask used)
        b0_threshold : float, optional
            Threshold used to find b=0 directions (default 50.0)
        save_metrics : variable string, optional
            List of metrics to save.
            Possible values: fa, ga, rgb, md, ad, rd, mode, tensor, evec, eval
            (default [] (all))
        out_dir : string, optional
            Output directory (default input file directory)
        out_dt_tensor : string, optional
            Name of the tensors volume to be saved
            (default: 'dti_tensors.nii.gz')
        out_dk_tensor : string, optional
            Name of the tensors volume to be saved
            (default 'dki_tensors.nii.gz')
        out_fa : string, optional
            Name of the fractional anisotropy volume to be saved
            (default 'fa.nii.gz')
        out_ga : string, optional
            Name of the geodesic anisotropy volume to be saved
            (default 'ga.nii.gz')
        out_rgb : string, optional
            Name of the color fa volume to be saved (default 'rgb.nii.gz')
        out_md : string, optional
            Name of the mean diffusivity volume to be saved
            (default 'md.nii.gz')
        out_ad : string, optional
            Name of the axial diffusivity volume to be saved
            (default 'ad.nii.gz')
        out_rd : string, optional
            Name of the radial diffusivity volume to be saved
            (default 'rd.nii.gz')
        out_mode : string, optional
            Name of the mode volume to be saved (default 'mode.nii.gz')
        out_evec : string, optional
            Name of the eigenvectors volume to be saved
            (default 'evecs.nii.gz')
        out_eval : string, optional
            Name of the eigenvalues to be saved (default 'evals.nii.gz')
        out_mk : string, optional
            Name of the mean kurtosis to be saved (default: 'mk.nii.gz')
        out_ak : string, optional
            Name of the axial kurtosis to be saved (default: 'ak.nii.gz')
        out_rk : string, optional
            Name of the radial kurtosis to be saved (default: 'rk.nii.gz')

        References
        ----------

        .. [1] Tabesh, A., Jensen, J.H., Ardekani, B.A., Helpern, J.A., 2011.
           Estimation of tensors and tensor-derived measures in diffusional
           kurtosis imaging. Magn Reson Med. 65(3), 823-836

        .. [2] Jensen, Jens H., Joseph A. Helpern, Anita Ramani, Hanzhang Lu,
           and Kyle Kaczynski. 2005. Diffusional Kurtosis Imaging: The
           Quantification of Non-Gaussian Water Diffusion by Means of Magnetic
           Resonance Imaging. MRM 53 (6):1432-40.
        """
        io_it = self.get_io_iterator()

        for (dwi, bval, bvec, mask, otensor, ofa, oga, orgb, omd, oad, orad,
             omode, oevecs, oevals, odk_tensor, omk, oak, ork) in io_it:

            logging.info('Computing DKI metrics for {0}'.format(dwi))
            data, affine = load_nifti(dwi)

            if mask is not None:
                mask = nib.load(mask).get_data().astype(bool)

            dkfit, _ = self.get_fitted_tensor(data, mask, bval, bvec,
                                              b0_threshold)

            if not save_metrics:
                save_metrics = ['mk', 'rk', 'ak', 'fa', 'md', 'rd', 'ad', 'ga',
                                'rgb', 'mode', 'evec', 'eval', 'dt_tensor',
                                'dk_tensor']

            evals, evecs, kt = split_dki_param(dkfit.model_params)
            FA = fractional_anisotropy(evals)
            FA[np.isnan(FA)] = 0
            FA = np.clip(FA, 0, 1)

            if 'dt_tensor' in save_metrics:
                tensor_vals = lower_triangular(dkfit.quadratic_form)
                correct_order = [0, 1, 3, 2, 4, 5]
                tensor_vals_reordered = tensor_vals[..., correct_order]
                save_nifti(otensor, tensor_vals_reordered.astype(np.float32),
                           affine)

            if 'dk_tensor' in save_metrics:
                save_nifti(odk_tensor, dkfit.kt.astype(np.float32), affine)

            if 'fa' in save_metrics:
                save_nifti(ofa, FA.astype(np.float32), affine)

            if 'ga' in save_metrics:
                GA = geodesic_anisotropy(dkfit.evals)
                save_nifti(oga, GA.astype(np.float32), affine)

            if 'rgb' in save_metrics:
                RGB = color_fa(FA, dkfit.evecs)
                save_nifti(orgb, np.array(255 * RGB, 'uint8'), affine)

            if 'md' in save_metrics:
                MD = mean_diffusivity(dkfit.evals)
                save_nifti(omd, MD.astype(np.float32), affine)

            if 'ad' in save_metrics:
                AD = axial_diffusivity(dkfit.evals)
                save_nifti(oad, AD.astype(np.float32), affine)

            if 'rd' in save_metrics:
                RD = radial_diffusivity(dkfit.evals)
                save_nifti(orad, RD.astype(np.float32), affine)

            if 'mode' in save_metrics:
                MODE = get_mode(dkfit.quadratic_form)
                save_nifti(omode, MODE.astype(np.float32), affine)

            if 'evec' in save_metrics:
                save_nifti(oevecs, dkfit.evecs.astype(np.float32), affine)

            if 'eval' in save_metrics:
                save_nifti(oevals, dkfit.evals.astype(np.float32), affine)

            if 'mk' in save_metrics:
                save_nifti(omk, dkfit.mk().astype(np.float32), affine)

            if 'ak' in save_metrics:
                save_nifti(oak, dkfit.ak().astype(np.float32), affine)

            if 'rk' in save_metrics:
                save_nifti(ork, dkfit.rk().astype(np.float32), affine)

            logging.info('DKI metrics saved in {0}'.
                         format(os.path.dirname(oevals)))
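A minimal sketch of running this workflow, assuming the enclosing class is DIPY's ReconstDkiFlow (hypothetical paths), saving only a subset of the metrics:

from dipy.workflows.reconst import ReconstDkiFlow

flow = ReconstDkiFlow()
flow.run('dwi.nii.gz', 'dwi.bval', 'dwi.bvec', 'brain_mask.nii.gz',
         save_metrics=['fa', 'md', 'mk'],
         out_dir='dki_out')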
Exemple #44
0
    def run(
        self,
        input_files,
        save_masked=False,
        median_radius=2,
        numpass=5,
        autocrop=False,
        vol_idx=None,
        dilate=None,
        out_dir="",
        out_mask="brain_mask.nii.gz",
        out_masked="dwi_masked.nii.gz",
    ):
        """Workflow wrapping the median_otsu segmentation method.

        Applies median_otsu segmentation on each file found by 'globing'
        ``input_files`` and saves the results in a directory specified by
        ``out_dir``.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        save_masked : bool, optional
            Save the masked input volume as well (default False)
        median_radius : int, optional
            Radius (in voxels) of the applied median filter (default 2)
        numpass : int, optional
            Number of pass of the median filter (default 5)
        autocrop : bool, optional
            If True, the masked input_volumes will also be cropped using the
            bounding box defined by the masked data. For example, if diffusion
            images are of 1x1x1 (mm^3) or higher resolution auto-cropping could
            reduce their size in memory and speed up some of the analysis.
            (default False)
        vol_idx : variable int, optional
            1D array representing indices of ``axis=3`` of a 4D `input_volume`.
            'None' (the default) corresponds to ``(0,)`` (assumes the first
            volume in the 4D array)
        dilate : int, optional
            Number of iterations for binary dilation (default 'None')
        out_dir : string, optional
            Output directory (default input file directory)
        out_mask : string, optional
            Name of the mask volume to be saved (default 'brain_mask.nii.gz')
        out_masked : string, optional
            Name of the masked volume to be saved (default 'dwi_masked.nii.gz')
        """
        io_it = self.get_io_iterator()

        for fpath, mask_out_path, masked_out_path in io_it:
            logging.info("Applying median_otsu segmentation on {0}".format(fpath))

            data, affine, img = load_nifti(fpath, return_img=True)

            masked_volume, mask_volume = median_otsu(
                data, median_radius, numpass, autocrop, vol_idx, dilate)

            save_nifti(mask_out_path, mask_volume.astype(np.float32), affine)

            logging.info("Mask saved as {0}".format(mask_out_path))

            if save_masked:
                save_nifti(masked_out_path, masked_volume, affine, img.header)

                logging.info("Masked volume saved as {0}".format(masked_out_path))

        return io_it
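A sketch of running the segmentation workflow on a single 4D series, assuming the enclosing class is DIPY's MedianOtsuFlow (hypothetical path):

from dipy.workflows.segment import MedianOtsuFlow

flow = MedianOtsuFlow()
flow.run('dwi.nii.gz',
         save_masked=True,
         median_radius=2,
         numpass=5,
         out_dir='masked')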
Exemple #45
0
    def run(self, static_image_files, moving_image_files, prealign_file='',
            inv_static=False, level_iters=[10, 10, 5], metric="cc",
            mopt_sigma_diff=2.0, mopt_radius=4, mopt_smooth=0.0,
            mopt_inner_iter=0.0, mopt_q_levels=256, mopt_double_gradient=True,
            mopt_step_type='', step_length=0.25,
            ss_sigma_factor=0.2, opt_tol=1e-5, inv_iter=20,
            inv_tol=1e-3, out_dir='', out_warped='warped_moved.nii.gz',
            out_inv_static='inc_static.nii.gz',
            out_field='displacement_field.nii.gz'):

        """
        Parameters
        ----------
        static_image_files : string
            Path of the static image file.

        moving_image_files : string
            Path to the moving image file.

        prealign_file : string, optional
            The text file containing pre-alignment information via an
             affine matrix.

        inv_static : boolean, optional
            Apply the inverse mapping to the static image (default 'False').

        level_iters : variable int, optional
            The number of iterations at each level of the gaussian pyramid.
             By default, a 3-level scale space with iterations
             sequence equal to [10, 10, 5] will be used. The 0-th
             level corresponds to the finest resolution.

        metric : string, optional
            The metric to be used (Default cc, 'Cross Correlation metric').
            metric available: cc (Cross Correlation), ssd (Sum Squared
            Difference), em (Expectation-Maximization).

        mopt_sigma_diff : float, optional
            Metric option applied on Cross correlation (CC).
            The standard deviation of the Gaussian smoothing kernel to be
            applied to the update field at each iteration (default 2.0)

        mopt_radius : int, optional
            Metric option applied on Cross correlation (CC).
            the radius of the squared (cubic) neighborhood at each voxel to
            be considered to compute the cross correlation. (default 4)

        mopt_smooth : float, optional
            Metric option applied on Sum Squared Difference (SSD) and
            Expectation Maximization (EM). Smoothness parameter, the
            larger the value the smoother the deformation field.
            (default 1.0 for EM, 4.0 for SSD)

        mopt_inner_iter : int, optional
            Metric option applied on Sum Squared Difference (SSD) and
            Expectation Maximization (EM). This is number of iterations to be
            performed at each level of the multi-resolution Gauss-Seidel
            optimization algorithm (this is not the number of steps per
            Gaussian Pyramid level, that parameter must be set for the
            optimizer, not the metric). Default 5 for EM, 10 for SSD.

        mopt_q_levels : int, optional
            Metric option applied on Expectation Maximization (EM).
            Number of quantization levels (Default: 256 for EM)

        mopt_double_gradient : bool, optional
            Metric option applied on Expectation Maximization (EM).
            if True, the gradient of the expected static image under the moving
            modality will be added to the gradient of the moving image,
            similarly, the gradient of the expected moving image under the
            static modality will be added to the gradient of the static image.

        mopt_step_type : string, optional
            Metric option applied on Sum Squared Difference (SSD) and
            Expectation Maximization (EM). The optimization schedule to be
            used in the multi-resolution Gauss-Seidel optimization algorithm
            (not used if Demons Step is selected). Possible value:
            ('gauss_newton', 'demons'). default: 'gauss_newton' for EM,
            'demons' for SSD.

        step_length : float, optional
            the length of the maximum displacement vector of the update
             displacement field at each iteration.

        ss_sigma_factor : float, optional
            parameter of the scale-space smoothing kernel. For example, the
             std. dev. of the kernel will be factor*(2^i) in the isotropic case
             where i = 0, 1, ..., n_scales is the scale.

        opt_tol : float, optional
            the optimization will stop when the estimated derivative of the
             energy profile w.r.t. time falls below this threshold.

        inv_iter : int, optional
            the number of iterations to be performed by the displacement field
             inversion algorithm.

        inv_tol : float, optional
            the displacement field inversion algorithm will stop iterating
             when the inversion error falls below this threshold.

        out_dir : string, optional
            Directory to save the transformed files (default '').

        out_warped : string, optional
            Name of the warped file. (default 'warped_moved.nii.gz').

        out_inv_static : string, optional
            Name of the file to save the static image after applying the
             inverse mapping (default 'inv_static.nii.gz').

        out_field : string, optional
            Name of the file to save the diffeomorphic map.
            (default 'displacement_field.nii.gz')

        """
        io_it = self.get_io_iterator()
        metric = metric.lower()
        if metric not in ['ssd', 'cc', 'em']:
            raise ValueError("Invalid similarity metric: Please"
                             " provide a valid metric like 'ssd', 'cc', 'em'")

        logging.info("Starting Diffeormorphic Registration")
        logging.info('Using {0} Metric'.format(metric.upper()))

        # Init parameter if they are not setup
        init_param = {'ssd': {'mopt_smooth': 4.0,
                              'mopt_inner_iter': 10,
                              'mopt_step_type': 'demons'
                              },
                      'em': {'mopt_smooth': 1.0,
                             'mopt_inner_iter': 5,
                             'mopt_step_type': 'gauss_newton'
                             }
                      }
        mopt_smooth = mopt_smooth or init_param[metric]['mopt_smooth']
        mopt_inner_iter = mopt_inner_iter or  \
            init_param[metric]['mopt_inner_iter']
        mopt_step_type = mopt_step_type or \
            init_param[metric]['mopt_step_type']

        for (static_file, moving_file, owarped_file, oinv_static_file,
             omap_file) in io_it:

            logging.info('Loading static file {0}'.format(static_file))
            logging.info('Loading moving file {0}'.format(moving_file))

            # Loading the image data from the input files into object.
            static_image, static_grid2world = load_nifti(static_file)
            moving_image, moving_grid2world = load_nifti(moving_file)

            # Sanity check for the input image dimensions.
            check_dimensions(static_image, moving_image)

            # Loading the affine matrix.
            prealign = np.loadtxt(prealign_file) if prealign_file else None

            l_metric = {"ssd": SSDMetric(static_image.ndim,
                                         smooth=mopt_smooth,
                                         inner_iter=mopt_inner_iter,
                                         step_type=mopt_step_type
                                         ),
                        "cc": CCMetric(static_image.ndim,
                                       sigma_diff=mopt_sigma_diff,
                                       radius=mopt_radius),
                        "em": EMMetric(static_image.ndim,
                                       smooth=mopt_smooth,
                                       inner_iter=mopt_inner_iter,
                                       step_type=mopt_step_type,
                                       q_levels=mopt_q_levels,
                                       double_gradient=mopt_double_gradient)
                        }

            current_metric = l_metric.get(metric.lower())

            sdr = SymmetricDiffeomorphicRegistration(
                metric=current_metric,
                level_iters=level_iters,
                step_length=step_length,
                ss_sigma_factor=ss_sigma_factor,
                opt_tol=opt_tol,
                inv_iter=inv_iter,
                inv_tol=inv_tol
                )

            mapping = sdr.optimize(static_image, moving_image,
                                   static_grid2world, moving_grid2world,
                                   prealign)

            mapping_data = np.array([mapping.forward.T, mapping.backward.T]).T
            warped_moving = mapping.transform(moving_image)

            # Saving
            logging.info('Saving warped {0}'.format(owarped_file))
            save_nifti(owarped_file, warped_moving, static_grid2world)
            logging.info('Saving Diffeomorphic map {0}'.format(omap_file))
            save_nifti(omap_file, mapping_data, mapping.codomain_world2grid)
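A hypothetical call, assuming the enclosing class is DIPY's SynRegistrationFlow and that an affine pre-alignment has already been computed (paths are placeholders):

from dipy.workflows.align import SynRegistrationFlow

flow = SynRegistrationFlow()
flow.run('b0.nii.gz', 't1.nii.gz',
         prealign_file='affine.txt',
         metric='cc',
         level_iters=[10, 10, 5],
         out_dir='syn_out',
         out_warped='warped_moved.nii.gz',
         out_field='displacement_field.nii.gz')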
Exemple #46
0
    def run(self, input_files, bvalues_files, bvectors_files, mask_files,
            split_b_D=400, split_b_S0=200, b0_threshold=0, save_metrics=[],
            out_dir='', out_S0_predicted='S0_predicted.nii.gz',
            out_perfusion_fraction='perfusion_fraction.nii.gz',
            out_D_star='D_star.nii.gz', out_D='D.nii.gz'):
        """ Workflow for Intra-voxel Incoherent Motion reconstruction and for
        computing IVIM metrics. Performs a IVIM reconstruction on the files
        by 'globing' ``input_files`` and saves the IVIM metrics in a directory
        specified by ``out_dir``.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        bvalues_files : string
            Path to the bvalues files. This path may contain wildcards to use
            multiple bvalues files at once.
        bvectors_files : string
            Path to the bvectors files. This path may contain wildcards to use
            multiple bvectors files at once.
        mask_files : string
            Path to the input masks. This path may contain wildcards to use
            multiple masks at once. (default: No mask used)
        split_b_D : int, optional
            Value to split the bvals to estimate D for the two-stage process of
            fitting
            (default 400)
        split_b_S0 : int, optional
            Value to split the bvals to estimate S0 for the two-stage process
            of fitting.
            (default 200)
        b0_threshold : int, optional
            Threshold value for the b0 bval.
            (default 0)
        save_metrics : variable string, optional
            List of metrics to save.
            Possible values: S0_predicted, perfusion_fraction, D_star, D
            (default [] (all))
        out_dir : string, optional
            Output directory (default input file directory)
        out_S0_predicted : string, optional
            Name of the S0 signal estimated to be saved
            (default: 'S0_predicted.nii.gz')
        out_perfusion_fraction : string, optional
            Name of the estimated volume fractions to be saved
            (default 'perfusion_fraction.nii.gz')
        out_D_star : string, optional
            Name of the estimated pseudo-diffusion parameter to be saved
            (default 'D_star.nii.gz')
        out_D : string, optional
            Name of the estimated diffusion parameter to be saved
            (default 'D.nii.gz')

        References
        ----------

        .. [Stejskal65] Stejskal, E. O.; Tanner, J. E. (1 January 1965).
                        "Spin Diffusion Measurements: Spin Echoes in the
                        Presence of a Time-Dependent Field Gradient". The
                        Journal of Chemical Physics 42 (1): 288.
                        Bibcode: 1965JChPh..42..288S. doi:10.1063/1.1695690.

        .. [LeBihan84] Le Bihan, Denis, et al. "Separation of diffusion
                       and perfusion in intravoxel incoherent motion MR
                       imaging." Radiology 168.2 (1988): 497-505.
        """

        io_it = self.get_io_iterator()

        for (dwi, bval, bvec, mask, oS0_predicted, operfusion_fraction,
             oD_star, oD) in io_it:

            logging.info('Computing IVIM metrics for {0}'.format(dwi))
            data, affine = load_nifti(dwi)

            if mask is not None:
                mask = nib.load(mask).get_data().astype(bool)

            ivimfit, _ = self.get_fitted_ivim(data, mask, bval, bvec,
                                              b0_threshold)

            if not save_metrics:
                save_metrics = ['S0_predicted', 'perfusion_fraction', 'D_star',
                                'D']

            if 'S0_predicted' in save_metrics:
                save_nifti(oS0_predicted,
                           ivimfit.S0_predicted.astype(np.float32), affine)

            if 'perfusion_fraction' in save_metrics:
                save_nifti(operfusion_fraction,
                           ivimfit.perfusion_fraction.astype(np.float32),
                           affine)

            if 'D_star' in save_metrics:
                save_nifti(oD_star, ivimfit.D_star.astype(np.float32), affine)

            if 'D' in save_metrics:
                save_nifti(oD, ivimfit.D.astype(np.float32), affine)

            logging.info('IVIM metrics saved in {0}'.
                         format(os.path.dirname(oD)))
Exemple #47
0
    def run(self, data_files, bvals_files, bvecs_files, small_delta, big_delta,
            b0_threshold=50.0, laplacian=True, positivity=True,
            bval_threshold=2000, save_metrics=[],
            laplacian_weighting=0.05, radial_order=6, out_dir='',
            out_rtop='rtop.nii.gz', out_lapnorm='lapnorm.nii.gz',
            out_msd='msd.nii.gz', out_qiv='qiv.nii.gz',
            out_rtap='rtap.nii.gz',
            out_rtpp='rtpp.nii.gz', out_ng='ng.nii.gz',
            out_perng='perng.nii.gz',
            out_parng='parng.nii.gz'):
        """ Workflow for fitting the MAPMRI model (with optional Laplacian
        regularization). Generates rtop, lapnorm, msd, qiv, rtap, rtpp,
        non-gaussian (ng), parallel ng, perpendicular ng saved in a nifti
        format in input files provided by `data_files` and saves the nifti
        files to an output directory specified by `out_dir`.

        In order for the MAPMRI workflow to work in the way
        intended either the laplacian or positivity or both must
        be set to True.

        Parameters
        ----------
        data_files : string
            Path to the input volume.
        bvals_files : string
            Path to the bval files.
        bvecs_files : string
            Path to the bvec files.
        small_delta : float
            Small delta value used in generation of gradient table of provided
            bval and bvec.
        big_delta : float
            Big delta value used in generation of gradient table of provided
            bval and bvec.
        b0_threshold : float, optional
            Threshold used to find b=0 directions (default 50.0)
        laplacian : bool, optional
            Regularize using the Laplacian of the MAP-MRI basis (default True)
        positivity : bool, optional
            Constrain the propagator to be positive. (default True)
        bval_threshold : float, optional
            Sets the b-value threshold to be used in the scale factor
            estimation. In order for the estimated non-Gaussianity to have
            meaning this value should set to a lower value (b<2000 s/mm^2)
            such that the scale factors are estimated on signal points that
            reasonably represent the spins at Gaussian diffusion.
            (default: 2000)
        save_metrics : variable string, optional
            List of metrics to save.
            Possible values: rtop, laplacian_signal, msd, qiv, rtap, rtpp,
            ng, perng, parng
            (default: [] (all))
        laplacian_weighting : float, optional
            Weighting value used in fitting the MAPMRI model in the laplacian
            and both model types. (default: 0.05)
        radial_order : unsigned int, optional
            Even value used to set the order of the basis
            (default: 6)
        out_dir : string, optional
            Output directory (default: input file directory)
        out_rtop : string, optional
            Name of the rtop to be saved
        out_lapnorm : string, optional
            Name of the norm of laplacian signal to be saved
        out_msd : string, optional
            Name of the msd to be saved
        out_qiv : string, optional
            Name of the qiv to be saved
        out_rtap : string, optional
            Name of the rtap to be saved
        out_rtpp : string, optional
            Name of the rtpp to be saved
        out_ng : string, optional
            Name of the Non-Gaussianity to be saved
        out_perng :  string, optional
            Name of the Non-Gaussianity perpendicular to be saved
        out_parng : string, optional
            Name of the Non-Gaussianity parallel to be saved
        """
        io_it = self.get_io_iterator()
        for (dwi, bval, bvec, out_rtop, out_lapnorm, out_msd, out_qiv,
             out_rtap, out_rtpp, out_ng, out_perng, out_parng) in io_it:

            logging.info('Computing MAPMRI metrics for {0}'.format(dwi))
            data, affine = load_nifti(dwi)

            bvals, bvecs = read_bvals_bvecs(bval, bvec)
            if b0_threshold < bvals.min():
                warn("b0_threshold (value: {0}) is too low, increase your "
                     "b0_threshold. It should higher than the first b0 value "
                     "({1}).".format(b0_threshold, bvals.min()))
            gtab = gradient_table(bvals=bvals, bvecs=bvecs,
                                  small_delta=small_delta,
                                  big_delta=big_delta,
                                  b0_threshold=b0_threshold)

            if not save_metrics:
                save_metrics = ['rtop', 'laplacian_signal', 'msd',
                                'qiv', 'rtap', 'rtpp',
                                'ng', 'perng', 'parng']

            if laplacian and positivity:
                map_model_aniso = mapmri.MapmriModel(
                            gtab,
                            radial_order=radial_order,
                            laplacian_regularization=True,
                            laplacian_weighting=laplacian_weighting,
                            positivity_constraint=True,
                            bval_threshold=bval_threshold)

                mapfit_aniso = map_model_aniso.fit(data)

            elif positivity:
                map_model_aniso = mapmri.MapmriModel(
                            gtab,
                            radial_order=radial_order,
                            laplacian_regularization=False,
                            positivity_constraint=True,
                            bval_threshold=bval_threshold)
                mapfit_aniso = map_model_aniso.fit(data)

            elif laplacian:
                map_model_aniso = mapmri.MapmriModel(
                            gtab,
                            radial_order=radial_order,
                            laplacian_regularization=True,
                            laplacian_weighting=laplacian_weighting,
                            bval_threshold=bval_threshold)
                mapfit_aniso = map_model_aniso.fit(data)

            else:
                map_model_aniso = mapmri.MapmriModel(
                            gtab,
                            radial_order=radial_order,
                            laplacian_regularization=False,
                            positivity_constraint=False,
                            bval_threshold=bval_threshold)
                mapfit_aniso = map_model_aniso.fit(data)

            if 'rtop' in save_metrics:
                r = mapfit_aniso.rtop()
                save_nifti(out_rtop, r.astype(np.float32), affine)

            if 'laplacian_signal' in save_metrics:
                ll = mapfit_aniso.norm_of_laplacian_signal()
                save_nifti(out_lapnorm, ll.astype(np.float32), affine)

            if 'msd' in save_metrics:
                m = mapfit_aniso.msd()
                save_nifti(out_msd, m.astype(np.float32), affine)

            if 'qiv' in save_metrics:
                q = mapfit_aniso.qiv()
                save_nifti(out_qiv, q.astype(np.float32), affine)

            if 'rtap' in save_metrics:
                r = mapfit_aniso.rtap()
                save_nifti(out_rtap, r.astype(np.float32), affine)

            if 'rtpp' in save_metrics:
                r = mapfit_aniso.rtpp()
                save_nifti(out_rtpp, r.astype(np.float32), affine)

            if 'ng' in save_metrics:
                n = mapfit_aniso.ng()
                save_nifti(out_ng, n.astype(np.float32), affine)

            if 'perng' in save_metrics:
                n = mapfit_aniso.ng_perpendicular()
                save_nifti(out_perng, n.astype(np.float32), affine)

            if 'parng' in save_metrics:
                n = mapfit_aniso.ng_parallel()
                save_nifti(out_parng, n.astype(np.float32), affine)

            logging.info('MAPMRI saved in {0}'.
                         format(os.path.dirname(out_dir)))
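A sketch of running this workflow, assuming the enclosing class is DIPY's ReconstMAPMRIFlow; the pulse timings and paths below are placeholders for a real acquisition:

from dipy.workflows.reconst import ReconstMAPMRIFlow

flow = ReconstMAPMRIFlow()
flow.run('dwi.nii.gz', 'dwi.bval', 'dwi.bvec',
         small_delta=0.0129, big_delta=0.0218,   # placeholder timings (s)
         laplacian=True, positivity=True,
         save_metrics=['rtop', 'msd', 'qiv'],
         out_dir='mapmri_out')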
Exemple #48
0
def test_apply_affine_transform():
    with TemporaryDirectory() as temp_out_dir:

        factors = {
            ('TRANSLATION', 3): (2.0, None, np.array([2.3, 4.5, 1.7])),
            ('RIGID', 3): (0.1, None, np.array([0.1, 0.15, -0.11, 2.3, 4.5,
                                                1.7])),
            ('AFFINE', 3): (0.1, None, np.array([0.99, -0.05, 0.03, 1.3,
                                                 0.05, 0.99, -0.10, 2.5,
                                                 -0.07, 0.10, 0.99, -1.4]))}

        image_registeration_flow = ImageRegistrationFlow()
        apply_trans = ApplyTransformFlow()

        for i in factors.keys():
            static, moving, static_g2w, moving_g2w, smask, mmask, M = \
                setup_random_transform(transform=regtransforms[i],
                                       rfactor=factors[i][0])

            stat_file = str(i[0]) + '_static.nii.gz'
            mov_file = str(i[0]) + '_moving.nii.gz'

            save_nifti(pjoin(temp_out_dir, stat_file), data=static,
                       affine=static_g2w)

            save_nifti(pjoin(temp_out_dir, mov_file), data=moving,
                       affine=moving_g2w)

            static_image_file = pjoin(temp_out_dir,
                                      str(i[0]) + '_static.nii.gz')
            moving_image_file = pjoin(temp_out_dir,
                                      str(i[0]) + '_moving.nii.gz')

            out_moved = pjoin(temp_out_dir,
                              str(i[0]) + "_moved.nii.gz")
            out_affine = pjoin(temp_out_dir,
                               str(i[0]) + "_affine.txt")

            if str(i[0]) == "TRANSLATION":
                transform_type = "trans"
            else:
                transform_type = str(i[0]).lower()

            image_registeration_flow.run(static_image_file, moving_image_file,
                                         transform=transform_type,
                                         out_dir=temp_out_dir,
                                         out_moved=out_moved,
                                         out_affine=out_affine,
                                         level_iters=[1, 1, 1],
                                         save_metric=False)

            # Checking for the created moved file.
            assert os.path.exists(out_moved)
            assert os.path.exists(out_affine)

        images = pjoin(temp_out_dir, '*moving*')
        apply_trans.run(static_image_file, images,
                        out_dir=temp_out_dir,
                        transform_map_file=out_affine)

        # Checking for the transformed file.
        assert os.path.exists(pjoin(temp_out_dir, "transformed.nii.gz"))
Exemple #49
0
def test_horizon_flow():

    s1 = 10 * np.array([[0, 0, 0],
                        [1, 0, 0],
                        [2, 0, 0],
                        [3, 0, 0],
                        [4, 0, 0]], dtype='f8')

    s2 = 10 * np.array([[0, 0, 0],
                        [0, 1, 0],
                        [0, 2, 0],
                        [0, 3, 0],
                        [0, 4, 0]], dtype='f8')

    s3 = 10 * np.array([[0, 0, 0],
                        [1, 0.2, 0],
                        [2, 0.2, 0],
                        [3, 0.2, 0],
                        [4, 0.2, 0]], dtype='f8')

    print(s1.shape)
    print(s2.shape)
    print(s3.shape)

    streamlines = Streamlines()
    streamlines.append(s1)
    streamlines.append(s2)
    streamlines.append(s3)

    tractograms = [streamlines]
    images = None

    horizon(tractograms, images=images, cluster=True, cluster_thr=5,
            random_colors=False, length_lt=np.inf, length_gt=0,
            clusters_lt=np.inf, clusters_gt=0,
            world_coords=False, interactive=False)

    affine = np.diag([2., 1, 1, 1]).astype('f8')
    data = 255 * np.random.rand(150, 150, 150)
    images = [(data, affine)]

    horizon(tractograms, images=images, cluster=True, cluster_thr=5,
            random_colors=False, length_lt=np.inf, length_gt=0,
            clusters_lt=np.inf, clusters_gt=0,
            world_coords=True, interactive=False)

    with TemporaryDirectory() as out_dir:

        fimg = os.path.join(out_dir, 'test.nii.gz')
        ftrk = os.path.join(out_dir, 'test.trk')

        save_nifti(fimg, data, affine)
        save_tractogram(ftrk, streamlines, affine)

        input_files = [ftrk, fimg]

        npt.assert_equal(len(input_files), 2)

        hz_flow = HorizonFlow()

        hz_flow.run(input_files=input_files, stealth=True,
                    out_dir=out_dir, out_stealth_png='tmp_x.png')

        npt.assert_equal(os.path.exists(os.path.join(out_dir, 'tmp_x.png')),
                         True)
Example #50
0
def test_image_registration():
    with TemporaryDirectory() as temp_out_dir:

        static, moving, static_g2w, moving_g2w, smask, mmask, M\
            = setup_random_transform(transform=regtransforms[('AFFINE', 3)],
                                     rfactor=0.1)

        save_nifti(pjoin(temp_out_dir, 'b0.nii.gz'), data=static,
                   affine=static_g2w)
        save_nifti(pjoin(temp_out_dir, 't1.nii.gz'), data=moving,
                   affine=moving_g2w)

        static_image_file = pjoin(temp_out_dir, 'b0.nii.gz')
        moving_image_file = pjoin(temp_out_dir, 't1.nii.gz')

        image_registeration_flow = ImageRegistrationFlow()

        def read_distance(qual_fname):
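            # Return the final distance value written by the registration,
            # i.e. the last line of the saved quality metric file.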
            temp_val = 0
            with open(pjoin(temp_out_dir, qual_fname), 'r') as f:
                temp_val = f.readlines()[-1]
            return float(temp_val)

        def test_com():

            out_moved = pjoin(temp_out_dir, "com_moved.nii.gz")
            out_affine = pjoin(temp_out_dir, "com_affine.txt")

            image_registeration_flow._force_overwrite = True
            image_registeration_flow.run(static_image_file,
                                         moving_image_file,
                                         transform='com',
                                         out_dir=temp_out_dir,
                                         out_moved=out_moved,
                                         out_affine=out_affine)
            check_existence(out_moved, out_affine)

        def test_translation():

            out_moved = pjoin(temp_out_dir, "trans_moved.nii.gz")
            out_affine = pjoin(temp_out_dir, "trans_affine.txt")

            image_registeration_flow._force_overwrite = True
            image_registeration_flow.run(static_image_file,
                                         moving_image_file,
                                         transform='trans',
                                         out_dir=temp_out_dir,
                                         out_moved=out_moved,
                                         out_affine=out_affine,
                                         save_metric=True,
                                         level_iters=[100, 10, 1],
                                         out_quality='trans_q.txt')

            dist = read_distance('trans_q.txt')
            npt.assert_almost_equal(float(dist), -0.3953547764454917, 1)
            check_existence(out_moved, out_affine)

        def test_rigid():

            out_moved = pjoin(temp_out_dir, "rigid_moved.nii.gz")
            out_affine = pjoin(temp_out_dir, "rigid_affine.txt")

            image_registeration_flow._force_overwrite = True
            image_registeration_flow.run(static_image_file,
                                         moving_image_file,
                                         transform='rigid',
                                         out_dir=temp_out_dir,
                                         out_moved=out_moved,
                                         out_affine=out_affine,
                                         save_metric=True,
                                         level_iters=[100, 10, 1],
                                         out_quality='rigid_q.txt')

            dist = read_distance('rigid_q.txt')
            npt.assert_almost_equal(dist, -0.6900534794005155, 1)
            check_existence(out_moved, out_affine)

        def test_affine():

            out_moved = pjoin(temp_out_dir, "affine_moved.nii.gz")
            out_affine = pjoin(temp_out_dir, "affine_affine.txt")

            image_registeration_flow._force_overwrite = True
            image_registeration_flow.run(static_image_file,
                                         moving_image_file,
                                         transform='affine',
                                         out_dir=temp_out_dir,
                                         out_moved=out_moved,
                                         out_affine=out_affine,
                                         save_metric=True,
                                         level_iters=[100, 10, 1],
                                         out_quality='affine_q.txt')

            dist = read_distance('affine_q.txt')
            npt.assert_almost_equal(dist, -0.7670650775914811, 1)
            check_existence(out_moved, out_affine)

        # Check that invalid arguments raise the expected errors.
        def test_err():
            image_registeration_flow._force_overwrite = True
            npt.assert_raises(ValueError, image_registeration_flow.run,
                              static_image_file,
                              moving_image_file,
                              transform='notransform')

            image_registeration_flow._force_overwrite = True
            npt.assert_raises(ValueError, image_registeration_flow.run,
                              static_image_file,
                              moving_image_file,
                              metric='wrong_metric')

        def check_existence(movedfile, affine_mat_file):
            assert os.path.exists(movedfile)
            assert os.path.exists(affine_mat_file)
            return True

        test_com()
        test_translation()
        test_rigid()
        test_affine()
        test_err()
Example #51
0
def test_local_fiber_tracking_workflow():
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_fnames('small_64D')
        vol_img = nib.load(data_path)
        volume = vol_img.get_data()
        mask = np.ones_like(volume[:, :, :, 0])
        mask_img = nib.Nifti1Image(mask.astype(np.uint8), vol_img.affine)
        mask_path = join(out_dir, 'tmp_mask.nii.gz')
        nib.save(mask_img, mask_path)

        reconst_csd_flow = ReconstCSDFlow()
        reconst_csd_flow.run(data_path, bval_path, bvec_path, mask_path,
                             out_dir=out_dir, extract_pam_values=True)

        pam_path = reconst_csd_flow.last_generated_outputs['out_pam']
        gfa_path = reconst_csd_flow.last_generated_outputs['out_gfa']

        # Create seeding mask by thresholding the gfa
        mask_flow = MaskFlow()
        mask_flow.run(gfa_path, 0.8, out_dir=out_dir)
        seeds_path = mask_flow.last_generated_outputs['out_mask']

        # Overwrite the affine in the gfa file with the identity, since
        # local tracking cannot be used with an affine that contains shearing.
        gfa_img = nib.load(gfa_path)
        save_nifti(gfa_path, gfa_img.get_data(), np.eye(4), gfa_img.header)

        # Test tracking with pam no sh
        lf_track_pam = LocalFiberTrackingPAMFlow()
        lf_track_pam._force_overwrite = True
        assert_equal(lf_track_pam.get_short_name(), 'track_local')
        lf_track_pam.run(pam_path, gfa_path, seeds_path)
        tractogram_path = \
            lf_track_pam.last_generated_outputs['out_tractogram']
        assert_false(is_tractogram_empty(tractogram_path))

        # Test tracking with pam with sh
        lf_track_pam = LocalFiberTrackingPAMFlow()
        lf_track_pam._force_overwrite = True
        lf_track_pam.run(pam_path, gfa_path, seeds_path,
                         tracking_method="eudx")
        tractogram_path = \
            lf_track_pam.last_generated_outputs['out_tractogram']
        assert_false(is_tractogram_empty(tractogram_path))

        # Test tracking with pam with sh and deterministic getter
        lf_track_pam = LocalFiberTrackingPAMFlow()
        lf_track_pam._force_overwrite = True
        lf_track_pam.run(pam_path, gfa_path, seeds_path,
                         tracking_method="deterministic")
        tractogram_path = \
            lf_track_pam.last_generated_outputs['out_tractogram']
        assert_false(is_tractogram_empty(tractogram_path))

        # Test tracking with pam with sh and probabilistic getter
        lf_track_pam = LocalFiberTrackingPAMFlow()
        lf_track_pam._force_overwrite = True
        lf_track_pam.run(pam_path, gfa_path, seeds_path,
                         tracking_method="probabilistic")
        tractogram_path = \
            lf_track_pam.last_generated_outputs['out_tractogram']
        assert_false(is_tractogram_empty(tractogram_path))

        # Test tracking with pam with sh and closestpeaks getter
        lf_track_pam = LocalFiberTrackingPAMFlow()
        lf_track_pam._force_overwrite = True
        lf_track_pam.run(pam_path, gfa_path, seeds_path,
                         tracking_method="closestpeaks")
        tractogram_path = \
            lf_track_pam.last_generated_outputs['out_tractogram']
        assert_false(is_tractogram_empty(tractogram_path))
Example #52
0
    def run(self, input_files, bvalues_files, bvectors_files, mask_files,
            b0_threshold=50, bvecs_tol=0.01, save_metrics=[],
            out_dir='', out_tensor='tensors.nii.gz', out_fa='fa.nii.gz',
            out_ga='ga.nii.gz', out_rgb='rgb.nii.gz', out_md='md.nii.gz',
            out_ad='ad.nii.gz', out_rd='rd.nii.gz', out_mode='mode.nii.gz',
            out_evec='evecs.nii.gz', out_eval='evals.nii.gz'):
        """ Workflow for tensor reconstruction and for computing DTI metrics.
        using Weighted Least-Squares.
        Performs a tensor reconstruction on the files by 'globing'
        ``input_files`` and saves the DTI metrics in a directory specified by
        ``out_dir``.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        bvalues_files : string
            Path to the bvalues files. This path may contain wildcards to use
            multiple bvalues files at once.
        bvectors_files : string
            Path to the bvectors files. This path may contain wildcards to use
            multiple bvectors files at once.
        mask_files : string
            Path to the input masks. This path may contain wildcards to use
            multiple masks at once. (default: No mask used)
        b0_threshold : float, optional
            Threshold used to find b=0 directions (default 50)
        bvecs_tol : float, optional
            Threshold used to check that norm(bvec) = 1 +/- bvecs_tol,
            i.e. that the b-vectors are unit vectors (default 0.01)
        save_metrics : variable string, optional
            List of metrics to save.
            Possible values: fa, ga, rgb, md, ad, rd, mode, tensor, evec, eval
            (default [] (all))
        out_dir : string, optional
            Output directory (default input file directory)
        out_tensor : string, optional
            Name of the tensors volume to be saved (default 'tensors.nii.gz')
        out_fa : string, optional
            Name of the fractional anisotropy volume to be saved
            (default 'fa.nii.gz')
        out_ga : string, optional
            Name of the geodesic anisotropy volume to be saved
            (default 'ga.nii.gz')
        out_rgb : string, optional
            Name of the color fa volume to be saved (default 'rgb.nii.gz')
        out_md : string, optional
            Name of the mean diffusivity volume to be saved
            (default 'md.nii.gz')
        out_ad : string, optional
            Name of the axial diffusivity volume to be saved
            (default 'ad.nii.gz')
        out_rd : string, optional
            Name of the radial diffusivity volume to be saved
            (default 'rd.nii.gz')
        out_mode : string, optional
            Name of the mode volume to be saved (default 'mode.nii.gz')
        out_evec : string, optional
            Name of the eigenvectors volume to be saved
            (default 'evecs.nii.gz')
        out_eval : string, optional
            Name of the eigenvalues to be saved (default 'evals.nii.gz')

        References
        ----------
        .. [1] Basser, P.J., Mattiello, J., LeBihan, D., 1994. Estimation of
           the effective self-diffusion tensor from the NMR spin echo. J Magn
           Reson B 103, 247-254.

        .. [2] Basser, P., Pierpaoli, C., 1996. Microstructural and
           physiological features of tissues elucidated by quantitative
           diffusion-tensor MRI.  Journal of Magnetic Resonance 111, 209-219.

        .. [3] Lin-Ching C., Jones D.K., Pierpaoli, C. 2005. RESTORE: Robust
           estimation of tensors by outlier rejection. MRM 53: 1088-1095

        .. [4] Chung, S.W., Lu, Y., Henry, R.G., 2006. Comparison of bootstrap
           approaches for estimation of uncertainties of DTI parameters.
           NeuroImage 33, 531-541.

        """
        io_it = self.get_io_iterator()

        for dwi, bval, bvec, mask, otensor, ofa, oga, orgb, omd, oad, orad, \
                omode, oevecs, oevals in io_it:

            logging.info('Computing DTI metrics for {0}'.format(dwi))
            data, affine = load_nifti(dwi)

            if mask is not None:
                mask = nib.load(mask).get_data().astype(bool)

            tenfit, _ = self.get_fitted_tensor(data, mask, bval, bvec,
                                               b0_threshold, bvecs_tol)

            if not save_metrics:
                save_metrics = ['fa', 'md', 'rd', 'ad', 'ga', 'rgb', 'mode',
                                'evec', 'eval', 'tensor']

            FA = fractional_anisotropy(tenfit.evals)
            FA[np.isnan(FA)] = 0
            FA = np.clip(FA, 0, 1)

            if 'tensor' in save_metrics:
                tensor_vals = lower_triangular(tenfit.quadratic_form)
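                # lower_triangular() yields [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz];
                # reorder to [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz] before saving,
                # presumably for compatibility with external tensor viewers.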
                correct_order = [0, 1, 3, 2, 4, 5]
                tensor_vals_reordered = tensor_vals[..., correct_order]

                save_nifti(otensor, tensor_vals_reordered.astype(np.float32),
                           affine)

            if 'fa' in save_metrics:
                save_nifti(ofa, FA.astype(np.float32), affine)

            if 'ga' in save_metrics:
                GA = geodesic_anisotropy(tenfit.evals)
                save_nifti(oga, GA.astype(np.float32), affine)

            if 'rgb' in save_metrics:
                RGB = color_fa(FA, tenfit.evecs)
                save_nifti(orgb, np.array(255 * RGB, 'uint8'), affine)

            if 'md' in save_metrics:
                MD = mean_diffusivity(tenfit.evals)
                save_nifti(omd, MD.astype(np.float32), affine)

            if 'ad' in save_metrics:
                AD = axial_diffusivity(tenfit.evals)
                save_nifti(oad, AD.astype(np.float32), affine)

            if 'rd' in save_metrics:
                RD = radial_diffusivity(tenfit.evals)
                save_nifti(orad, RD.astype(np.float32), affine)

            if 'mode' in save_metrics:
                MODE = get_mode(tenfit.quadratic_form)
                save_nifti(omode, MODE.astype(np.float32), affine)

            if 'evec' in save_metrics:
                save_nifti(oevecs, tenfit.evecs.astype(np.float32), affine)

            if 'eval' in save_metrics:
                save_nifti(oevals, tenfit.evals.astype(np.float32), affine)

            dname_ = os.path.dirname(oevals)
            if dname_ == '':
                logging.info('DTI metrics saved in current directory')
            else:
                logging.info(
                        'DTI metrics saved in {0}'.format(dname_))
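
For reference, here is a minimal, hedged sketch of how this workflow might be invoked from Python, assuming it is exposed as ``ReconstDtiFlow`` in ``dipy.workflows.reconst``; the file paths and the ``'out_fa'`` output key below are illustrative placeholders, not taken from the snippet above.

from dipy.workflows.reconst import ReconstDtiFlow

# Hypothetical input paths; each may contain wildcards to process
# several volumes at once.
dwi_path, bval_path, bvec_path = 'dwi.nii.gz', 'dwi.bval', 'dwi.bvec'
mask_path = 'brain_mask.nii.gz'

dti_flow = ReconstDtiFlow()
# Leaving save_metrics empty (the default) saves every metric.
dti_flow.run(dwi_path, bval_path, bvec_path, mask_path,
             save_metrics=['fa', 'md'], out_dir='dti_out')

# Output paths are recorded on the workflow instance, keyed by the
# corresponding parameter name (assumed here to be 'out_fa').
fa_path = dti_flow.last_generated_outputs['out_fa']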