def test_resample():
    fimg, _, _ = get_data("small_25")
    img = nib.load(fimg)
    data = img.get_data()
    affine = img.get_affine()
    zooms = img.get_header().get_zooms()[:3]

    # test that new zooms are correctly set from the affine (check with 3D volume)
    new_zooms = (1, 1.2, 2.1)
    data2, affine2 = reslice(data[..., 0], affine, zooms, new_zooms,
                             order=1, mode='constant')
    img2 = nib.Nifti1Image(data2, affine2)
    new_zooms_confirmed = img2.get_header().get_zooms()[:3]
    assert_almost_equal(new_zooms, new_zooms_confirmed)

    # same with resample
    new_zooms = (1, 1.2, 2.1)
    data2, affine2 = resample(data[..., 0], affine, zooms, new_zooms,
                              order=1, mode='constant')
    img2 = nib.Nifti1Image(data2, affine2)
    new_zooms_confirmed = img2.get_header().get_zooms()[:3]
    assert_almost_equal(new_zooms, new_zooms_confirmed)

    # test that shape changes correctly for the first 3 dimensions (check 4D)
    new_zooms = (1, 1, 1.)
    data2, affine2 = reslice(data, affine, zooms, new_zooms,
                             order=0, mode='reflect')
    assert_equal(2 * np.array(data.shape[:3]), data2.shape[:3])
    assert_equal(data2.shape[-1], data.shape[-1])

    # same with different interpolation order
    new_zooms = (1, 1, 1.)
    data3, affine2 = reslice(data, affine, zooms, new_zooms,
                             order=5, mode='reflect')
    assert_equal(2 * np.array(data.shape[:3]), data3.shape[:3])
    assert_equal(data3.shape[-1], data.shape[-1])

    # test that the sigma will be reduced with interpolation
    sigmas = estimate_sigma(data)
    sigmas2 = estimate_sigma(data2)
    sigmas3 = estimate_sigma(data3)
    assert_(np.all(sigmas > sigmas2))
    assert_(np.all(sigmas2 > sigmas3))
def test_resample():
    fimg, _, _ = get_fnames("small_25")
    data, affine, zooms = load_nifti(fimg, return_voxsize=True)

    # test that new zooms are correctly set from the affine (check with 3D volume)
    new_zooms = (1, 1.2, 2.1)
    data2, affine2 = reslice(data[..., 0], affine, zooms, new_zooms,
                             order=1, mode='constant')
    img2 = nib.Nifti1Image(data2, affine2)
    new_zooms_confirmed = img2.header.get_zooms()[:3]
    assert_almost_equal(new_zooms, new_zooms_confirmed)

    # test that shape changes correctly for the first 3 dimensions (check 4D)
    new_zooms = (1, 1, 1.)
    data2, affine2 = reslice(data, affine, zooms, new_zooms,
                             order=0, mode='reflect')
    assert_equal(2 * np.array(data.shape[:3]), data2.shape[:3])
    assert_equal(data2.shape[-1], data.shape[-1])

    # same with different interpolation order
    new_zooms = (1, 1, 1.)
    data3, affine2 = reslice(data, affine, zooms, new_zooms,
                             order=5, mode='reflect')
    assert_equal(2 * np.array(data.shape[:3]), data3.shape[:3])
    assert_equal(data3.shape[-1], data.shape[-1])

    # test that the sigma will be reduced with interpolation
    sigmas = estimate_sigma(data)
    sigmas2 = estimate_sigma(data2)
    sigmas3 = estimate_sigma(data3)
    assert_(np.all(sigmas > sigmas2))
    assert_(np.all(sigmas2 > sigmas3))

    # check that 4D resampling matches 3D resampling
    data2, affine2 = reslice(data, affine, zooms, new_zooms)
    for i in range(data.shape[-1]):
        _data, _affine = reslice(data[..., i], affine, zooms, new_zooms)
        assert_almost_equal(data2[..., i], _data)
        assert_almost_equal(affine2, _affine)

    # check use of multiprocessing pool of specified size
    data3, affine3 = reslice(data, affine, zooms, new_zooms, num_processes=4)
    assert_almost_equal(data2, data3)
    assert_almost_equal(affine2, affine3)

    # check use of multiprocessing pool of autoconfigured size
    data3, affine3 = reslice(data, affine, zooms, new_zooms, num_processes=-1)
    assert_almost_equal(data2, data3)
    assert_almost_equal(affine2, affine3)

    # test invalid values of num_processes
    assert_raises(ValueError, reslice, data, affine, zooms, new_zooms,
                  num_processes=0)
def test_resample():
    fimg, _, _ = get_data("small_25")
    img = nib.load(fimg)
    data = img.get_data()
    affine = img.get_affine()
    zooms = img.get_header().get_zooms()[:3]

    # test that new zooms are correctly set from the affine (check with 3D volume)
    new_zooms = (1, 1.2, 2.1)
    data2, affine2 = reslice(data[..., 0], affine, zooms, new_zooms,
                             order=1, mode='constant')
    img2 = nib.Nifti1Image(data2, affine2)
    new_zooms_confirmed = img2.get_header().get_zooms()[:3]
    assert_almost_equal(new_zooms, new_zooms_confirmed)

    # test that shape changes correctly for the first 3 dimensions (check 4D)
    new_zooms = (1, 1, 1.)
    data2, affine2 = reslice(data, affine, zooms, new_zooms,
                             order=0, mode='reflect')
    assert_equal(2 * np.array(data.shape[:3]), data2.shape[:3])
    assert_equal(data2.shape[-1], data.shape[-1])

    # same with different interpolation order
    new_zooms = (1, 1, 1.)
    data3, affine2 = reslice(data, affine, zooms, new_zooms,
                             order=5, mode='reflect')
    assert_equal(2 * np.array(data.shape[:3]), data3.shape[:3])
    assert_equal(data3.shape[-1], data.shape[-1])

    # test that the sigma will be reduced with interpolation
    sigmas = estimate_sigma(data)
    sigmas2 = estimate_sigma(data2)
    sigmas3 = estimate_sigma(data3)
    assert_(np.all(sigmas > sigmas2))
    assert_(np.all(sigmas2 > sigmas3))

    # check that 4D resampling matches 3D resampling
    data2, affine2 = reslice(data, affine, zooms, new_zooms)
    for i in range(data.shape[-1]):
        _data, _affine = reslice(data[..., i], affine, zooms, new_zooms)
        assert_almost_equal(data2[..., i], _data)
        assert_almost_equal(affine2, _affine)

    # check use of multiprocessing pool of specified size
    data3, affine3 = reslice(data, affine, zooms, new_zooms, num_processes=4)
    assert_almost_equal(data2, data3)
    assert_almost_equal(affine2, affine3)

    # check use of multiprocessing pool of autoconfigured size
    data3, affine3 = reslice(data, affine, zooms, new_zooms, num_processes=0)
    assert_almost_equal(data2, data3)
    assert_almost_equal(affine2, affine3)
def test_estimate_sigma():
    sigma = estimate_sigma(np.ones((7, 7, 7)), disable_background_masking=True)
    assert_equal(sigma, 0.0)

    sigma = estimate_sigma(np.ones((7, 7, 7, 3)),
                           disable_background_masking=True)
    assert_equal(sigma, np.array([0.0, 0.0, 0.0]))

    sigma = estimate_sigma(5 * np.ones((7, 7, 7)),
                           disable_background_masking=False)
    assert_equal(sigma, 0.0)

    sigma = estimate_sigma(5 * np.ones((7, 7, 7, 3)),
                           disable_background_masking=False)
    assert_equal(sigma, np.array([0.0, 0.0, 0.0]))

    arr = np.zeros((3, 3, 3))
    arr[0, 0, 0] = 1
    sigma = estimate_sigma(arr, disable_background_masking=False, N=1)
    assert_array_almost_equal(
        sigma, 0.10286889997472792 / np.sqrt(0.42920367320510366))

    arr = np.zeros((3, 3, 3, 3))
    arr[0, 0, 0] = 1
    sigma = estimate_sigma(arr, disable_background_masking=False, N=1)
    assert_array_almost_equal(
        sigma,
        np.array([
            0.10286889997472792 / np.sqrt(0.42920367320510366),
            0.10286889997472792 / np.sqrt(0.42920367320510366),
            0.10286889997472792 / np.sqrt(0.42920367320510366),
        ]))

    arr = np.zeros((3, 3, 3))
    arr[0, 0, 0] = 1
    sigma = estimate_sigma(arr, disable_background_masking=True, N=4)
    assert_array_almost_equal(sigma, 0.46291005 / np.sqrt(0.4834941393603609))

    arr = np.zeros((3, 3, 3))
    arr[0, 0, 0] = 1
    sigma = estimate_sigma(arr, disable_background_masking=True, N=0)
    assert_array_almost_equal(sigma, 0.46291005 / np.sqrt(1))

    arr = np.zeros((3, 3, 3, 3))
    arr[0, 0, 0] = 1
    sigma = estimate_sigma(arr, disable_background_masking=True, N=12)
    assert_array_almost_equal(
        sigma,
        np.array([
            0.46291005 / np.sqrt(0.4946862482541263),
            0.46291005 / np.sqrt(0.4946862482541263),
            0.46291005 / np.sqrt(0.4946862482541263),
        ]))
def test_denoise():
    fdata, fbval, fbvec = dpd.get_fnames()
    # Test on 4D image:
    data = nib.load(fdata).get_data()
    sigma1 = estimate_sigma(data)
    nlmeans(data, sigma=sigma1)
    # Test on 3D image:
    data = data[..., 0]
    sigma2 = estimate_sigma(data)
    nlmeans(data, sigma=sigma2)
def test_estimate_sigma():
    sigma = estimate_sigma(np.ones((7, 7, 7)), disable_background_masking=True)
    assert_equal(sigma, 0.)

    sigma = estimate_sigma(np.ones((7, 7, 7, 3)),
                           disable_background_masking=True)
    assert_equal(sigma, np.array([0., 0., 0.]))

    sigma = estimate_sigma(5 * np.ones((7, 7, 7)),
                           disable_background_masking=False)
    assert_equal(sigma, 0.)

    sigma = estimate_sigma(5 * np.ones((7, 7, 7, 3)),
                           disable_background_masking=False)
    assert_equal(sigma, np.array([0., 0., 0.]))

    arr = np.zeros((3, 3, 3))
    arr[0, 0, 0] = 1
    sigma = estimate_sigma(arr, disable_background_masking=False)
    assert_array_almost_equal(sigma, 0.10286889997472792)

    arr = np.zeros((3, 3, 3, 3))
    arr[0, 0, 0] = 1
    sigma = estimate_sigma(arr, disable_background_masking=False)
    assert_array_almost_equal(sigma, np.array([0.10286889997472792,
                                               0.10286889997472792,
                                               0.10286889997472792]))

    arr = np.zeros((3, 3, 3))
    arr[0, 0, 0] = 1
    sigma = estimate_sigma(arr, disable_background_masking=True)
    assert_array_almost_equal(sigma, 0.46291005)

    arr = np.zeros((3, 3, 3, 3))
    arr[0, 0, 0] = 1
    sigma = estimate_sigma(arr, disable_background_masking=True)
    assert_array_almost_equal(sigma, np.array([0.46291005, 0.46291005,
                                               0.46291005]))
def nlmeans_denoise_img(img, mask, N=4):
    """Apply dipy nlmeans denoising to the image. Useful for diffusion images.

    Parameters
    ----------
    img: nibabel.Nifti1Image
        The diffusion image.

    mask: nibabel.Nifti1Image
        A brain mask image.

    N: int
        Number of arrays of the head coil used to acquire the image.

    Returns
    -------
    den_img: nibabel.Nifti1Image
        A denoised nifti image object with the same headers and affine as
        `img`.
    """
    import nibabel as nib
    from dipy.denoise.nlmeans import nlmeans
    from dipy.denoise.noise_estimate import estimate_sigma

    data = img.get_data()
    msk = mask.get_data()
    sigma = estimate_sigma(data, N=N)
    den = nlmeans(data, sigma=sigma, mask=msk)
    # wrap the denoised array so the documented return type holds
    return nib.Nifti1Image(den, img.affine, img.header)
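A minimal usage sketch for the helper above. The file names here are placeholders, not paths from the original code, and the call simply follows the function's documented contract (a `Nifti1Image` with the input affine and header):

```python
import nibabel as nib

# Hypothetical input files; replace with real data.
dwi_img = nib.load('dwi.nii.gz')
mask_img = nib.load('brain_mask.nii.gz')

# Denoise assuming a 4-channel head coil (N=4).
den_img = nlmeans_denoise_img(dwi_img, mask_img, N=4)
nib.save(den_img, 'dwi_denoised.nii.gz')
```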
def preprocess(nifti, name):
    """Preprocess the 3D MRI image before image segmentation."""
    image = nifti.get_fdata()
    # N: number of coils in the receiver of the MRI scanner
    sigma = estimate_sigma(image, N=16)
    denoised = nlmeans(image, sigma)
    denoised_nifti = nib.Nifti1Image(denoised, nifti.affine)
    nib.save(denoised_nifti, f'lab4/data/clean_{name}.nii.gz')
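A short usage sketch for `preprocess`. The input file name is an assumption for illustration, and it presumes the `lab4/data/` output directory already exists (as the original hard-coded path implies):

```python
import nibabel as nib

# Hypothetical input scan; output is written to lab4/data/clean_t1.nii.gz
nifti = nib.load('t1.nii.gz')
preprocess(nifti, 't1')
```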
def test_ascm_accuracy():
    test_ascm_data_ref = nib.load(dpd.get_data("ascm_test")).get_data()
    test_data = nib.load(dpd.get_data("aniso_vox")).get_data()

    # the test data was constructed in this manner
    mask = test_data > 50
    sigma = estimate_sigma(test_data, N=4)

    den_small = non_local_means(test_data, sigma=sigma, mask=mask,
                                patch_radius=1, block_radius=1, rician=True)
    den_large = non_local_means(test_data, sigma=sigma, mask=mask,
                                patch_radius=2, block_radius=1, rician=True)

    S0n = np.array(adaptive_soft_matching(test_data, den_small, den_large,
                                          sigma[0]))
    assert_array_almost_equal(S0n, test_ascm_data_ref)
def _run_interface(self, runtime):
    import dipy.reconst.dti as dti
    import dipy.denoise.noise_estimate as ne
    from dipy.core.gradients import gradient_table
    from nipype.utils.filemanip import split_filename
    import nibabel as nib

    fname = self.inputs.in_file
    img = nib.load(fname)
    data = img.get_data()
    affine = img.get_affine()

    bvals = self.inputs.bval
    bvecs = self.inputs.bvec
    gtab = gradient_table(bvals, bvecs)

    sigma = ne.estimate_sigma(data)
    # keep the model in its own name so it does not shadow the dti module
    tenmodel = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
    dtifit = tenmodel.fit(data)
    fa = dtifit.fa

    _, base, _ = split_filename(fname)
    nib.save(nib.Nifti1Image(fa, affine), base + '_FA.nii')
    return runtime
def test_ascm_accuracy():
    f_name = dpd.get_fnames("ascm_test")
    test_ascm_data_ref = np.asanyarray(nib.load(f_name).dataobj)
    test_data = np.asanyarray(nib.load(dpd.get_fnames("aniso_vox")).dataobj)

    # the test data was constructed in this manner
    mask = test_data > 50
    sigma = estimate_sigma(test_data, N=4)

    den_small = non_local_means(test_data, sigma=sigma, mask=mask,
                                patch_radius=1, block_radius=1, rician=True)
    den_large = non_local_means(test_data, sigma=sigma, mask=mask,
                                patch_radius=2, block_radius=1, rician=True)

    S0n = np.array(adaptive_soft_matching(test_data, den_small, den_large,
                                          sigma[0]))
    assert_array_almost_equal(S0n, test_ascm_data_ref)
def __produceTensors(self, source, bValsFile, bVecsFile, mask, fitMethod):
    self.info("Starting tensors creation from dipy on {}".format(source))

    dwiImage = nibabel.load(source)
    maskImage = nibabel.load(mask)

    maskData = maskImage.get_data()
    dwiData = dwiImage.get_data()
    dwiData = dipy.segment.mask.applymask(dwiData, maskData)

    gradientTable = dipy.core.gradients.gradient_table(
        numpy.loadtxt(bValsFile), numpy.loadtxt(bVecsFile))

    self.info('WARNING: We need to flip the x direction due to MRtrix new '
              'way to extract bvecs')
    gradientTable.bvecs = gradientTable.bvecs * numpy.array([-1, 1, 1])

    if fitMethod.lower() in ('restore', 'rt'):
        import dipy.denoise.noise_estimate as noise_estimate
        sigma = noise_estimate.estimate_sigma(dwiData)
        model = dipy.reconst.dti.TensorModel(gradientTable,
                                             fit_method=fitMethod,
                                             sigma=sigma)
    else:
        model = dipy.reconst.dti.TensorModel(gradientTable,
                                             fit_method=fitMethod)

    fit = model.fit(dwiData)  # Fitting method

    tensorsValues = dipy.reconst.dti.lower_triangular(fit.quadratic_form)
    correctOrder = [0, 1, 3, 2, 4, 5]
    tensorsValuesReordered = tensorsValues[:, :, :, correctOrder]
    tensorsImage = nibabel.Nifti1Image(
        tensorsValuesReordered.astype(numpy.float32), dwiImage.get_affine())
    nibabel.save(tensorsImage, self.buildName(source, "tensor"))

    nibabel.save(nibabel.Nifti1Image(fit.fa.astype(numpy.float32),
                                     dwiImage.get_affine()),
                 self.buildName(source, "fa"))
    nibabel.save(nibabel.Nifti1Image(fit.ad.astype(numpy.float32),
                                     dwiImage.get_affine()),
                 self.buildName(source, "ad"))
    nibabel.save(nibabel.Nifti1Image(fit.rd.astype(numpy.float32),
                                     dwiImage.get_affine()),
                 self.buildName(source, "rd"))
    nibabel.save(nibabel.Nifti1Image(fit.md.astype(numpy.float32),
                                     dwiImage.get_affine()),
                 self.buildName(source, "md"))
    nibabel.save(nibabel.Nifti1Image(fit.evecs[0].astype(numpy.float32),
                                     dwiImage.get_affine()),
                 self.buildName(source, "v1"))
    nibabel.save(nibabel.Nifti1Image(fit.evecs[1].astype(numpy.float32),
                                     dwiImage.get_affine()),
                 self.buildName(source, "v2"))
    nibabel.save(nibabel.Nifti1Image(fit.evecs[2].astype(numpy.float32),
                                     dwiImage.get_affine()),
                 self.buildName(source, "v3"))

    faColor = numpy.clip(fit.fa, 0, 1)
    rgb = dipy.reconst.dti.color_fa(faColor, fit.evecs)
    nibabel.save(nibabel.Nifti1Image(numpy.array(255 * rgb, 'uint8'),
                                     dwiImage.get_affine()),
                 self.buildName(source, "tensor_rgb"))
    return fit
def denoise(x, mask):
    subjectid = x[0][0]
    from dipy.denoise import nlmeans
    from dipy.denoise.noise_estimate import estimate_sigma
    sigma = estimate_sigma(x[1])
    # return (x[0], nlmeans.nlmeans(x[1], num_threads=1, sigma=sigma,
    #                               mask=mask.value[str(subjectid)]))
    return (x[0], nlmeans.nlmeans(x[1], sigma=sigma,
                                  mask=mask.value[str(subjectid)]))
def run(self, input_files, sigma=0, patch_radius=1, block_radius=5,
        rician=True, out_dir='', out_denoised='dwi_nlmeans.nii.gz'):
    """Workflow wrapping the nlmeans denoising method.

    It applies nlmeans denoise on each file found by 'globing'
    ``input_files`` and saves the results in a directory specified by
    ``out_dir``.

    Parameters
    ----------
    input_files : string
        Path to the input volumes. This path may contain wildcards to
        process multiple inputs at once.
    sigma : float, optional
        Sigma parameter to pass to the nlmeans algorithm
        (default: auto estimation).
    patch_radius : int, optional
        patch size is ``2 x patch_radius + 1``. Default is 1.
    block_radius : int, optional
        block size is ``2 x block_radius + 1``. Default is 5.
    rician : bool, optional
        If True the noise is estimated as Rician, otherwise Gaussian noise
        is assumed.
    out_dir : string, optional
        Output directory (default input file directory)
    out_denoised : string, optional
        Name of the resulting denoised volume
        (default: dwi_nlmeans.nii.gz)

    References
    ----------
    .. [Descoteaux08] Descoteaux, Maxime and Wiest-Daesslé, Nicolas and
       Prima, Sylvain and Barillot, Christian and Deriche, Rachid.
       Impact of Rician Adapted Non-Local Means Filtering on HARDI,
       MICCAI 2008
    """
    io_it = self.get_io_iterator()
    for fpath, odenoised in io_it:
        if self._skip:
            shutil.copy(fpath, odenoised)
            logging.warning('Denoising skipped for now.')
        else:
            logging.info('Denoising %s', fpath)
            data, affine, image = load_nifti(fpath, return_img=True)

            if sigma == 0:
                logging.info('Estimating sigma')
                sigma = estimate_sigma(data)
                logging.debug('Found sigma {0}'.format(sigma))

            denoised_data = nlmeans(data, sigma=sigma,
                                    patch_radius=patch_radius,
                                    block_radius=block_radius,
                                    rician=rician)
            save_nifti(odenoised, denoised_data, affine, image.header)
            logging.info('Denoised volume saved as %s', odenoised)
def denoise_image(self, image, mask):
    sigma = estimate_sigma(image, N=4)
    den = nlmeans(image, sigma=sigma, mask=mask, patch_radius=1,
                  block_radius=1, rician=True)
    diff = np.abs(den.astype('f8') - image.astype('f8'))
    return den, diff
def apply_non_local_means(self, pr=1, br=3):
    # first use non-local means to denoise the volume
    dipy_sigma = estimate_sigma(self.volume, N=0)
    self.volume = dipy.denoise.nlmeans.nlmeans(self.volume[:],
                                               mask=self.mask_3d,
                                               sigma=dipy_sigma,
                                               patch_radius=pr,
                                               block_radius=br,
                                               rician=False)
    # self.volume = median_filter(self.volume, size=3)
    return self.volume
def _run_interface(self, runtime):
    import nibabel as nib
    from dipy.denoise.nlmeans import nlmeans
    from dipy.denoise.noise_estimate import estimate_sigma
    import numpy as np

    img = nib.load(self.inputs.in_file)
    data = img.get_data()
    affine = img.affine

    if len(data.shape) > 3:
        den = np.zeros(data.shape)
        for i in range(data.shape[-1]):
            print('direction # ' + str(i))
            sigma = estimate_sigma(data[..., i], N=4)
            den[..., i] = nlmeans(data[..., i], sigma=sigma, patch_radius=1,
                                  block_radius=5, rician=True)
        nib.save(nib.Nifti1Image(den.astype(np.float32), img.affine),
                 'denoised.nii.gz')
    else:
        sigma = estimate_sigma(data, N=4)
        den = nlmeans(data, sigma=sigma, patch_radius=1, block_radius=5,
                      rician=True)
        nib.save(nib.Nifti1Image(den, affine), 'denoised.nii.gz')
    return runtime
def _get_basic_sigma(data, log):
    # We force N to zero as the 3T data is either oversmoothed or still
    # noisy, and we prefer the second option
    log.info("In basic noise estimation, N=0 is enforced!")
    sigma = estimate_sigma(data, N=0)

    # Use a single value for all of the volumes.
    # This is the same value for a given bval with this estimator
    sigma = np.median(sigma)
    log.info('The noise standard deviation from the basic estimation '
             'is %s', sigma)

    # Broadcast the single value to a whole 3D volume for nlmeans
    return np.ones(data.shape[:3]) * sigma
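For context, a hypothetical sketch of how the broadcast sigma volume returned by `_get_basic_sigma` might be handed to dipy's `nlmeans`, which is what the final comment above suggests. The logger setup and the synthetic `dwi_data` array are assumptions, not part of the original code:

```python
import logging

import numpy as np
from dipy.denoise.nlmeans import nlmeans

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

# Synthetic 4D DWI stand-in, for illustration only.
dwi_data = np.random.rand(32, 32, 20, 10).astype(np.float32)

# One 3D volume holding a single sigma value everywhere.
sigma_vol = _get_basic_sigma(dwi_data, log)
denoised = nlmeans(dwi_data, sigma=sigma_vol)
```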
def denoise_nlmeans(data_in, patch_radius=1, block_radius=5):
    """
    data_in: nd_array to denoise

    for more info about patch_radius and block_radius, please refer to the
    dipy website:
    http://nipy.org/dipy/reference/dipy.denoise.html#dipy.denoise.nlmeans.nlmeans
    """
    from dipy.denoise.nlmeans import nlmeans
    from dipy.denoise.noise_estimate import estimate_sigma
    from numpy import asarray

    data_in = asarray(data_in)

    block_radius_max = min(data_in.shape) - 1
    block_radius = block_radius_max if block_radius > block_radius_max else block_radius

    sigma = estimate_sigma(data_in)
    denoised = nlmeans(data_in, sigma, patch_radius=patch_radius,
                       block_radius=block_radius)

    return denoised
def denoise_nlmeans(data_in, patch_radius=1, block_radius=5):
    """
    :param data_in: nd_array to denoise

    .. note::
        for more info about patch_radius and block_radius, please refer to
        the dipy website:
        http://dipy.org/dipy/reference/dipy.denoise.html#dipy.denoise.nlmeans.nlmeans
    """
    data_in = np.asarray(data_in)

    block_radius_max = min(data_in.shape) - 1
    block_radius = block_radius_max if block_radius > block_radius_max else block_radius

    sigma = estimate_sigma(data_in)
    denoised = nlmeans(data_in, sigma, patch_radius=patch_radius,
                       block_radius=block_radius)

    return denoised
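A minimal usage sketch for `denoise_nlmeans`, assuming the module-level `np`, `nlmeans` and `estimate_sigma` imports the function relies on. The synthetic volume below is an assumption for illustration only:

```python
import numpy as np

# Synthetic noisy 3D volume, for illustration only.
rng = np.random.default_rng(0)
noisy = 100 + 10 * rng.standard_normal((40, 40, 20)).astype(np.float32)

# block_radius is clipped internally so the search window fits in the volume.
clean = denoise_nlmeans(noisy, patch_radius=1, block_radius=5)
print(clean.shape)  # (40, 40, 20)
```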
def run(self, input_files, sigma=0, out_dir='',
        out_denoised='dwi_nlmeans.nii.gz'):
    """Workflow wrapping the nlmeans denoising method.

    It applies nlmeans denoise on each file found by 'globing'
    ``input_files`` and saves the results in a directory specified by
    ``out_dir``.

    Parameters
    ----------
    input_files : string
        Path to the input volumes. This path may contain wildcards to
        process multiple inputs at once.
    sigma : float, optional
        Sigma parameter to pass to the nlmeans algorithm
        (default: auto estimation).
    out_dir : string, optional
        Output directory (default input file directory)
    out_denoised : string, optional
        Name of the resulting denoised volume (default: dwi_nlmeans.nii.gz)
    """
    io_it = self.get_io_iterator()
    for fpath, odenoised in io_it:
        if self._skip:
            shutil.copy(fpath, odenoised)
            logging.warning('Denoising skipped for now.')
        else:
            logging.info('Denoising {0}'.format(fpath))
            image = nib.load(fpath)
            data = image.get_data()

            if sigma == 0:
                logging.info('Estimating sigma')
                sigma = estimate_sigma(data)
                logging.debug('Found sigma {0}'.format(sigma))

            denoised_data = nlmeans(data, sigma)
            denoised_image = nib.Nifti1Image(denoised_data,
                                             image.get_affine(),
                                             image.get_header())
            denoised_image.to_filename(odenoised)
            logging.info('Denoised volume saved as {0}'.format(odenoised))
def fit(self):
    """Fit a DKI model to the data.

    Standard DTI measures (FA, MD, RD, AD) are calculated from the DKI model,
    accounting for additional variance. DKI measures of MK, RK, AK are
    computed from the DKI model.

    Residual image has the same dimensions as the original input image,
    calculated as |Avg(B0_volumes) - predicted_image|.

    Noise is equal to the stdev across volumes of the residual image, and SNR
    is Avg(B0_volumes)/Noise, removing NaNs.
    """
    data = self.data.get_data()

    # Generate the model
    print("Generating the models.")
    if self.multishelled:
        model = dki.DiffusionKurtosisModel(self.gradient_table,
                                           fit_method=self.fit_method)
    else:
        model = dti.TensorModel(self.gradient_table,
                                fit_method=self.fit_method)

    print("Denoising the data.")
    sigma = estimate_sigma(data, N=0)  # MAY WANT TO UPDATE THIS WITH A FLAG.
    data = nlmeans(self.data.get_data(), sigma=sigma,
                   mask=self.mask.get_data())

    print("Fitting the data.")
    self.fitted = model.fit(data)

    # Generate the lower-triangular dataset
    if self.multishelled:
        print("Generating the kurtosis tensor data.")
        self.out_dti = nib.nifti1.Nifti1Image(self.fitted.lower_triangular(),
                                              self.data.get_affine())
        self.out_dki = nib.nifti1.Nifti1Image(self.fitted.kt,
                                              self.data.get_affine())
    else:
        print("Generating the tensor data.")
        self.out_dti = nib.nifti1.Nifti1Image(self.fitted.lower_triangular(),
                                              self.data.get_affine())
        self.out_dki = None

    # Generate the residuals
    if (self.out_residual_path is not None
            or self.out_noise_path is not None
            or self.out_snr_path is not None):
        print("Estimating input data.")
        estimate_data = self.fitted.predict(self.gradient_table,
                                            S0=self.b0_average)
        print("Calculating residuals.")
        residuals = np.absolute(data - estimate_data)
        noise = np.std(residuals, axis=3)
        snr = np.nan_to_num(self.b0_average / noise)
        self.out_residual = nib.nifti1.Nifti1Image(
            residuals.astype(np.float32), self.data.get_affine())
        self.out_noise = nib.nifti1.Nifti1Image(noise.astype(np.float32),
                                                self.data.get_affine())
        self.out_snr = nib.nifti1.Nifti1Image(snr.astype(np.float32),
                                              self.data.get_affine())
def run_nonLocalMean(path_input, path_output):
    print(' - running NonLocal Mean algorithm...')
    finalFileName = os.path.join(path_output,
                                 utils.to_extract_filename(path_input)
                                 + d.id_non_local_mean + d.extension)

    if not os.path.exists(finalFileName):
        img = nib.load(path_input)
        data = img.get_data()

        newData = np.zeros(data.shape)
        gradientDirections = data.shape[-1]

        for index in range(gradientDirections):
            print(index)
            sigma = estimate_sigma(data[:, :, :, index], N=8)
            newData[:, :, :, index] = nlmeans(data[:, :, :, index],
                                              sigma=sigma)

        nib.save(nib.Nifti1Image(newData.astype(np.float32), img.affine),
                 finalFileName)
    return finalFileName
def run(self, input_files, sigma=0, out_dir='',
        out_denoised='dwi_nlmeans.nii.gz'):
    """Workflow wrapping the nlmeans denoising method.

    It applies nlmeans denoise on each file found by 'globing'
    ``input_files`` and saves the results in a directory specified by
    ``out_dir``.

    Parameters
    ----------
    input_files : string
        Path to the input volumes. This path may contain wildcards to
        process multiple inputs at once.
    sigma : float, optional
        Sigma parameter to pass to the nlmeans algorithm
        (default: auto estimation).
    out_dir : string, optional
        Output directory (default input file directory)
    out_denoised : string, optional
        Name of the resulting denoised volume (default: dwi_nlmeans.nii.gz)
    """
    io_it = self.get_io_iterator()
    for fpath, odenoised in io_it:
        if self._skip:
            shutil.copy(fpath, odenoised)
            logging.warning('Denoising skipped for now.')
        else:
            logging.info('Denoising {0}'.format(fpath))
            image = nib.load(fpath)
            data = image.get_data()

            if sigma == 0:
                logging.info('Estimating sigma')
                sigma = estimate_sigma(data)
                logging.debug('Found sigma {0}'.format(sigma))

            denoised_data = nlmeans(data, sigma)
            denoised_image = nib.Nifti1Image(
                denoised_data, image.affine, image.header)
            denoised_image.to_filename(odenoised)
            logging.info('Denoised volume saved as {0}'.format(odenoised))
def img_denoise(data, patch_rad=3, block_rad=4, n_sigma=4):
    # data = np.load(img)['vol_data']
    new_data = np.zeros((data.shape[1], data.shape[2], data.shape[0]))
    for ch in range(data.shape[0]):
        new_data[:, :, ch] = data[ch, :, :]

    sigma = estimate_sigma(new_data, N=n_sigma)
    # den = nlmeans(data, sigma=sigma, mask=mask, patch_radius=1,
    #               block_radius=1, rician=True)
    den = nlmeans(new_data, sigma=sigma, patch_radius=patch_rad,
                  block_radius=block_rad, rician=True)
    # print("total time", time() - t)

    res_data = np.zeros(data.shape)
    for ch in range(data.shape[0]):
        res_data[ch, :, :] = den[:, :, ch]
    return res_data
def denoise(dt):
    from dipy.denoise import nlmeans
    from dipy.denoise.noise_estimate import estimate_sigma
    import itertools

    item = dt[0]
    image = item[1]
    mask = item[2]
    sigma = estimate_sigma(image)
    denoised_data = nlmeans.nlmeans(image, sigma=sigma, mask=mask)

    [xp, yp, zp] = [4, 4, 4]
    # integer division so the slice indices stay integers
    [xSize, ySize, zSize] = [denoised_data.shape[0] // xp,
                             denoised_data.shape[1] // yp,
                             denoised_data.shape[2] // zp]
    datalist = []
    for x, y, z in itertools.product(range(xp), range(yp), range(zp)):
        [xS, yS, zS] = [x * xSize, y * ySize, z * zSize]
        [xE, yE, zE] = [denoised_data.shape[0] if x == xp - 1 else (x + 1) * xSize,
                        denoised_data.shape[1] if y == yp - 1 else (y + 1) * ySize,
                        denoised_data.shape[2] if z == zp - 1 else (z + 1) * zSize]
        tup = (denoised_data[xS:xE, yS:yE, zS:zE], mask[xS:xE, yS:yE, zS:zE])
        datalist.append(tup)
    return datalist
def denoise_dipy(input_dwi, input_bval, input_bvec, mask_image, output_dwi):
    # This function uses nlmeans as part of dipy to remove noise from images
    img = nib.load(input_dwi)
    data = img.get_data()
    mask = nib.load(mask_image).get_data()
    aff = img.get_affine()
    sform = img.get_sform()
    qform = img.get_qform()

    bvals, bvecs = read_bvals_bvecs(input_bval, input_bvec)
    values = np.array(bvals)
    ii = np.where(values == bvals.min())[0]

    sigma = estimate_sigma(data)
    sigma = np.mean(sigma[ii])

    den = nlmeans(data, sigma=sigma, mask=mask)

    den_img = nib.Nifti1Image(den.astype(np.float32), aff, img.header)
    den_img.set_sform(sform)
    den_img.set_qform(qform)

    nib.save(den_img, output_dwi)
def compute_dti(fname_in, fname_bvals, fname_bvecs, prefix, method, evecs,
                file_mask):
    """
    Compute DTI.

    :param fname_in: input 4d file.
    :param bvals: bvals txt file
    :param bvecs: bvecs txt file
    :param prefix: output prefix. Example: "dti_"
    :param method: algo for computing dti
    :param evecs: bool: output diffusion tensor eigenvectors
    :return: True/False
    """
    # Open file.
    from spinalcordtoolbox.image import Image
    nii = Image(fname_in)
    data = nii.data
    sct.printv('data.shape (%d, %d, %d, %d)' % data.shape)

    # open bvecs/bvals
    bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)
    gtab = gradient_table(bvals, bvecs)

    # mask and crop the data. This is a quick way to avoid calculating
    # Tensors on the background of the image.
    if not file_mask == '':
        sct.printv('Open mask file...', param.verbose)
        # open mask file
        nii_mask = Image(file_mask)
        mask = nii_mask.data

    # fit tensor model
    sct.printv('Computing tensor using "' + method + '" method...',
               param.verbose)
    import dipy.reconst.dti as dti
    if method == 'standard':
        tenmodel = dti.TensorModel(gtab)
        if file_mask == '':
            tenfit = tenmodel.fit(data)
        else:
            tenfit = tenmodel.fit(data, mask)
    elif method == 'restore':
        import dipy.denoise.noise_estimate as ne
        sigma = ne.estimate_sigma(data)
        dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
        if file_mask == '':
            tenfit = dti_restore.fit(data)
        else:
            tenfit = dti_restore.fit(data, mask)

    # Compute metrics
    sct.printv('Computing metrics...', param.verbose)
    # FA
    nii.data = tenfit.fa
    nii.save(prefix + 'FA.nii.gz', dtype='float32')
    # MD
    nii.data = tenfit.md
    nii.save(prefix + 'MD.nii.gz', dtype='float32')
    # RD
    nii.data = tenfit.rd
    nii.save(prefix + 'RD.nii.gz', dtype='float32')
    # AD
    nii.data = tenfit.ad
    nii.save(prefix + 'AD.nii.gz', dtype='float32')

    if evecs:
        data_evecs = tenfit.evecs
        # output 1st (V1), 2nd (V2) and 3rd (V3) eigenvectors as 4d data
        for idim in range(3):
            nii.data = data_evecs[:, :, :, :, idim]
            nii.save(prefix + 'V' + str(idim + 1) + '.nii.gz', dtype="float32")

    return True
In places where the tensor model is particularly sensitive to noise, the
resulting tensor field will be distorted.

.. figure:: tensor_ellipsoids_wls_noisy.png
   :align: center

   **Tensor Ellipsoids from noisy data**.

To estimate the parameters from the noisy data using RESTORE, we need to
estimate what would be a reasonable amount of noise to expect in the
measurement. To do that, we use the `dipy.denoise.noise_estimate` module:
"""

import dipy.denoise.noise_estimate as ne
sigma = ne.estimate_sigma(data)

"""
This estimate of the standard deviation will be used by the RESTORE algorithm
to identify the outliers in each voxel and is given as an input when
initializing the TensorModel object:
"""

dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
fit_restore_noisy = dti_restore.fit(noisy_data)
fa3 = fit_restore_noisy.fa
evals3 = fit_restore_noisy.evals
evecs3 = fit_restore_noisy.evecs
cfa3 = dti.color_fa(fa3, evecs3)
ren = fvtk.ren()
mask = data[..., 0] > 80
data = data[..., 1]

print("vol size", data.shape)

t = time()

"""
In order to generate the two pre-denoised versions of the data we will use the
``non_local_means`` denoising. For ``non_local_means`` first we need to
estimate the standard deviation of the noise. We use N=4 since the Sherbrooke
dataset was acquired on a 1.5T Siemens scanner with a 4 array head coil.
"""

sigma = estimate_sigma(data, N=4)

"""
For the denoised version of the original data which preserves sharper
features, we perform non-local means with smaller patch size.
"""

den_small = non_local_means(data, sigma=sigma, mask=mask, patch_radius=1,
                            block_radius=1, rician=True)

"""
    print("[{0}] Saving mask".format(datetime.datetime.now()))
    saving_mask_start = time()
    nib.save(nib.Nifti1Image(mask.astype(int), affine), mask_file)
    print("[{0}] Saved mask to {2} in {1}s".format(
        datetime.datetime.now(), time() - saving_mask_start, mask_file))
else:
    print("[{0}] Skipping mask save - already exists".format(
        datetime.datetime.now()))

print("[{0}] Denoising".format(datetime.datetime.now()))
denoise_start = time()
from dipy.denoise import nlmeans
from dipy.denoise.noise_estimate import estimate_sigma
sigma = estimate_sigma(data)
denoised_data = nlmeans.nlmeans(data, num_threads=8, sigma=sigma, mask=mask)
print("[{0}] Finished denoising in {1}s".format(datetime.datetime.now(),
                                                time() - denoise_start))

denoised_file = file_prefix + 'denoised_data.nii.gz'
if not op.exists(denoised_file):
    print("[{0}] Saving denoising result".format(datetime.datetime.now()))
    saving_denoise_start = time()
    nib.save(nib.Nifti1Image(denoised_data, affine), denoised_file)
    print("[{0}] Saved denoised file to {1} in {2}s".format(
        datetime.datetime.now(), denoised_file,
        time() - saving_denoise_start))
else:
    print("[{0}] Skipping denoised file save - already exists".format(
        datetime.datetime.now()))
def Kurtosis(dwi, mask):
    import numpy as np
    import dipy.reconst.dki as dki
    import dipy.reconst.dti as dti
    import dipy.reconst.dki_micro as dki_micro
    from dipy.data import fetch_cfin_multib
    from dipy.data import read_cfin_dwi
    from dipy.segment.mask import median_otsu
    from dipy.io.image import load_nifti, save_nifti
    from scipy.ndimage.filters import gaussian_filter
    import nibabel as nib
    from dipy.core.gradients import gradient_table
    from dipy.io import read_bvals_bvecs
    from sklearn import preprocessing
    import dipy.denoise.noise_estimate as ne  # determine the noise needed for RESTORE
    import os

    bval = '/media/amr/HDD/Work/October_Acquistion/bval_multishell'
    bvec = '/media/amr/HDD/Work/October_Acquistion/bvec_multishell'
    protocol = '/media/amr/HDD/Work/October_Acquistion/MDT_multishell_protocol.prtcl'

    data, affine = load_nifti(dwi)
    mask, affine_mask = load_nifti(mask)
    protocol = np.loadtxt(protocol)

    fbval = bval
    fbvec = bvec
    bval, bvec = read_bvals_bvecs(fbval, fbvec)

    gnorm = protocol[:, 3]
    Delta = protocol[:, 4]
    delta = protocol[:, 5]
    TE = protocol[:, 6]
    TR = protocol[:, 8]

    if np.dot(bvec[5, :], bvec[5, :]) == 1.0:
        gtab = gradient_table(bval, bvec, big_delta=Delta, small_delta=delta,
                              b0_threshold=0, atol=1)
    else:
        bvec = preprocessing.normalize(bvec, norm='l2')
        gtab = gradient_table(bval, bvec, big_delta=Delta, small_delta=delta,
                              b0_threshold=0, atol=0.01)

    # without disable_background_masking, it does not work with some subjects
    sigma = ne.estimate_sigma(data, disable_background_masking=True)

    # dkimodel = dki.DiffusionKurtosisModel(gtab, fit_method='WLS')
    # (the old way, also the default)
    dkimodel = dki.DiffusionKurtosisModel(gtab, fit_method='RESTORE',
                                          sigma=sigma)

    # AWF and TORT from the microstructure model
    dki_micro_model = dki_micro.KurtosisMicrostructureModel(
        gtab, fit_method='RESTORE')

    # fit the models
    dkifit = dkimodel.fit(data, mask=mask)
    dki_micro_fit = dki_micro_model.fit(data, mask=mask)

    FA = dkifit.fa
    MD = dkifit.md
    AD = dkifit.ad
    RD = dkifit.rd
    KA = dkifit.kfa
    MK = dkifit.mk(0, 3)
    AK = dkifit.ak(0, 3)
    RK = dkifit.rk(0, 3)
    AWF = dki_micro_fit.awf  # Axonal water fraction
    TORT = dki_micro_fit.tortuosity  # Tortuosity

    save_nifti('DKI_FA.nii', FA, affine)
    save_nifti('DKI_MD.nii', MD, affine)
    save_nifti('DKI_AD.nii', AD, affine)
    save_nifti('DKI_RD.nii', RD, affine)
    save_nifti('DKI_KA.nii', KA, affine)
    save_nifti('DKI_MK.nii', MK, affine)
    save_nifti('DKI_AK.nii', AK, affine)
    save_nifti('DKI_RK.nii', RK, affine)
    save_nifti('DKI_AWF.nii', AWF, affine)
    save_nifti('DKI_TORT.nii', TORT, affine)

    DKI_FA = os.path.abspath('DKI_FA.nii')
    DKI_MD = os.path.abspath('DKI_MD.nii')
    DKI_AD = os.path.abspath('DKI_AD.nii')
    DKI_RD = os.path.abspath('DKI_RD.nii')
    DKI_KA = os.path.abspath('DKI_KA.nii')
    DKI_MK = os.path.abspath('DKI_MK.nii')
    DKI_AK = os.path.abspath('DKI_AK.nii')
    DKI_RK = os.path.abspath('DKI_RK.nii')
    DKI_AWF = os.path.abspath('DKI_AWF.nii')
    DKI_TORT = os.path.abspath('DKI_TORT.nii')

    return (DKI_FA, DKI_MD, DKI_AD, DKI_RD, DKI_KA, DKI_MK, DKI_AK, DKI_RK,
            DKI_AWF, DKI_TORT)
""" Since the diffusion kurtosis models involves the estimation of a large number of parameters [TaxCMW2015]_ and since the non-Gaussian components of the diffusion signal are more sensitive to artefacts [NetoHe2012]_, a fundamental data pre-processing step for diffusion kurtosis fitting is to denoise our data. For this, we use Dipy's non-local mean filter (see :ref:`example-denoise-nlmeans`). Note that, since the HCP-like data has a large number of diffusion-weigthed volumes, this procedure can take a couple of hours to compute the entire dataset. Therefore, to speed the run time in this example we only denoise an axial slice of the data. """ axial_slice = 40 sigma = estimate_sigma(data, N=4) mask_roi = np.zeros(data.shape[:-1], dtype=bool) mask_roi[:, :, axial_slice] = mask[:, :, axial_slice] den = nlmeans(data, sigma=sigma, mask=mask_roi) den = den[:, :, axial_slice, :] """ Now that we have loaded and prepared the voxels to process we can go forward with the voxel reconstruction. This can be done by first instantiating the DiffusionKurtosisModel in the following way: """ dkimodel = dki.DiffusionKurtosisModel(gtab)
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    if not args.not_all:
        args.fa = args.fa or 'fa.nii.gz'
        args.ga = args.ga or 'ga.nii.gz'
        args.rgb = args.rgb or 'rgb.nii.gz'
        args.md = args.md or 'md.nii.gz'
        args.ad = args.ad or 'ad.nii.gz'
        args.rd = args.rd or 'rd.nii.gz'
        args.mode = args.mode or 'mode.nii.gz'
        args.norm = args.norm or 'tensor_norm.nii.gz'
        args.tensor = args.tensor or 'tensor.nii.gz'
        args.evecs = args.evecs or 'tensor_evecs.nii.gz'
        args.evals = args.evals or 'tensor_evals.nii.gz'
        args.residual = args.residual or 'dti_residual.nii.gz'
        args.p_i_signal = (args.p_i_signal
                           or 'physically_implausible_signals_mask.nii.gz')
        args.pulsation = args.pulsation or 'pulsation_and_misalignment.nii.gz'

    outputs = [args.fa, args.ga, args.rgb, args.md, args.ad, args.rd,
               args.mode, args.norm, args.tensor, args.evecs, args.evals,
               args.residual, args.p_i_signal, args.pulsation]

    if args.not_all and not any(outputs):
        parser.error('When using --not_all, you need to specify at least '
                     'one metric to output.')

    assert_inputs_exist(
        parser, [args.input, args.bvals, args.bvecs], args.mask)
    assert_outputs_exist(parser, args, outputs)

    img = nib.load(args.input)
    data = img.get_data()
    affine = img.get_affine()
    if args.mask is None:
        mask = None
    else:
        mask = nib.load(args.mask).get_data().astype(np.bool)

    # Validate bvals and bvecs
    logging.info('Tensor estimation with the %s method...', args.method)
    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    check_b0_threshold(args, bvals.min())
    gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())

    # Get tensors
    if args.method == 'restore':
        sigma = ne.estimate_sigma(data)
        tenmodel = TensorModel(gtab, fit_method=args.method, sigma=sigma,
                               min_signal=_get_min_nonzero_signal(data))
    else:
        tenmodel = TensorModel(gtab, fit_method=args.method,
                               min_signal=_get_min_nonzero_signal(data))

    tenfit = tenmodel.fit(data, mask)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    FA = np.clip(FA, 0, 1)

    if args.tensor:
        # Get the Tensor values and format them for visualisation
        # in the Fibernavigator.
        tensor_vals = lower_triangular(tenfit.quadratic_form)
        correct_order = [0, 1, 3, 2, 4, 5]
        tensor_vals_reordered = tensor_vals[..., correct_order]
        fiber_tensors = nib.Nifti1Image(
            tensor_vals_reordered.astype(np.float32), affine)
        nib.save(fiber_tensors, args.tensor)

    if args.fa:
        fa_img = nib.Nifti1Image(FA.astype(np.float32), affine)
        nib.save(fa_img, args.fa)

    if args.ga:
        GA = geodesic_anisotropy(tenfit.evals)
        GA[np.isnan(GA)] = 0
        ga_img = nib.Nifti1Image(GA.astype(np.float32), affine)
        nib.save(ga_img, args.ga)

    if args.rgb:
        RGB = color_fa(FA, tenfit.evecs)
        rgb_img = nib.Nifti1Image(np.array(255 * RGB, 'uint8'), affine)
        nib.save(rgb_img, args.rgb)

    if args.md:
        MD = mean_diffusivity(tenfit.evals)
        md_img = nib.Nifti1Image(MD.astype(np.float32), affine)
        nib.save(md_img, args.md)

    if args.ad:
        AD = axial_diffusivity(tenfit.evals)
        ad_img = nib.Nifti1Image(AD.astype(np.float32), affine)
        nib.save(ad_img, args.ad)

    if args.rd:
        RD = radial_diffusivity(tenfit.evals)
        rd_img = nib.Nifti1Image(RD.astype(np.float32), affine)
        nib.save(rd_img, args.rd)

    if args.mode:
        # Compute tensor mode
        inter_mode = dipy_mode(tenfit.quadratic_form)

        # Since the mode computation can generate NANs when not masked,
        # we need to remove them.
        non_nan_indices = np.isfinite(inter_mode)
        mode = np.zeros(inter_mode.shape)
        mode[non_nan_indices] = inter_mode[non_nan_indices]

        mode_img = nib.Nifti1Image(mode.astype(np.float32), affine)
        nib.save(mode_img, args.mode)

    if args.norm:
        NORM = norm(tenfit.quadratic_form)
        norm_img = nib.Nifti1Image(NORM.astype(np.float32), affine)
        nib.save(norm_img, args.norm)

    if args.evecs:
        evecs = tenfit.evecs.astype(np.float32)
        evecs_img = nib.Nifti1Image(evecs, affine)
        nib.save(evecs_img, args.evecs)

        # save individual e-vectors also
        e1_img = nib.Nifti1Image(evecs[..., 0], affine)
        e2_img = nib.Nifti1Image(evecs[..., 1], affine)
        e3_img = nib.Nifti1Image(evecs[..., 2], affine)

        nib.save(e1_img, add_filename_suffix(args.evecs, '_v1'))
        nib.save(e2_img, add_filename_suffix(args.evecs, '_v2'))
        nib.save(e3_img, add_filename_suffix(args.evecs, '_v3'))

    if args.evals:
        evals = tenfit.evals.astype(np.float32)
        evals_img = nib.Nifti1Image(evals, affine)
        nib.save(evals_img, args.evals)

        # save individual e-values also
        e1_img = nib.Nifti1Image(evals[..., 0], affine)
        e2_img = nib.Nifti1Image(evals[..., 1], affine)
        e3_img = nib.Nifti1Image(evals[..., 2], affine)

        nib.save(e1_img, add_filename_suffix(args.evals, '_e1'))
        nib.save(e2_img, add_filename_suffix(args.evals, '_e2'))
        nib.save(e3_img, add_filename_suffix(args.evals, '_e3'))

    if args.p_i_signal:
        S0 = np.mean(data[..., gtab.b0s_mask], axis=-1, keepdims=True)
        DWI = data[..., ~gtab.b0s_mask]
        pis_mask = np.max(S0 < DWI, axis=-1)

        if args.mask is not None:
            pis_mask *= mask

        pis_img = nib.Nifti1Image(pis_mask.astype(np.int16), affine)
        nib.save(pis_img, args.p_i_signal)

    if args.pulsation:
        STD = np.std(data[..., ~gtab.b0s_mask], axis=-1)

        if args.mask is not None:
            STD *= mask

        std_img = nib.Nifti1Image(STD.astype(np.float32), affine)
        nib.save(std_img, add_filename_suffix(args.pulsation, '_std_dwi'))

        if np.sum(gtab.b0s_mask) <= 1:
            logger.info('Not enough b=0 images to output standard '
                        'deviation map')
        else:
            if np.sum(gtab.b0s_mask) == 2:
                logger.info('Only two b=0 images. Be careful with the '
                            'interpretation of this std map')

            STD = np.std(data[..., gtab.b0s_mask], axis=-1)

            if args.mask is not None:
                STD *= mask

            std_img = nib.Nifti1Image(STD.astype(np.float32), affine)
            nib.save(std_img, add_filename_suffix(args.pulsation, '_std_b0'))

    if args.residual:
        # Mean residual image
        S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)
        data_p = tenfit.predict(gtab, S0)
        R = np.mean(np.abs(data_p[..., ~gtab.b0s_mask] -
                           data[..., ~gtab.b0s_mask]), axis=-1)

        if args.mask is not None:
            R *= mask

        R_img = nib.Nifti1Image(R.astype(np.float32), affine)
        nib.save(R_img, args.residual)

        # Each volume's residual statistics
        if args.mask is None:
            logger.info("Outlier detection will not be performed, since no "
                        "mask was provided.")
        stats = [dict.fromkeys(['label', 'mean', 'iqr', 'cilo', 'cihi',
                                'whishi', 'whislo', 'fliers', 'q1', 'med',
                                'q3'], [])
                 for i in range(data.shape[-1])]  # stats with format for boxplots
        # Note that stats will be computed manually and plotted using bxp
        # but could be computed using stats = cbook.boxplot_stats
        # or pyplot.boxplot(x)
        R_k = np.zeros(data.shape[-1])    # mean residual per DWI
        std = np.zeros(data.shape[-1])    # std residual per DWI
        q1 = np.zeros(data.shape[-1])     # first quartile per DWI
        q3 = np.zeros(data.shape[-1])     # third quartile per DWI
        iqr = np.zeros(data.shape[-1])    # interquartile range per DWI
        percent_outliers = np.zeros(data.shape[-1])
        nb_voxels = np.count_nonzero(mask)
        for k in range(data.shape[-1]):
            x = np.abs(data_p[..., k] - data[..., k])[mask]
            R_k[k] = np.mean(x)
            std[k] = np.std(x)
            q3[k], q1[k] = np.percentile(x, [75, 25])
            iqr[k] = q3[k] - q1[k]
            stats[k]['med'] = (q1[k] + q3[k]) / 2
            stats[k]['mean'] = R_k[k]
            stats[k]['q1'] = q1[k]
            stats[k]['q3'] = q3[k]
            stats[k]['whislo'] = q1[k] - 1.5 * iqr[k]
            stats[k]['whishi'] = q3[k] + 1.5 * iqr[k]
            stats[k]['label'] = k

            # Outliers are observations that fall below Q1 - 1.5(IQR) or
            # above Q3 + 1.5(IQR). We check if a voxel is an outlier only
            # if we have a mask, else we are biased.
            if args.mask is not None:
                outliers = (x < stats[k]['whislo']) | (x > stats[k]['whishi'])
                percent_outliers[k] = np.sum(outliers) / nb_voxels * 100
                # What would be our definition of too many outliers?
                # Maybe mean(all_means)+-3SD?
                # Or we let people choose based on the figure.
                # if percent_outliers[k] > ???? :
                #     logger.warning('  Careful! Diffusion-Weighted Image'
                #                    ' i=%s has %s %% outlier voxels',
                #                    k, percent_outliers[k])

        # Saving all statistics as npy values
        residual_basename, _ = split_name_with_nii(args.residual)
        res_stats_basename = residual_basename + ".npy"
        np.save(add_filename_suffix(
            res_stats_basename, "_mean_residuals"), R_k)
        np.save(add_filename_suffix(res_stats_basename, "_q1_residuals"), q1)
        np.save(add_filename_suffix(res_stats_basename, "_q3_residuals"), q3)
        np.save(add_filename_suffix(res_stats_basename, "_iqr_residuals"), iqr)
        np.save(add_filename_suffix(res_stats_basename, "_std_residuals"), std)

        # Showing results in graph
        if args.mask is None:
            fig, axe = plt.subplots(nrows=1, ncols=1, squeeze=False)
        else:
            fig, axe = plt.subplots(nrows=1, ncols=2, squeeze=False,
                                    figsize=[10, 4.8])
            # Default is [6.4, 4.8]. Increasing width to see better.

        medianprops = dict(linestyle='-', linewidth=2.5, color='firebrick')
        meanprops = dict(linestyle='-', linewidth=2.5, color='green')
        axe[0, 0].bxp(stats, showmeans=True, meanline=True, showfliers=False,
                      medianprops=medianprops, meanprops=meanprops)
        axe[0, 0].set_xlabel('DW image')
        axe[0, 0].set_ylabel('Residuals per DWI volume. Red is median,\n'
                             'green is mean. Whiskers are 1.5*interquartile')
        axe[0, 0].set_title('Residuals')
        axe[0, 0].set_xticks(range(0, q1.shape[0], 5))
        axe[0, 0].set_xticklabels(range(0, q1.shape[0], 5))

        if args.mask is not None:
            axe[0, 1].plot(range(data.shape[-1]), percent_outliers)
            axe[0, 1].set_xticks(range(0, q1.shape[0], 5))
            axe[0, 1].set_xticklabels(range(0, q1.shape[0], 5))
            axe[0, 1].set_xlabel('DW image')
            axe[0, 1].set_ylabel('Percentage of outlier voxels')
            axe[0, 1].set_title('Outliers')

        plt.savefig(residual_basename + '_residuals_stats.png')
In places where the tensor model is particularly sensitive to noise, the
resulting tensor field will be distorted.

.. figure:: tensor_ellipsoids_wls_noisy.png
   :align: center

   Tensor Ellipsoids from noisy data.

To estimate the parameters from the noisy data using RESTORE, we need to
estimate what would be a reasonable amount of noise to expect in the
measurement. To do that, we use the ``dipy.denoise.noise_estimate`` module:
"""

import dipy.denoise.noise_estimate as ne
sigma = ne.estimate_sigma(data)

"""
This estimate of the standard deviation will be used by the RESTORE algorithm
to identify the outliers in each voxel and is given as an input when
initializing the TensorModel object:
"""

dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
fit_restore_noisy = dti_restore.fit(noisy_data)
fa3 = fit_restore_noisy.fa
evals3 = fit_restore_noisy.evals
evecs3 = fit_restore_noisy.evecs
cfa3 = dti.color_fa(fa3, evecs3)
ren = fvtk.ren()
fvtk.add(ren, fvtk.tensor(evals3, evecs3, cfa3, sphere))
def compute_dti(fname_in, fname_bvals, fname_bvecs, prefix, method, file_mask):
    """
    Compute DTI.

    :param fname_in: input 4d file.
    :param bvals: bvals txt file
    :param bvecs: bvecs txt file
    :param prefix: output prefix. Example: "dti_"
    :param method: algo for computing dti
    :return: True/False
    """
    # Open file.
    from msct_image import Image
    nii = Image(fname_in)
    data = nii.data
    print('data.shape (%d, %d, %d, %d)' % data.shape)

    # open bvecs/bvals
    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)
    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs)

    # mask and crop the data. This is a quick way to avoid calculating
    # Tensors on the background of the image.
    if not file_mask == '':
        printv('Open mask file...', param.verbose)
        # open mask file
        nii_mask = Image(file_mask)
        mask = nii_mask.data

    # fit tensor model
    printv('Computing tensor using "' + method + '" method...', param.verbose)
    import dipy.reconst.dti as dti
    if method == 'standard':
        tenmodel = dti.TensorModel(gtab)
        if file_mask == '':
            tenfit = tenmodel.fit(data)
        else:
            tenfit = tenmodel.fit(data, mask)
    elif method == 'restore':
        import dipy.denoise.noise_estimate as ne
        sigma = ne.estimate_sigma(data)
        dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
        if file_mask == '':
            tenfit = dti_restore.fit(data)
        else:
            tenfit = dti_restore.fit(data, mask)

    # Compute metrics
    printv('Computing metrics...', param.verbose)
    # FA
    from dipy.reconst.dti import fractional_anisotropy
    nii.data = fractional_anisotropy(tenfit.evals)
    nii.setFileName(prefix + 'FA.nii.gz')
    nii.save('float32')
    # MD
    from dipy.reconst.dti import mean_diffusivity
    nii.data = mean_diffusivity(tenfit.evals)
    nii.setFileName(prefix + 'MD.nii.gz')
    nii.save('float32')
    # RD
    from dipy.reconst.dti import radial_diffusivity
    nii.data = radial_diffusivity(tenfit.evals)
    nii.setFileName(prefix + 'RD.nii.gz')
    nii.save('float32')
    # AD
    from dipy.reconst.dti import axial_diffusivity
    nii.data = axial_diffusivity(tenfit.evals)
    nii.setFileName(prefix + 'AD.nii.gz')
    nii.save('float32')

    return True
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    vol = nib.load(args.input)
    data = vol.get_data()
    affine = vol.get_affine()

    if args.mask is None:
        mask = np.ones(data.shape[:-1], dtype=np.bool)
    else:
        mask = nib.load(args.mask).get_data().astype(np.bool)

    N = args.N

    if args.n_cores is None:
        n_cores = cpu_count()
    else:
        if args.n_cores > cpu_count():
            n_cores = cpu_count()
        else:
            n_cores = args.n_cores

    noise_method = args.noise_method
    smooth_method = args.smooth_method
    filename = args.output

    if noise_method == 'noise_map':
        if args.noise_maps is None:
            raise ValueError('You need to supply --noise_map path_to_file '
                             'to use --noise_est noise_map')
        noise_maps = nib.load(args.noise_maps).get_data()

    # Since negatives are allowed, convert uint to int
    if data.dtype.kind == 'u':
        dtype = data.dtype.name[1:]
    else:
        dtype = data.dtype

    logging.info("Estimating m_hat with method " + smooth_method)

    if smooth_method == 'local_mean':
        m_hat = np.zeros_like(data, dtype=np.float32)
        size = (3, 3, 3)
        k = np.ones(size) / np.sum(size)
        conv_out = np.zeros_like(data[..., 0], dtype=np.float64)

        for idx in range(data.shape[-1]):
            convolve(data[..., idx], k, mode='reflect', output=conv_out)
            m_hat[..., idx] = conv_out

    elif smooth_method == 'nlmeans':
        nlmeans_sigma = estimate_sigma(data)
        m_hat = nlmeans(data, nlmeans_sigma, rician=False, mask=mask)

    elif smooth_method == 'no_smoothing':
        m_hat = np.array(data, copy=True, dtype=np.float32)

    elif smooth_method == 'sh_smooth':
        bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)
        gtab = gradient_table(bvals, bvecs)
        m_hat = sh_smooth(data, gtab, sh_order=4)

    logging.info("Estimating noise with method " + noise_method)

    if noise_method == 'piesno':
        sigma = np.zeros_like(data, dtype=np.float32)
        mask_noise = np.zeros(data.shape[:-1], dtype=np.int16)

        for idx in range(data.shape[-2]):
            logging.info("Now processing slice %s out of %s",
                         idx + 1, data.shape[-2])
            sigma[..., idx, :], mask_noise[..., idx] = piesno(
                data[..., idx, :], N=N, return_mask=True)

        if args.save_piesno_mask is not None:
            nib.save(nib.Nifti1Image(mask_noise.astype(np.int16), affine),
                     args.save_piesno_mask)

    elif noise_method == 'local_std':
        sigma_3D = local_standard_deviation(data, n_cores=n_cores)

        # Compute the corrected value for each 3D volume
        sigma = corrected_sigma(m_hat,
                                np.repeat(sigma_3D[..., None],
                                          data.shape[-1], axis=-1),
                                np.repeat(mask[..., None],
                                          data.shape[-1], axis=-1),
                                N, n_cores=n_cores)

    elif noise_method == 'noise_map':
        # Local piesno works on 4D, so we need to broadcast before
        if noise_maps.ndim == 3:
            noise_maps = noise_maps[..., None]

        sigma, mask_noise = local_piesno(noise_maps, N=N, return_mask=True)
        sigma = np.repeat(sigma[..., None], data.shape[-1], axis=-1)

        if args.save_piesno_mask is not None:
            nib.save(nib.Nifti1Image(mask_noise.astype(np.int16), affine),
                     args.save_piesno_mask)

    nib.save(nib.Nifti1Image(sigma, affine), args.sigma)

    logging.info("Now performing stabilisation")

    pool = Pool(processes=n_cores)
    arglist = [(data[..., idx, :],
                m_hat[..., idx, :],
                np.repeat(mask[..., idx, None], data.shape[-1], axis=-1),
                sigma[..., idx, :],
                N_vox)
               for idx, N_vox in zip(range(data.shape[-2]), repeat(N))]

    data_out = pool.map(multiprocess_stabilisation, arglist)
    pool.close()
    pool.join()

    data_stabilized = np.empty(data.shape, dtype=dtype)
    for idx in range(len(data_out)):
        data_stabilized[..., idx, :] = data_out[idx]

    nib.save(nib.Nifti1Image(data_stabilized, affine), filename)
path_output = sys.argv[2]

from tareas.dependencias import utils
from tareas.dependencias import definitions as d
import nibabel as nib
import numpy as np
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.denoise.nlmeans import nlmeans

print(' - running NonLocal Mean algorithm...')
finalFileName = os.path.join(
    path_output,
    utils.to_extract_filename(path_input) + d.id_non_local_mean + d.extension)

if not os.path.exists(finalFileName):
    img = nib.load(path_input)
    data = img.get_data()

    newData = np.zeros(data.shape)
    gradientDirections = data.shape[-1]

    for index in range(gradientDirections):
        print(index)
        sigma = estimate_sigma(data[:, :, :, index], N=8)
        newData[:, :, :, index] = nlmeans(data[:, :, :, index], sigma=sigma)

    nib.save(nib.Nifti1Image(newData.astype(np.float32), img.affine),
             finalFileName)

print(finalFileName)
def compute_dti(fname_in, fname_bvals, fname_bvecs, prefix, method, evecs,
                file_mask):
    """
    Compute DTI.

    :param fname_in: input 4d file.
    :param bvals: bvals txt file
    :param bvecs: bvecs txt file
    :param prefix: output prefix. Example: "dti_"
    :param method: algo for computing dti
    :param evecs: bool: output diffusion tensor eigenvectors and eigenvalues
    :return: True/False
    """
    # Open file.
    from spinalcordtoolbox.image import Image
    nii = Image(fname_in)
    data = nii.data
    sct.printv('data.shape (%d, %d, %d, %d)' % data.shape)

    # open bvecs/bvals
    bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)
    gtab = gradient_table(bvals, bvecs)

    # mask and crop the data. This is a quick way to avoid calculating
    # Tensors on the background of the image.
    if not file_mask == '':
        sct.printv('Open mask file...', param.verbose)
        # open mask file
        nii_mask = Image(file_mask)
        mask = nii_mask.data

    # fit tensor model
    sct.printv('Computing tensor using "' + method + '" method...',
               param.verbose)
    import dipy.reconst.dti as dti
    if method == 'standard':
        tenmodel = dti.TensorModel(gtab)
        if file_mask == '':
            tenfit = tenmodel.fit(data)
        else:
            tenfit = tenmodel.fit(data, mask)
    elif method == 'restore':
        import dipy.denoise.noise_estimate as ne
        sigma = ne.estimate_sigma(data)
        dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
        if file_mask == '':
            tenfit = dti_restore.fit(data)
        else:
            tenfit = dti_restore.fit(data, mask)

    # Compute metrics
    sct.printv('Computing metrics...', param.verbose)
    # FA
    nii.data = tenfit.fa
    nii.save(prefix + 'FA.nii.gz', dtype='float32')
    # MD
    nii.data = tenfit.md
    nii.save(prefix + 'MD.nii.gz', dtype='float32')
    # RD
    nii.data = tenfit.rd
    nii.save(prefix + 'RD.nii.gz', dtype='float32')
    # AD
    nii.data = tenfit.ad
    nii.save(prefix + 'AD.nii.gz', dtype='float32')

    if evecs:
        data_evecs = tenfit.evecs
        data_evals = tenfit.evals
        # output 1st (V1), 2nd (V2) and 3rd (V3) eigenvectors as 4d data
        for idim in range(3):
            nii.data = data_evecs[:, :, :, :, idim]
            nii.save(prefix + 'V' + str(idim + 1) + '.nii.gz', dtype="float32")
            nii.data = data_evals[:, :, :, idim]
            nii.save(prefix + 'E' + str(idim + 1) + '.nii.gz', dtype="float32")

    return True
import nibabel as nib
import numpy as np
import dipy.reconst.dti as dti
import dipy.denoise.noise_estimate as ne
import dipy.io as io
import dipy.core.gradients as cg

# load dti data
raw = nib.load(rawfile)
dataraw = raw.get_data()
img = nib.load(dtifile)
data = img.get_data()

# load bvecs and bvals
bvals, bvecs = io.read_bvals_bvecs(dtibval, dtibvec)
gtab = cg.gradient_table(bvals, bvecs)

# noise estimation from the b=0 volumes
sigma = ne.estimate_sigma(dataraw[:, :, :, bvals == 0])
sigmamean = np.mean(sigma)

# tensor computation using RESTORE
tenmodel = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigmamean)
tenfit = tenmodel.fit(data)

# Derived measures
from dipy.reconst.dti import (fractional_anisotropy, mean_diffusivity,
                              radial_diffusivity, axial_diffusivity, mode)
FA = fractional_anisotropy(tenfit.evals)
MD = mean_diffusivity(tenfit.evals)
AD = axial_diffusivity(tenfit.evals)
RD = radial_diffusivity(tenfit.evals)
MO = mode(tenfit.quadratic_form)  # tensor mode is computed from the quadratic form
tenfit.evals[np.isnan(tenfit.evals)] = 0
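# Small sketch (not part of the script above): when b=0 volumes are encoded
# with small nonzero b-values, gtab.b0s_mask is a safer selector than an
# exact bvals == 0 comparison, since gradient_table applies a b0 threshold.
b0_data = dataraw[..., gtab.b0s_mask]
sigma = ne.estimate_sigma(b0_data)
sigmamean = np.mean(sigma)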
def load_data(name="sherbrooke", a=.5, flat=True):
    """
    Loads the raw data.

    :param name: str
        options: "sherbrooke", "hardi", "siemens" (all MRI), "j0126" (EM)
    :param a: float
        specifies the angle of the used MRI data, 0 <= a < 1.
    :param flat: bool
        True: transform the data into the flat format used with Spark
    :return: either
        new_data: flattened raw data
        or
        data: 3d raw data
        sigma: noise estimate
    """
    if name == "sherbrooke":
        img, gtab = read_sherbrooke_3shell()
    elif name == "hardi":
        img, gtab = read_stanford_hardi()
    elif name == "siemens":
        img = read_siemens_scil_b0()
    elif name == "j0126":
        data = np.load(path_to_folder + "/data/j0126_sample.npy")
    else:
        raise Exception("Unknown dataset name: %s" % name)

    if "j0126" not in name:
        data = img.get_data()
        d_type = int
    else:
        d_type = np.uint8

    print("Original shape:", data.shape)

    z = int((data.shape[2] - sh[2]) / 2)
    if "j0126" not in name:
        a = int(data.shape[3] * a)
        data = data[: sh[0], : sh[1], z: z + sh[2], a].astype(np.int32)
    else:
        data = data[: sh[0], : sh[1], z: z + sh[2]].astype(np.int32)

    sigma = noise_estimate.estimate_sigma(data, N=4)
    print(np.mean(sigma))

    if flat:
        if len(sigma.shape) == 1:
            sigma = np.ones_like(data) * sigma[0]

        # Split the volume into DB-sized cubes, padding the border blocks,
        # and pair each block with its flat index and local sigma.
        new_data = []
        for x in range(0, data.shape[0], DB):
            for y in range(0, data.shape[1], DB):
                for z in range(0, data.shape[2], DB):
                    data_block = np.zeros([DB, DB, DB], dtype=d_type)
                    sigma_block = np.ones([DB, DB, DB], dtype=d_type)
                    d_sh = data[x: x + DB, y: y + DB, z: z + DB].shape
                    data_block[0: d_sh[0], 0: d_sh[1], 0: d_sh[2]] = \
                        data[x: x + DB, y: y + DB, z: z + DB]
                    sigma_block[0: d_sh[0], 0: d_sh[1], 0: d_sh[2]] = \
                        sigma[x: x + DB, y: y + DB, z: z + DB]
                    new_data.append([_coordinate_to_index([x, y, z]),
                                     [data_block, sigma_block]])
        return new_data
    else:
        return data, sigma
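# Hypothetical usage sketch: load the Stanford HARDI sample in the flat
# (index, [data_block, sigma_block]) format expected by the Spark pipeline.
blocks = load_data(name="hardi", a=0.5, flat=True)
first_index, (first_block, first_sigma) = blocks[0]
print(first_index, first_block.shape, first_sigma.shape)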
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    if not args.not_all:
        args.fa = args.fa or 'fa.nii.gz'
        args.ga = args.ga or 'ga.nii.gz'
        args.rgb = args.rgb or 'rgb.nii.gz'
        args.md = args.md or 'md.nii.gz'
        args.ad = args.ad or 'ad.nii.gz'
        args.rd = args.rd or 'rd.nii.gz'
        args.mode = args.mode or 'mode.nii.gz'
        args.norm = args.norm or 'tensor_norm.nii.gz'
        args.tensor = args.tensor or 'tensor.nii.gz'
        args.evecs = args.evecs or 'tensor_evecs.nii.gz'
        args.evals = args.evals or 'tensor_evals.nii.gz'
        args.residual = args.residual or 'dti_residual.nii.gz'
        args.p_i_signal = \
            args.p_i_signal or 'physically_implausible_signals_mask.nii.gz'
        args.pulsation = args.pulsation or 'pulsation_and_misalignment.nii.gz'

    outputs = [args.fa, args.ga, args.rgb, args.md, args.ad, args.rd,
               args.mode, args.norm, args.tensor, args.evecs, args.evals,
               args.residual, args.p_i_signal, args.pulsation]

    if args.not_all and not any(outputs):
        parser.error('When using --not_all, you need to specify at least ' +
                     'one metric to output.')

    assert_inputs_exist(
        parser, [args.input, args.bvals, args.bvecs], [args.mask])
    assert_outputs_exists(parser, args, outputs)

    img = nib.load(args.input)
    data = img.get_data()
    affine = img.get_affine()
    if args.mask is None:
        mask = None
    else:
        mask = nib.load(args.mask).get_data().astype(bool)

    # Validate bvals and bvecs
    logging.info('Tensor estimation with the %s method...', args.method)
    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    check_b0_threshold(args, bvals.min())
    gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())

    # Get tensors
    if args.method == 'restore':
        sigma = ne.estimate_sigma(data)
        tenmodel = TensorModel(gtab, fit_method=args.method, sigma=sigma,
                               min_signal=_get_min_nonzero_signal(data))
    else:
        tenmodel = TensorModel(gtab, fit_method=args.method,
                               min_signal=_get_min_nonzero_signal(data))

    tenfit = tenmodel.fit(data, mask)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    FA = np.clip(FA, 0, 1)

    if args.tensor:
        # Get the Tensor values and format them for visualisation
        # in the Fibernavigator.
        tensor_vals = lower_triangular(tenfit.quadratic_form)
        correct_order = [0, 1, 3, 2, 4, 5]
        tensor_vals_reordered = tensor_vals[..., correct_order]
        fiber_tensors = nib.Nifti1Image(
            tensor_vals_reordered.astype(np.float32), affine)
        nib.save(fiber_tensors, args.tensor)

    if args.fa:
        fa_img = nib.Nifti1Image(FA.astype(np.float32), affine)
        nib.save(fa_img, args.fa)

    if args.ga:
        GA = geodesic_anisotropy(tenfit.evals)
        GA[np.isnan(GA)] = 0
        ga_img = nib.Nifti1Image(GA.astype(np.float32), affine)
        nib.save(ga_img, args.ga)

    if args.rgb:
        RGB = color_fa(FA, tenfit.evecs)
        rgb_img = nib.Nifti1Image(np.array(255 * RGB, 'uint8'), affine)
        nib.save(rgb_img, args.rgb)

    if args.md:
        MD = mean_diffusivity(tenfit.evals)
        md_img = nib.Nifti1Image(MD.astype(np.float32), affine)
        nib.save(md_img, args.md)

    if args.ad:
        AD = axial_diffusivity(tenfit.evals)
        ad_img = nib.Nifti1Image(AD.astype(np.float32), affine)
        nib.save(ad_img, args.ad)

    if args.rd:
        RD = radial_diffusivity(tenfit.evals)
        rd_img = nib.Nifti1Image(RD.astype(np.float32), affine)
        nib.save(rd_img, args.rd)

    if args.mode:
        # Compute tensor mode
        inter_mode = dipy_mode(tenfit.quadratic_form)
        # Since the mode computation can generate NaNs when not masked,
        # we need to remove them.
        non_nan_indices = np.isfinite(inter_mode)
        mode = np.zeros(inter_mode.shape)
        mode[non_nan_indices] = inter_mode[non_nan_indices]
        mode_img = nib.Nifti1Image(mode.astype(np.float32), affine)
        nib.save(mode_img, args.mode)

    if args.norm:
        NORM = norm(tenfit.quadratic_form)
        norm_img = nib.Nifti1Image(NORM.astype(np.float32), affine)
        nib.save(norm_img, args.norm)

    if args.evecs:
        evecs = tenfit.evecs.astype(np.float32)
        evecs_img = nib.Nifti1Image(evecs, affine)
        nib.save(evecs_img, args.evecs)
        # save individual e-vectors also
        e1_img = nib.Nifti1Image(evecs[..., 0], affine)
        e2_img = nib.Nifti1Image(evecs[..., 1], affine)
        e3_img = nib.Nifti1Image(evecs[..., 2], affine)
        nib.save(e1_img, add_filename_suffix(args.evecs, '_v1'))
        nib.save(e2_img, add_filename_suffix(args.evecs, '_v2'))
        nib.save(e3_img, add_filename_suffix(args.evecs, '_v3'))

    if args.evals:
        evals = tenfit.evals.astype(np.float32)
        evals_img = nib.Nifti1Image(evals, affine)
        nib.save(evals_img, args.evals)
        # save individual e-values also
        e1_img = nib.Nifti1Image(evals[..., 0], affine)
        e2_img = nib.Nifti1Image(evals[..., 1], affine)
        e3_img = nib.Nifti1Image(evals[..., 2], affine)
        nib.save(e1_img, add_filename_suffix(args.evals, '_e1'))
        nib.save(e2_img, add_filename_suffix(args.evals, '_e2'))
        nib.save(e3_img, add_filename_suffix(args.evals, '_e3'))

    if args.p_i_signal:
        S0 = np.mean(data[..., gtab.b0s_mask], axis=-1, keepdims=True)
        DWI = data[..., ~gtab.b0s_mask]
        pis_mask = np.max(S0 < DWI, axis=-1)

        if args.mask is not None:
            pis_mask *= mask

        pis_img = nib.Nifti1Image(pis_mask.astype(np.int16), affine)
        nib.save(pis_img, args.p_i_signal)

    if args.pulsation:
        STD = np.std(data[..., ~gtab.b0s_mask], axis=-1)

        if args.mask is not None:
            STD *= mask

        std_img = nib.Nifti1Image(STD.astype(np.float32), affine)
        nib.save(std_img, add_filename_suffix(args.pulsation, '_std_dwi'))

        if np.sum(gtab.b0s_mask) <= 1:
            logging.info('Not enough b=0 images to output standard '
                         'deviation map')
        else:
            if np.sum(gtab.b0s_mask) == 2:
                logging.info('Only two b=0 images. Be careful with the '
                             'interpretation of this std map')

            STD = np.std(data[..., gtab.b0s_mask], axis=-1)
            if args.mask is not None:
                STD *= mask

            std_img = nib.Nifti1Image(STD.astype(np.float32), affine)
            nib.save(std_img, add_filename_suffix(args.pulsation, '_std_b0'))

    if args.residual:
        if args.mask is None:
            logging.info("Outlier detection will not be performed, since no "
                         "mask was provided.")
        S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)
        data_p = tenfit.predict(gtab, S0)
        R = np.mean(np.abs(data_p[..., ~gtab.b0s_mask] -
                           data[..., ~gtab.b0s_mask]), axis=-1)

        if args.mask is not None:
            R *= mask

        R_img = nib.Nifti1Image(R.astype(np.float32), affine)
        nib.save(R_img, args.residual)

        R_k = np.zeros(data.shape[-1])   # mean residual per DWI
        std = np.zeros(data.shape[-1])   # std residual per DWI
        q1 = np.zeros(data.shape[-1])    # first quartile
        q3 = np.zeros(data.shape[-1])    # third quartile
        iqr = np.zeros(data.shape[-1])   # interquartile range

        for i in range(data.shape[-1]):
            x = np.abs(data_p[..., i] - data[..., i])[mask]
            R_k[i] = np.mean(x)
            std[i] = np.std(x)
            q3[i], q1[i] = np.percentile(x, [75, 25])
            iqr[i] = q3[i] - q1[i]

            # Outliers are observations that fall below Q1 - 1.5(IQR) or
            # above Q3 + 1.5(IQR). We only check whether a volume is an
            # outlier if we have a mask, otherwise we are biased.
            if args.mask is not None and (R_k[i] < (q1[i] - 1.5 * iqr[i]) or
                                          R_k[i] > (q3[i] + 1.5 * iqr[i])):
                logging.warning('Diffusion-Weighted Image i=%s is an outlier', i)

        residual_basename, _ = split_name_with_nii(args.residual)
        res_stats_basename = residual_basename + ".npy"
        np.save(add_filename_suffix(res_stats_basename, "_mean_residuals"), R_k)
        np.save(add_filename_suffix(res_stats_basename, "_q1_residuals"), q1)
        np.save(add_filename_suffix(res_stats_basename, "_q3_residuals"), q3)
        np.save(add_filename_suffix(res_stats_basename, "_iqr_residuals"), iqr)
        np.save(add_filename_suffix(res_stats_basename, "_std_residuals"), std)

        # TODO: it would be nice to have an error bar with q1 and q3.
        # For now, q1 acts as the std.
        dwi = np.arange(R_k[~gtab.b0s_mask].shape[0])
        plt.bar(dwi, R_k[~gtab.b0s_mask], 0.75, color='y',
                yerr=q1[~gtab.b0s_mask])
        plt.xlabel('DW image')
        plt.ylabel('Mean residuals +- q1')
        plt.title('Residuals')
        plt.savefig(residual_basename + '_residuals_stats.png')
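# Toy illustration (not from the script above) of the per-volume outlier rule:
# the mean residual of a volume is compared against the Q1 - 1.5*IQR and
# Q3 + 1.5*IQR fences of that volume's own voxelwise residual distribution.
import numpy as np

rng = np.random.default_rng(0)
residuals = rng.exponential(scale=1.0, size=1000)   # made-up voxel residuals
mean_res = residuals.mean()
q3, q1 = np.percentile(residuals, [75, 25])
iqr = q3 - q1
is_outlier = mean_res < q1 - 1.5 * iqr or mean_res > q3 + 1.5 * iqr
print(mean_res, q1, q3, is_outlier)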