Example #1
0
def test_normalize_data():
    """Test normalize_data with a boolean b0 mask, on 1-D and 3-D signals.

    ``sig`` counts down from 65 to 1, so with only the first volume marked
    as b0 the normalizer is 65; with the first two marked it is their mean,
    (65 + 64) / 2 = 64.5.
    """

    sig = np.arange(1, 66)[::-1]

    where_b0 = np.zeros(65, 'bool')
    where_b0[0] = True
    # Smoke-test the positional min_signal form; the result is checked via
    # the keyword form below, so the return value is deliberately discarded.
    normalize_data(sig, where_b0, 1)
    # NOTE(review): out=sig is expected to raise -- presumably because the
    # integer-typed input cannot hold the float result; confirm against
    # normalize_data's implementation.
    assert_raises(ValueError, normalize_data, sig, where_b0, out=sig)

    # Single b0 volume: divisor is sig[0] == 65.
    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 65.)
    # min_signal=5 floors the five smallest (trailing) values at 5.
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[-5:], 5 / 65.)

    # Two b0 volumes: divisor is their mean, 64.5.
    where_b0[[0, 1]] = [True, True]
    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 64.5)
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[-5:], 5 / 64.5)

    # Repeat both cases on a broadcast (2, 3, 65) multi-voxel array.
    sig = sig * np.ones((2, 3, 1))

    where_b0[[0, 1]] = [True, False]
    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 65.)
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[..., -5:], 5 / 65.)

    where_b0[[0, 1]] = [True, True]
    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 64.5)
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[..., -5:], 5 / 64.5)
Example #2
0
def test_normalize_data():
    """Verify b0 normalization for a 1-D signal and a multi-voxel volume."""

    signal = np.arange(1, 66)[::-1]

    b0_mask = np.zeros(65, 'bool')
    b0_mask[0] = True
    _ = normalize_data(signal, b0_mask, 1)
    assert_raises(ValueError, normalize_data, signal, b0_mask, out=signal)

    # One b0 volume: the divisor is signal[0] == 65.
    normed = normalize_data(signal, b0_mask, min_signal=1)
    assert_array_almost_equal(normed, signal / 65.)
    normed = normalize_data(signal, b0_mask, min_signal=5)
    assert_array_almost_equal(normed[-5:], 5 / 65.)

    # Two b0 volumes: the divisor is their mean, 64.5.
    b0_mask[[0, 1]] = [True, True]
    normed = normalize_data(signal, b0_mask, min_signal=1)
    assert_array_almost_equal(normed, signal / 64.5)
    normed = normalize_data(signal, b0_mask, min_signal=5)
    assert_array_almost_equal(normed[-5:], 5 / 64.5)

    # Broadcast to a (2, 3, 65) volume and repeat both scenarios.
    signal = signal * np.ones((2, 3, 1))

    b0_mask[[0, 1]] = [True, False]
    normed = normalize_data(signal, b0_mask, min_signal=1)
    assert_array_almost_equal(normed, signal / 65.)
    normed = normalize_data(signal, b0_mask, min_signal=5)
    assert_array_almost_equal(normed[..., -5:], 5 / 65.)

    b0_mask[[0, 1]] = [True, True]
    normed = normalize_data(signal, b0_mask, min_signal=1)
    assert_array_almost_equal(normed, signal / 64.5)
    normed = normalize_data(signal, b0_mask, min_signal=5)
    assert_array_almost_equal(normed[..., -5:], 5 / 64.5)
def main():
    """Fit a MonoExpOpdf model to a HARDI slice and display the ODF blobs."""
    # Parameters used throughout the pipeline.
    sh_order = 6
    verts, edges, efaces = create_unit_sphere(4)

    # Load the sample HARDI data set from disk and crop one axial slice.
    data, fa, bvec, bval, voxel_size = sample_hardi_data()
    data_slice = data[32:76, 32:76, 26:27]
    fa_slice = fa[32:76, 32:76, 26]

    # Divide by the b0 signal so the log of the data is well defined later.
    norm_data = normalize_data(data_slice, bval, min_signal=1)

    # Build and configure the reconstruction model.
    model = MonoExpOpdfModel(sh_order, bval, bvec, .006)
    model.set_sampling_points(verts, edges)

    # Fit the model: sampled opdf values and spherical-harmonic coefficients.
    opdfs_sampled_at_verts = model.evaluate(norm_data)
    opdfs_sph_harm_coef = model.fit_data(norm_data)

    # Render the opdf blobs with mayavi, with the FA slice as backdrop.
    faces = edges[efaces, 0]
    show_blobs(opdfs_sampled_at_verts, verts, faces)
    mlab.imshow(fa_slice, colormap='gray', interpolate=False)
    mlab.show()
Example #4
0
    def _global_fit(self, data, mask=None):
        '''
        Fit fODF and GM/CSF volume fractions globally.

        Parameters
        ----------
        data : ndarray (x, y, z, N)
            Signal values for each voxel. Must be 4D.
        mask : ndarray (x, y, z), optional
            Binary mask specifying voxels of interest with 1; results will only
            be fit at these voxels (0 elsewhere). If `None`, fits all voxels.
            Default: None.

        Returns
        -------
        model_fit : RumbaFit
            Fit object storing model parameters.

        Raises
        ------
        ValueError
            If `data` is not 4D, or `mask` does not match its first three
            dimensions.

        '''

        # Checking data and mask shapes.
        # (Fixed: the messages previously contained a stray 'f' inside the
        # string, e.g. "shape f(...)", from a misplaced f-string prefix.)
        if len(data.shape) != 4:
            raise ValueError(
                f"Data should be 4D, received shape {data.shape}")

        if mask is None:  # default mask includes all voxels
            mask = np.ones(data.shape[:3])

        if data.shape[:3] != mask.shape:
            raise ValueError("Mask shape should match first 3 dimensions of " +
                             f"data, but data dimensions are {data.shape} " +
                             f"while mask dimensions are {mask.shape}")

        # Signal repair, normalization

        # Normalize data to mean b0 image
        data = normalize_data(data, self.where_b0s, _EPS)
        # Rearrange data to match corrected gradient table: a constant b0
        # channel followed by the DWI volumes only.
        data = np.concatenate(
            (np.ones([*data.shape[:3], 1]), data[..., self.where_dwi]), axis=3)
        data[data > 1] = 1  # clip values between 0 and 1

        # All arrays are converted to float32 to reduce memory load
        data = data.astype(np.float32)

        # Generate the deconvolution kernel from the tissue responses.
        self.kernel = generate_kernel(self.gtab, self.sphere, self.wm_response,
                                      self.gm_response,
                                      self.csf_response).astype(np.float32)

        # Fit fODF over the whole volume at once.
        model_params = rumba_deconv_global(data, self.kernel, mask,
                                           self.n_iter, self.recon_type,
                                           self.n_coils, self.R, self.use_tv,
                                           self.verbose)

        model_fit = RumbaFit(self, model_params)
        return model_fit
Example #5
0
def test_normalize_data():
    """Test normalize_data driven by a b-value array instead of a b0 mask.

    ``sig`` counts down from 65 to 1; b0 volumes are those with bval == 0.
    In this variant normalize_data drops the b0 volumes from its output,
    hence the ``sig[..., 1:]`` / ``sig[..., 2:]`` comparisons.
    """

    sig = np.arange(1, 66)[::-1]

    # Invalid inputs: wrong-length bval, all-b0 bval, bval with no b0s,
    # and a None signal must all raise ValueError.
    bval = np.zeros(64)
    assert_raises(ValueError, normalize_data, sig, bval)
    bval = np.zeros(65)
    assert_raises(ValueError, normalize_data, sig, bval)
    bval = np.ones(65)
    assert_raises(ValueError, normalize_data, sig, bval)
    bval[0] = 0
    # Smoke-test the positional min_signal form; result intentionally unused.
    normalize_data(sig, bval, 1)
    assert_raises(ValueError, normalize_data, None, bval, 0)

    # One b0 volume: divide the remaining volumes by sig[0] == 65.
    bval[[0, 1]] = [0, 1]
    norm_sig = normalize_data(sig, bval, min_signal=1)
    assert_array_equal(norm_sig, sig[..., 1:] / 65.)
    norm_sig = normalize_data(sig, bval, min_signal=5)
    assert_array_equal(norm_sig[-5:], 5 / 65.)

    # Two b0 volumes: divide by their mean, 64.5.
    bval[[0, 1]] = [0, 0]
    norm_sig = normalize_data(sig, bval, min_signal=1)
    assert_array_equal(norm_sig, sig[..., 2:] / 64.5)
    norm_sig = normalize_data(sig, bval, min_signal=5)
    assert_array_equal(norm_sig[-5:], 5 / 64.5)

    # Repeat on a broadcast (2, 3, 65) multi-voxel array.
    sig = sig * np.ones((2, 3, 1))

    bval[[0, 1]] = [0, 1]
    norm_sig = normalize_data(sig, bval, min_signal=1)
    assert_array_equal(norm_sig, sig[..., 1:] / 65.)
    norm_sig = normalize_data(sig, bval, min_signal=5)
    assert_array_equal(norm_sig[..., -5:], 5 / 65.)

    bval[[0, 1]] = [0, 0]
    norm_sig = normalize_data(sig, bval, min_signal=1)
    assert_array_equal(norm_sig, sig[..., 2:] / 64.5)
    norm_sig = normalize_data(sig, bval, min_signal=5)
    assert_array_equal(norm_sig[..., -5:], 5 / 64.5)

    # A value larger than the b0 mean must normalize to exactly 1 (clipped).
    sig[..., -1] = 100.
    norm_sig = normalize_data(sig, bval, min_signal=1)
    assert_array_equal(norm_sig[..., :-1], sig[..., 2:-1] / 64.5)
    assert_array_equal(norm_sig[..., -1], 1)
Example #6
0
def test_normalize_data():
    """Exercise normalize_data over b0s selected from a b-value array."""

    raw = np.arange(1, 66)[::-1]

    # Error cases: bad length, every volume a b0, no b0 at all, None data.
    bvals = np.zeros(64)
    assert_raises(ValueError, normalize_data, raw, bvals)
    bvals = np.zeros(65)
    assert_raises(ValueError, normalize_data, raw, bvals)
    bvals = np.ones(65)
    assert_raises(ValueError, normalize_data, raw, bvals)
    bvals[0] = 0
    _ = normalize_data(raw, bvals, 1)
    assert_raises(ValueError, normalize_data, None, bvals, 0)

    # One b0: the remaining volumes are divided by raw[0] == 65.
    bvals[[0, 1]] = [0, 1]
    result = normalize_data(raw, bvals, min_signal=1)
    assert_array_equal(result, raw[..., 1:] / 65.)
    result = normalize_data(raw, bvals, min_signal=5)
    assert_array_equal(result[-5:], 5 / 65.)

    # Two b0s: divide by their mean, 64.5.
    bvals[[0, 1]] = [0, 0]
    result = normalize_data(raw, bvals, min_signal=1)
    assert_array_equal(result, raw[..., 2:] / 64.5)
    result = normalize_data(raw, bvals, min_signal=5)
    assert_array_equal(result[-5:], 5 / 64.5)

    # Same expectations on a (2, 3, 65) multi-voxel array.
    raw = raw * np.ones((2, 3, 1))

    bvals[[0, 1]] = [0, 1]
    result = normalize_data(raw, bvals, min_signal=1)
    assert_array_equal(result, raw[..., 1:] / 65.)
    result = normalize_data(raw, bvals, min_signal=5)
    assert_array_equal(result[..., -5:], 5 / 65.)

    bvals[[0, 1]] = [0, 0]
    result = normalize_data(raw, bvals, min_signal=1)
    assert_array_equal(result, raw[..., 2:] / 64.5)
    result = normalize_data(raw, bvals, min_signal=5)
    assert_array_equal(result[..., -5:], 5 / 64.5)

    # Values above the b0 mean come back clipped to exactly 1.
    raw[..., -1] = 100.
    result = normalize_data(raw, bvals, min_signal=1)
    assert_array_equal(result[..., :-1], raw[..., 2:-1] / 64.5)
    assert_array_equal(result[..., -1], 1)
Example #7
0
    def _voxelwise_fit(self, data, mask=None):
        '''
        Fit fODF and GM/CSF volume fractions voxelwise.

        Parameters
        ----------
        data : ndarray ([x, y, z], N)
            Signal values for each voxel.
        mask : ndarray ([x, y, z]), optional
            Binary mask specifying voxels of interest with 1; results will only
            be fit at these voxels (0 elsewhere). If `None`, fits all voxels.
            Default: None.

        Returns
        -------
        model_fit : RumbaFit
            Fit object storing model parameters.

        Raises
        ------
        ValueError
            If `mask` does not match the leading dimensions of `data`.

        '''

        if mask is None:  # default mask includes all voxels
            mask = np.ones(data.shape[:-1])

        # (Fixed: the message previously contained a stray 'f' inside the
        # string, e.g. "dimensions are f(...)", from a misplaced f-string
        # prefix.)
        if data.shape[:-1] != mask.shape:
            raise ValueError("Mask shape should match first dimensions of " +
                             f"data, but data dimensions are {data.shape} " +
                             f"while mask dimensions are {mask.shape}")

        # Deconvolution kernel built from the tissue response functions.
        self.kernel = generate_kernel(self.gtab, self.sphere, self.wm_response,
                                      self.gm_response, self.csf_response)

        # Per voxel: one value per sphere vertex plus two extra entries
        # (the GM and CSF fractions named in the docstring).
        model_params = np.zeros(data.shape[:-1] +
                                (len(self.sphere.vertices) + 2, ))

        for ijk in np.ndindex(data.shape[:-1]):
            if mask[ijk]:

                vox_data = data[ijk]
                # Normalize data to mean b0 image
                vox_data = normalize_data(vox_data,
                                          self.where_b0s,
                                          min_signal=_EPS)
                # Rearrange data to match corrected gradient table
                vox_data = np.concatenate(([1], vox_data[self.where_dwi]))
                vox_data[vox_data > 1] = 1  # clip values between 0 and 1

                # Fitting
                model_param = rumba_deconv(vox_data, self.kernel, self.n_iter,
                                           self.recon_type, self.n_coils)

                model_params[ijk] = model_param

        model_fit = RumbaFit(self, model_params)
        return model_fit
Example #8
0
def test_normalize_data():
    """Check b0 normalization when b0s are picked out by b-values.

    Unlike the index-dropping variant, this normalize_data keeps every
    volume in the output, so the full array is compared.
    """

    signal = np.arange(1, 66)[::-1]

    # Mismatched-length and all-DWI b-value arrays must be rejected, as
    # must a None signal.
    bvals = np.repeat([0, 1000], [2, 20])
    assert_raises(ValueError, normalize_data, signal, bvals)
    bvals = np.ones(65) * 1000
    assert_raises(ValueError, normalize_data, signal, bvals)
    bvals = np.repeat([0, 1], [1, 64])
    _ = normalize_data(signal, bvals, 1)
    assert_raises(ValueError, normalize_data, None, bvals, 0)

    # One b0 volume -> divide by signal[0] == 65.
    bvals[[0, 1]] = [0, 1]
    scaled = normalize_data(signal, bvals, min_signal=1)
    assert_array_equal(scaled, signal / 65.0)
    scaled = normalize_data(signal, bvals, min_signal=5)
    assert_array_equal(scaled[-5:], 5 / 65.0)

    # Two b0 volumes -> divide by their mean, 64.5.
    bvals[[0, 1]] = [0, 0]
    scaled = normalize_data(signal, bvals, min_signal=1)
    assert_array_equal(scaled, signal / 64.5)
    scaled = normalize_data(signal, bvals, min_signal=5)
    assert_array_equal(scaled[-5:], 5 / 64.5)

    # Same expectations on a broadcast (2, 3, 65) array.
    signal = signal * np.ones((2, 3, 1))

    bvals[[0, 1]] = [0, 1]
    scaled = normalize_data(signal, bvals, min_signal=1)
    assert_array_equal(scaled, signal / 65.0)
    scaled = normalize_data(signal, bvals, min_signal=5)
    assert_array_equal(scaled[..., -5:], 5 / 65.0)

    bvals[[0, 1]] = [0, 0]
    scaled = normalize_data(signal, bvals, min_signal=1)
    assert_array_equal(scaled, signal / 64.5)
    scaled = normalize_data(signal, bvals, min_signal=5)
    assert_array_equal(scaled[..., -5:], 5 / 64.5)
Example #9
0
def test_normalize_data():
    """Normalization by b0 mean, with b0 volumes selected via b-values."""

    dwi = np.arange(1, 66)[::-1]

    # Rejected inputs: wrong-length bvals, bvals without any b0, None data.
    b = np.repeat([0, 1000], [2, 20])
    assert_raises(ValueError, normalize_data, dwi, b)
    b = np.ones(65) * 1000
    assert_raises(ValueError, normalize_data, dwi, b)
    b = np.repeat([0, 1], [1, 64])
    _ = normalize_data(dwi, b, 1)
    assert_raises(ValueError, normalize_data, None, b, 0)

    # Single b0 at index 0: every volume is divided by dwi[0] == 65.
    b[[0, 1]] = [0, 1]
    out = normalize_data(dwi, b, min_signal=1)
    assert_array_equal(out, dwi / 65.)
    out = normalize_data(dwi, b, min_signal=5)
    assert_array_equal(out[-5:], 5 / 65.)

    # b0s at indices 0 and 1: divisor becomes their mean, 64.5.
    b[[0, 1]] = [0, 0]
    out = normalize_data(dwi, b, min_signal=1)
    assert_array_equal(out, dwi / 64.5)
    out = normalize_data(dwi, b, min_signal=5)
    assert_array_equal(out[-5:], 5 / 64.5)

    # Repeat on (2, 3, 65) multi-voxel data.
    dwi = dwi * np.ones((2, 3, 1))

    b[[0, 1]] = [0, 1]
    out = normalize_data(dwi, b, min_signal=1)
    assert_array_equal(out, dwi / 65.)
    out = normalize_data(dwi, b, min_signal=5)
    assert_array_equal(out[..., -5:], 5 / 65.)

    b[[0, 1]] = [0, 0]
    out = normalize_data(dwi, b, min_signal=1)
    assert_array_equal(out, dwi / 64.5)
    out = normalize_data(dwi, b, min_signal=5)
    assert_array_equal(out[..., -5:], 5 / 64.5)
Example #10
0
def simple_tracking_function(data, fa, bval, bvec, seed_mask, start_steps,
                             voxel_size, density):
    """An example of a simple tracking function using the tools in dipy

    This tracking function uses the SlowAdcOpdfModel to fit diffusion data. By
    using the ClosestPeakSelector, the function tracks along the peak of Opdf
    closest to the incoming direction. It also uses the BoundryIntegrator to
    integrate the streamlines and NearestNeighborInterpolator to interpolate
    the data. The ResidualBootstrap means the tracks are probabilistic, not
    deterministic.
    """

    # The interpolator lets us index the dwi data in continuous space;
    # only voxels with FA above .2 are considered part of the data.
    data_mask = fa > .2
    normalized_data = normalize_data(data, bval)
    interpolator = NearestNeighborInterpolator(normalized_data, voxel_size,
                                               data_mask)

    # The model fits the dwi data and can resolve crossing fibers --
    # see the SlowAdcOpdfModel documentation for details.
    model = SlowAdcOpdfModel(6, bval, bvec, .006)
    vert, edges, faces = create_half_unit_sphere(4)
    model.set_sampling_points(vert, edges)

    # The residual-bootstrap wrapper returns a sample from the bootstrap
    # distribution instead of the raw data, which is what makes the
    # resulting tracks probabilistic.
    min_signal = normalized_data.min()
    B = model.B
    wrapped_interp = ResidualBootstrapWrapper(interpolator, B, min_signal)


    # The peak selector returns the peak closest to the incoming direction
    # when in voxels with multiple peaks; directions differing by more than
    # angle_limit degrees are rejected.
    peak_finder = ClosestPeakSelector(model, wrapped_interp)
    peak_finder.angle_limit = 60

    seeds = seeds_from_mask(seed_mask, density, voxel_size)

    # The propagator integrates the streamlines from the seed points.
    propogator = BoundryIntegrator(voxel_size)
    tracks = generate_streamlines(peak_finder, propogator, seeds, start_steps)

    return tracks
Example #11
0
def simple_tracking_function(data, fa, bval, bvec, seed_mask, start_steps,
                             voxel_size, density):
    """Track probabilistic streamlines through `data` using dipy tools.

    A SlowAdcOpdfModel is fit to the diffusion data; at each step the
    ClosestPeakSelector follows the Opdf peak nearest the incoming
    direction. Streamlines are integrated by the BoundryIntegrator, the
    data are interpolated with a NearestNeighborInterpolator, and the
    ResidualBootstrapWrapper makes the resulting tracks probabilistic
    rather than deterministic.
    """
    # Interpolator for indexing the dwi data in continuous space,
    # restricted to voxels with FA above 0.2.
    fa_mask = fa > .2
    normed = normalize_data(data, bval)
    interp = NearestNeighborInterpolator(normed, voxel_size, fa_mask)

    # The model fits the dwi data and can resolve crossing fibers
    # (see the SlowAdcOpdfModel documentation).
    model = SlowAdcOpdfModel(6, bval, bvec, .006)
    vertices, edge_list, _faces = create_half_unit_sphere(4)
    model.set_sampling_points(vertices, edge_list)

    # Sample from the residual-bootstrap distribution instead of
    # returning the raw data.
    floor_signal = normed.min()
    boot_interp = ResidualBootstrapWrapper(interp, model.B, floor_signal)

    # In voxels with multiple peaks, pick the one closest to the
    # incoming direction.
    selector = ClosestPeakSelector(model, boot_interp)
    selector.angle_limit = 60

    seed_points = seeds_from_mask(seed_mask, density, voxel_size)

    # Integrate the streamlines outward from the seed points.
    integrator = BoundryIntegrator(voxel_size)
    return generate_streamlines(selector, integrator, seed_points,
                                start_steps)
Example #12
0
def main():
    """Fit a spherical-harmonic basis to normalized DWI data and write an
    anisotropic power map to disk as NIfTI.

    NOTE(review): everything from ``pl.makeFolder(saveDir)`` onward refers
    to names that are undefined in this function (``pl``, ``saveDir``,
    ``diffusionDir``, ``dwi``, ``os``, ``shm``, ``order``) -- it looks
    like a second, unrelated snippet was appended to this one. Verify
    against the original source before relying on that portion.
    """
    params = readArgs()
    # Read arguments from the command line and validate them.
    read_args = params.collect_args()
    params.check_args(read_args)

    # Load the DWI and brain-mask images.
    dwi_img = nib.load(params.dwi_)
    mask_img = nib.load(params.mask_)

    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(params.bval_, params.bvec_)

    # Build the gradient table; volumes with b <= 25 are treated as b0s.
    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs, b0_threshold=25)

    # Pull the voxel data out of the image objects...
    dwi_data = dwi_img.get_data()
    mask_data = mask_img.get_data()
    # ...and keep the affine for writing the result later.
    img_affine = dwi_img.affine

    from dipy.data import get_sphere
    sphere = get_sphere('repulsion724')

    # Zero out voxels that fall outside the brain mask.
    from dipy.segment.mask import applymask
    dwi_data = applymask(dwi_data, mask_data)

    printfl('dwi_data.shape (%d, %d, %d, %d)' % dwi_data.shape)
    printfl('\nYour bvecs look like this:{0}'.format(bvecs))
    printfl('\nYour bvals look like this:{0}\n'.format(bvals))

    from dipy.reconst.shm import anisotropic_power, sph_harm_lookup, smooth_pinv, normalize_data
    from dipy.core.sphere import HemiSphere

    # Normalize by the b0 signal, then drop the b0 volumes themselves.
    smooth = 0.0
    normed_data = normalize_data(dwi_data, gtab.b0s_mask)
    normed_data = normed_data[..., np.where(1 - gtab.b0s_mask)[0]]

    # Rebuild a gradient table containing only the DWI (non-b0) volumes.
    from dipy.core.gradients import gradient_table_from_bvals_bvecs
    gtab2 = gradient_table_from_bvals_bvecs(
        gtab.bvals[np.where(1 - gtab.b0s_mask)[0]],
        gtab.bvecs[np.where(1 - gtab.b0s_mask)[0]])

    # Evaluate the default SH basis at the acquisition directions.
    signal_native_pts = HemiSphere(xyz=gtab2.bvecs)
    sph_harm_basis = sph_harm_lookup.get(None)
    Ba, m, n = sph_harm_basis(params.sh_order_, signal_native_pts.theta,
                              signal_native_pts.phi)

    # Regularization term for the pseudo-inverse (smooth == 0 disables it).
    L = -n * (n + 1)
    invB = smooth_pinv(Ba, np.sqrt(smooth) * L)

    # fit SH basis to DWI signal
    normed_data_sh = np.dot(normed_data, invB.T)

    # Compute the anisotropic power map from the SH coefficients.
    printfl("fitting power map")
    pow_map = anisotropic_power(normed_data_sh,
                                norm_factor=0.00001,
                                power=2,
                                non_negative=True)

    pow_map_img = nib.Nifti1Image(pow_map.astype(np.float32), img_affine)
    # Build the output filename, e.g. <output>_powMap_sh6.nii.gz.
    out_name = ''.join(
        [params.output_, '_powMap_sh',
         str(params.sh_order_), '.nii.gz'])

    printfl("writing power map to: {}".format(out_name))
    nib.save(pow_map_img, out_name)
    # NOTE(review): the remainder uses undefined names -- see docstring.
    pl.makeFolder(saveDir)
    dwiDir = os.path.join(diffusionDir, 'data.nii.gz')
    bvalDir = os.path.join(diffusionDir, 'bvals')
    bvecDir = os.path.join(diffusionDir, 'bvecs')
    maskDir = os.path.join(dwi, 'nodif_brain_mask.nii.gz')

    # Load data
    img = nib.load(dwiDir)
    data = img.get_data()
    maskImg = nib.load(maskDir)
    maskData = maskImg.get_data()
    bvals, bvecs = read_bvals_bvecs(bvalDir, bvecDir)
    gtab = gradient_table(bvals, bvecs, b0_threshold=50)

    # Normalise data
    dataNormalised = shm.normalize_data(data, gtab.b0s_mask)
    # dataNormalisedNii = nib.Nifti1Image(dataNormalised, img.get_affine(),
    # img.get_header())
    # dataNormalisedNii.to_filename('dataNormalised.nii.gz')

    # Convert bvecs to spherical angles for the DWI directions only.
    where_dwis = 1 - gtab.b0s_mask
    x = gtab.gradients[where_dwis == True, 0]
    y = gtab.gradients[where_dwis == True, 1]
    z = gtab.gradients[where_dwis == True, 2]
    r, theta, phi = shm.cart2sphere(x, y, z)

    # Make design matrix
    B, m, n = shm.real_sym_sh_basis(order, theta[:, None], phi[:, None])
    Binverse = shm.pinv(B)