Example 1
def test_bounding_box():
    vol = np.zeros((100, 100, 50), dtype=int)

    # Check the more usual case
    vol[10:90, 11:40, 5:33] = 3
    mins, maxs = bounding_box(vol)
    assert_equal(mins, [10, 11, 5])
    assert_equal(maxs, [90, 40, 33])

    # Check a 2d case
    mins, maxs = bounding_box(vol[10])
    assert_equal(mins, [11, 5])
    assert_equal(maxs, [40, 33])

    vol[:] = 0
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        # Trigger a warning.
        num_warns = len(w)
        mins, maxs = bounding_box(vol)
        # Assert number of warnings has gone up by 1
        assert_equal(len(w), num_warns + 1)

        # Check that an empty array returns zeros for both min & max
        assert_equal(mins, [0, 0, 0])
        assert_equal(maxs, [0, 0, 0])

        # Check the 2d case
        mins, maxs = bounding_box(vol[0])
        assert_equal(len(w), num_warns + 2)
        assert_equal(mins, [0, 0])
        assert_equal(maxs, [0, 0])
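
The assertions above pin down the convention used by `bounding_box`: for each axis, `mins` holds the first index containing nonzero data and `maxs` holds one past the last such index, and an empty volume triggers a warning and returns zeros. A minimal re-implementation sketch (illustrative only, not dipy's actual code) that satisfies this test:

import warnings

import numpy as np


def bounding_box_sketch(vol):
    # mins[i] = first nonzero index along axis i; maxs[i] = one past the last,
    # which is why the test above expects [10, 11, 5] and [90, 40, 33].
    nonzero = np.nonzero(vol)
    if any(idx.size == 0 for idx in nonzero):
        warnings.warn("No data found in the volume to bound.")
        return [0] * vol.ndim, [0] * vol.ndim
    mins = [int(idx.min()) for idx in nonzero]
    maxs = [int(idx.max()) + 1 for idx in nonzero]
    return mins, maxs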
Example 2
def test_mask():
    vol = np.zeros((30, 30, 30))
    vol[15, 15, 15] = 1
    struct = generate_binary_structure(3, 1)
    voln = binary_dilation(vol, structure=struct, iterations=4).astype('f4')
    initial = np.sum(voln > 0)
    mask = voln.copy()
    thresh = otsu(mask)
    mask = mask > thresh
    initial_otsu = np.sum(mask > 0)
    assert_equal(initial_otsu, initial)

    mins, maxs = bounding_box(mask)
    voln_crop = crop(mask, mins, maxs)
    initial_crop = np.sum(voln_crop > 0)
    assert_equal(initial_crop, initial)

    applymask(voln, mask)
    final = np.sum(voln > 0)
    assert_equal(final, initial)

    # Test multi_median.
    median_test = np.arange(25).reshape(5, 5)
    median_control = median_test.copy()
    medianradius = 3
    median_test = multi_median(median_test, medianradius, 3)

    medarr = np.ones_like(median_control.shape) * ((medianradius * 2) + 1)
    median_filter(median_control, medarr, output=median_control)
    median_filter(median_control, medarr, output=median_control)
    median_filter(median_control, medarr, output=median_control)
    assert_equal(median_test, median_control)
Example 3
    def bound_data(self):

        min_indices, max_indices = bounding_box(self.mask)

        min_x, min_y, min_z = min_indices
        max_x, max_y, max_z = max_indices

        self.data = self.data[min_x:max_x, min_y:max_y, min_z:max_z, :]
        self.mask = self.mask[min_x:max_x, min_y:max_y, min_z:max_z]
Example 4
def compute_nifti_bounding_box(img):
    """Finds bounding box from data and transforms it in world space for use
    on data with different attributes like voxel size."""
    data = img.get_fdata(dtype=np.float32, caching='unchanged')
    affine = img.affine
    voxel_size = img.header.get_zooms()[0:3]

    voxel_bb_mins, voxel_bb_maxs = bounding_box(data)

    world_bb_mins = voxel_to_world(voxel_bb_mins, affine)
    world_bb_maxs = voxel_to_world(voxel_bb_maxs, affine)
    wbbox = WorldBoundingBox(world_bb_mins, world_bb_maxs, voxel_size)

    return wbbox
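
The `voxel_to_world` helper used above is not shown in this listing; for a NIfTI image it is typically just an application of the image affine to the voxel indices. A hedged sketch of such a helper (an assumption for illustration, not necessarily this project's implementation):

import numpy as np
from nibabel.affines import apply_affine


def voxel_to_world_sketch(voxel_coords, affine):
    # Map voxel indices (i, j, k) to world/scanner coordinates (x, y, z) by
    # applying the 4x4 image affine in homogeneous coordinates.
    return apply_affine(affine, np.asarray(voxel_coords, dtype=float))

`WorldBoundingBox` is presumably just a small container holding the two world-space corners and the voxel size.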
Example 5
def compute_masks_crop_bet(reference_scan, other_scan1, ref_scan_path,
                           ref_dir_path, other_dir_path, starting_dir):
    # Use bet for generating brain masks for each of the scans

    # Get the mask of the reference scan
    os.chdir(ref_dir_path)
    subprocess.call([
        "bet",
        os.path.basename(ref_scan_path), "Brain_temp", "-m", "-n", "-R", "-f",
        "0.2", "-t"
    ])
    reference_scan_mask = nib.load("Brain_temp_mask.nii.gz")
    reference_scan_mask = reference_scan_mask.get_data()
    # Delete the created files
    os.remove('Brain_temp.nii.gz')
    os.remove('Brain_temp_mask.nii.gz')

    # Go back to the original directory. We do this because the file paths
    # specified are not required to be absolute.
    os.chdir(starting_dir)

    # Similarly get the masks of the other scans
    os.chdir(other_dir_path)
    subprocess.call([
        "bet", "Full_Registered_Scan.nii.gz", "Brain_temp", "-m", "-n", "-R",
        "-f", "0.2", "-t"
    ])
    other_scan1_mask = nib.load("Brain_temp_mask.nii.gz")
    other_scan1_mask = other_scan1_mask.get_data()
    os.remove('Brain_temp.nii.gz')
    os.remove('Brain_temp_mask.nii.gz')

    # Get the intersection of the masks
    mask_union = np.logical_and(reference_scan_mask, other_scan1_mask)

    # Apply the combined mask to the scans
    reference_scan_brain = applymask(reference_scan, mask_union)
    other_scan1_brain = applymask(other_scan1, mask_union)

    # Crop the reference scan to the bounding box of the combined mask
    (mins, maxs) = bounding_box(mask_union)
    reference_scan_brain = crop(reference_scan_brain, mins, maxs)

    return (reference_scan_brain, other_scan1_brain)
Example 6
and (0, 0.1) in the y and z axes respectively.
These values work well in practice to isolate the very RED voxels of the cfa map.

Then, as assurance, we want just RED voxels in the CC (there could be
noisy red voxels around the brain mask and we don't want those). Unless the brain
acquisition was badly aligned, the CC is always close to the mid-sagittal slice.

The following lines perform these two operations and then save the computed mask.
"""

print('Computing worst-case/best-case SNR using the corpus callosum...')

threshold = (0.6, 1, 0, 0.1, 0, 0.1)
CC_box = np.zeros_like(data[..., 0])

mins, maxs = bounding_box(mask)
mins = np.array(mins)
maxs = np.array(maxs)
diff = (maxs - mins) // 4
bounds_min = mins + diff
bounds_max = maxs - diff
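# For illustration with hypothetical numbers: if the brain mask bounding box
# were mins = [10, 12, 8] and maxs = [90, 100, 60], then diff = [20, 22, 13],
# so the ROI below would span [30, 34, 21] to [70, 78, 47], i.e. the central
# half of the masked volume along each axis, which comfortably contains the CC.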

CC_box[bounds_min[0]:bounds_max[0], bounds_min[1]:bounds_max[1],
       bounds_min[2]:bounds_max[2]] = 1

mask_cc_part, cfa = segment_from_cfa(tensorfit,
                                     CC_box,
                                     threshold,
                                     return_cfa=True)

cfa_img = nib.Nifti1Image((cfa * 255).astype(np.uint8), affine)
Example 7
def rumba_deconv_global(data,
                        kernel,
                        mask,
                        n_iter=600,
                        recon_type='smf',
                        n_coils=1,
                        R=1,
                        use_tv=True,
                        verbose=False):
    r'''
    Fit fODF for all voxels simultaneously using RUMBA-SD.

    Deconvolves the kernel from the diffusion-weighted signal at each voxel by
    computing a maximum likelihood estimation of the fODF [1]_. Global fitting
    also permits the use of total variation regularization (RUMBA-SD + TV). The
    spatial dependence introduced by TV promotes smoother solutions (i.e.
    prevents oscillations), while still allowing for sharp discontinuities
    [2]_. This promotes smoothness and continuity along individual tracts while
    preventing smoothing of adjacent tracts.

    Generally, global_fit will proceed more quickly than the voxelwise fit
    provided that the computer has adequate RAM (>= 16 GB should be more than
    sufficient).

    Parameters
    ----------
    data : 4d ndarray (x, y, z, N)
        Signal values for entire brain. None of the volume dimensions x, y, z
        can be 1 if TV regularization is required.
    kernel : 2d ndarray (N, M)
        Deconvolution kernel mapping volume fractions of the M compartments to
        N-length signal. Last two columns should be for GM and CSF.
    mask : 3d ndarray (x, y, z)
        Binary mask specifying voxels of interest with 1; fODF will only be
        fit at these voxels (0 elsewhere).
    n_iter : int, optional
        Number of iterations for fODF estimation. Must be a positive int.
        Default: 600
    recon_type : {'smf', 'sos'}, optional
        MRI reconstruction method: spatial matched filter (SMF) or
        sum-of-squares (SoS). SMF reconstruction generates Rician noise while
        SoS reconstruction generates Noncentral Chi noise. Default: 'smf'
    n_coils : int, optional
        Number of coils in MRI scanner -- only relevant in SoS reconstruction.
        Must be a positive int. Default: 1
    R : int, optional
        Acceleration factor of the acquisition. When R > 1, the TV
        regularization strength is set voxelwise rather than globally.
        Default: 1
    use_tv : bool, optional
        If true, applies total variation regularization. This requires a brain
        volume with no singleton dimensions. Default: True
    verbose : bool, optional
        If true, logs updates on estimated signal-to-noise ratio after each
        iteration. Default: False

    Returns
    -------
    fit_array : 4d ndarray (x, y, z, M)
        fODF and GM/CSF volume fractions computed for each voxel. First M-2
        components are fODF, while the last two are GM and CSF, respectively.

    Notes
    -----
    TV modifies our cost function as follows:

    $J(\textbf{f}) = -\log{P(\textbf{S}|\textbf{H}, \textbf{f}, \sigma^2, n)} +
    \alpha_{TV}TV(\textbf{f})$

    where the first term is the negative log likelihood described in the notes
    of `rumba_deconv`, and the second term is the TV energy, or the sum of
    gradient absolute values for the fODF across the entire brain. This results
    in a new multiplicative factor in the iterative scheme, now becoming:

    $\textbf{f}^{k+1} = \textbf{f}^k \circ \frac{\textbf{H}^T\left[\textbf{S}
    \circ\frac{I_n(\textbf{S}\circ\textbf{Hf}^k/\sigma^2)} {I_{n-1}(\textbf{S}
    \circ\textbf{Hf}^k/\sigma^2)} \right ]} {\textbf{H}^T\textbf{Hf}^k}\circ
    \textbf{R}^k$

    where $\textbf{R}^k$ is computed voxelwise by:

    $(\textbf{R}^k)_j = \frac{1}{1 - \alpha_{TV}div\left(\frac{\triangledown[
    \textbf{f}^k_{3D}]_j}{\lvert\triangledown[\textbf{f}^k_{3D}]_j \rvert}
    \right)\biggr\rvert_{x, y, z}}$

    Here, $\triangledown$ is the symbol for the 3D gradient at any voxel.

    The regularization strength, $\alpha_{TV}$ is updated after each iteration
    by the discrepancy principle -- specifically, it is selected to match the
    estimated variance after each iteration [3]_.

    References
    ----------
    .. [1] Canales-Rodríguez, E. J., Daducci, A., Sotiropoulos, S. N., Caruyer,
           E., Aja-Fernández, S., Radua, J., Mendizabal, J. M. Y.,
           Iturria-Medina, Y., Melie-García, L., Alemán-Gómez, Y., Thiran,
           J.-P., Sarró, S., Pomarol-Clotet, E., & Salvador, R. (2015).
           Spherical Deconvolution of Multichannel Diffusion MRI Data with
           Non-Gaussian Noise Models and Spatial Regularization. PLOS ONE,
           10(10), e0138910. https://doi.org/10.1371/journal.pone.0138910

    .. [2] Rudin, L. I., Osher, S., & Fatemi, E. (1992). Nonlinear total
           variation based noise removal algorithms. Physica D: Nonlinear
           Phenomena, 60(1), 259–268.
           https://doi.org/10.1016/0167-2789(92)90242-F

    .. [3] Chambolle A. An algorithm for total variation minimization and
           applications. Journal of Mathematical Imaging and Vision. 2004;
           20:89–97.
    '''

    # Crop data to reduce memory consumption
    dim_orig = data.shape
    ixmin, ixmax = bounding_box(mask)
    data = crop(data, ixmin, ixmax)
    mask = crop(mask, ixmin, ixmax)

    if np.any(np.array(data.shape[:3]) == 1) and use_tv:
        raise ValueError("Cannot use TV regularization if any spatial" +
                         "dimensions are 1; " +
                         f"provided dimensions were {data.shape[:3]}")

    epsilon = 1e-7

    n_grad = kernel.shape[0]  # gradient directions
    n_comp = kernel.shape[1]  # number of compartments
    dim = data.shape
    n_v_tot = np.prod(dim[:3])  # total number of voxels

    # Initial guess is iso-probable
    fodf0 = np.ones((n_comp, 1), dtype=np.float32)
    fodf0 = fodf0 / np.sum(fodf0, axis=0)

    if recon_type == "smf":
        n_order = 1  # Rician noise (same as Noncentral Chi with order 1)
    elif recon_type == "sos":
        n_order = n_coils  # Noncentral Chi noise (order = # of coils)
    else:
        raise ValueError("Invalid recon_type. Should be 'smf' or 'sos', " +
                         f"received f{recon_type}")

    mask_vec = np.ravel(mask)
    # Indices of target voxels
    index_mask = np.atleast_1d(np.squeeze(np.argwhere(mask_vec)))
    n_v_true = len(index_mask)  # number of target voxels

    data_2d = np.zeros((n_v_true, n_grad), dtype=np.float32)
    for i in range(n_grad):
        data_2d[:, i] = np.ravel(
            data[:, :, :, i])[index_mask]  # only keep voxels of interest

    data_2d = data_2d.T
    fodf = np.tile(fodf0, (1, n_v_true))
    reblurred = np.matmul(kernel, fodf)

    # For use later
    kernel_t = kernel.T
    f_zero = 0

    # Initialize algorithm parameters
    sigma0 = 1 / 15
    sigma2 = sigma0**2
    tv_lambda = sigma2  # initial guess for TV regularization strength

    # Expand into matrix form for iterations
    sigma2 = sigma2 * np.ones(data_2d.shape, dtype=np.float32)
    tv_lambda_aux = np.zeros((n_v_tot), dtype=np.float32)

    reblurred_s = data_2d * reblurred / sigma2

    for i in range(n_iter):
        fodf_i = fodf
        ratio = mbessel_ratio(n_order, reblurred_s).astype(np.float32)
        rl_factor = np.matmul(kernel_t, data_2d * ratio) / \
            (np.matmul(kernel_t, reblurred) + _EPS)

        if use_tv:  # apply TV regularization
            tv_factor = np.ones(fodf_i.shape, dtype=np.float32)
            fodf_4d = _reshape_2d_4d(fodf_i.T, mask)
            # Compute gradient, divergence
            gr = _grad(fodf_4d)
            d_inv = 1 / np.sqrt(epsilon**2 + np.sum(gr**2, axis=3))
            gr_norm = (gr * d_inv[:, :, :, None, :])
            div_f = _divergence(gr_norm)
            g0 = np.abs(1 - tv_lambda * div_f)
            tv_factor_4d = 1 / (g0 + _EPS)

            for j in range(n_comp):
                tv_factor_1d = np.ravel(tv_factor_4d[:, :, :, j])[index_mask]
                tv_factor[j, :] = tv_factor_1d

            # Apply TV regularization to iteration factor
            rl_factor = rl_factor * tv_factor

        fodf = fodf_i * rl_factor  # result of iteration
        fodf = np.maximum(f_zero, fodf)  # positivity constraint

        # Update other variables
        reblurred = np.matmul(kernel, fodf)
        reblurred_s = data_2d * reblurred / sigma2

        # Iterate variance
        sigma2_i = (1 / (n_grad * n_order)) * \
            np.sum((data_2d**2 + reblurred**2) / 2 - (
                sigma2 * reblurred_s) * ratio, axis=0)
        sigma2_i = np.minimum((1 / 8)**2, np.maximum(sigma2_i, (1 / 80)**2))

        if verbose:
            logger.info("Iteration %d of %d", i + 1, n_iter)

            snr_mean = np.mean(1 / np.sqrt(sigma2_i))
            snr_std = np.std(1 / np.sqrt(sigma2_i))
            logger.info("Mean SNR (S0/sigma) estimated to be %.3f +/- %.3f",
                        snr_mean, snr_std)
        # Expand into matrix
        sigma2 = np.tile(sigma2_i[None, :], (data_2d.shape[0], 1))

        # Update TV regularization strength using the discrepancy principle
        if use_tv:
            if R == 1:
                tv_lambda = np.mean(sigma2_i)

                if tv_lambda < (1 / 30)**2:
                    tv_lambda = (1 / 30)**2
            else:  # different factor for each voxel
                tv_lambda_aux[index_mask] = sigma2_i
                tv_lambda = np.reshape(tv_lambda_aux, (*dim[:3], 1))

    fodf = fodf.astype(np.float64)
    fodf = fodf / (np.sum(fodf, axis=0)[None, ...] + _EPS)  # normalize fODF

    # Extract compartments
    fit_array = np.zeros((*dim_orig[:3], n_comp))
    _reshape_2d_4d(fodf.T,
                   mask,
                   out=fit_array[ixmin[0]:ixmax[0], ixmin[1]:ixmax[1],
                                 ixmin[2]:ixmax[2]])

    return fit_array
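
The function above depends on the private helpers `_grad`, `_divergence`, `_reshape_2d_4d` and `mbessel_ratio`, none of which appear in this listing. Below is a minimal sketch of what the gradient/divergence pair and the Bessel ratio typically look like; this is an assumption for illustration (forward differences with a matching backward-difference divergence, and a SciPy-based Bessel ratio), not dipy's actual implementation. The shapes follow the calls above: `gr` carries its gradient-direction axis at position 3.

import numpy as np
from scipy.special import ive


def _grad_sketch(f_4d):
    # Forward-difference spatial gradient of a (x, y, z, n_comp) array.
    # Returns shape (x, y, z, 3, n_comp), so summing over axis 3, as done
    # above when computing d_inv, sums over the three spatial directions.
    x, y, z, c = f_4d.shape
    gr = np.zeros((x, y, z, 3, c), dtype=f_4d.dtype)
    gr[:-1, :, :, 0, :] = f_4d[1:] - f_4d[:-1]
    gr[:, :-1, :, 1, :] = f_4d[:, 1:] - f_4d[:, :-1]
    gr[:, :, :-1, 2, :] = f_4d[:, :, 1:] - f_4d[:, :, :-1]
    return gr


def _divergence_sketch(gr):
    # Discrete divergence paired with the forward-difference gradient above
    # (its negative adjoint); maps (x, y, z, 3, n_comp) to (x, y, z, n_comp).
    div = np.zeros(gr.shape[:3] + (gr.shape[4],), dtype=gr.dtype)
    for axis in range(3):
        comp = gr[:, :, :, axis, :]
        d = np.zeros_like(div)
        inner = [slice(None)] * 4
        inner[axis] = slice(1, None)
        prev = [slice(None)] * 4
        prev[axis] = slice(None, -1)
        d[tuple(inner)] = comp[tuple(inner)] - comp[tuple(prev)]
        first = [slice(None)] * 4
        first[axis] = 0
        d[tuple(first)] = comp[tuple(first)]
        div += d
    return div


def mbessel_ratio_sketch(n, x):
    # I_n(x) / I_{n-1}(x), using exponentially scaled Bessel functions so the
    # common exp(x) factor cancels; the real helper may use a faster
    # approximation for large arguments.
    return ive(n, x) / ive(n - 1, x)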
Example 8
fbvec = input_directory + "/bvecs"

# Load the data
img = nib.load(fdwi)
img_data = img.get_data()
# Load the mask
mask = nib.load(fmask)
mask_data = mask.get_data()

# Load bvals, bvecs and build the gradient table
bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
gtab = gradient_table(bvals, bvecs)

# Apply the mask to the volume
mask_boolean = mask_data > 0.01
mins, maxs = bounding_box(mask_boolean)
mask_boolean = crop(mask_boolean, mins, maxs)
cropped_volume = crop(img_data, mins, maxs)
data = applymask(cropped_volume, mask_boolean)

fw_runner = FreewaterRunner(data, gtab)
fw_runner.LOG = True  # turn on logging for this example
fw_runner.run_model(num_iter=100, dt=0.001)

# Save the loss plot to the working directory
# fw_runner.plot_loss()

# Save the free water map somewhere
fw_file = output_directory + "/freewater.nii.gz"
nib.save(nib.Nifti1Image(fw_runner.get_fw_map(), img.affine), fw_file)
Example 9
    def run(self, data_files, bvals_files, bvecs_files, mask_file,
            bbox_threshold=[0.6, 1, 0, 0.1, 0, 0.1], out_dir='',
            out_file='product.json', out_mask_cc='cc.nii.gz',
            out_mask_noise='mask_noise.nii.gz'):
        """Compute the signal-to-noise ratio in the corpus callosum.

        Parameters
        ----------
        data_files : string
            Path to the dwi.nii.gz file. This path may contain wildcards to
            process multiple inputs at once.
        bvals_files : string
            Path of bvals.
        bvecs_files : string
            Path of bvecs.
        mask_file : string
            Path of a brain mask file.
        bbox_threshold : variable float, optional
            Threshold for the bounding box, values separated by commas, e.g.
            [0.6,1,0,0.1,0,0.1]. (default (0.6, 1, 0, 0.1, 0, 0.1))
        out_dir : string, optional
            Where the resulting file will be saved. (default '')
        out_file : string, optional
            Name of the result file to be saved. (default 'product.json')
        out_mask_cc : string, optional
            Name of the CC mask volume to be saved (default 'cc.nii.gz')
        out_mask_noise : string, optional
            Name of the mask noise volume to be saved
            (default 'mask_noise.nii.gz')

        """
        io_it = self.get_io_iterator()

        for dwi_path, bvals_path, bvecs_path, mask_path, out_path, \
                cc_mask_path, mask_noise_path in io_it:
            data, affine = load_nifti(dwi_path)
            bvals, bvecs = read_bvals_bvecs(bvals_path, bvecs_path)
            gtab = gradient_table(bvals=bvals, bvecs=bvecs)

            mask, affine = load_nifti(mask_path)

            logging.info('Computing tensors...')
            tenmodel = TensorModel(gtab)
            tensorfit = tenmodel.fit(data, mask=mask)

            logging.info(
                'Computing worst-case/best-case SNR using the CC...')

            if np.ndim(data) == 4:
                CC_box = np.zeros_like(data[..., 0])
            elif np.ndim(data) == 3:
                CC_box = np.zeros_like(data)
            else:
                raise IOError('DWI data has invalid dimensions')

            mins, maxs = bounding_box(mask)
            mins = np.array(mins)
            maxs = np.array(maxs)
            diff = (maxs - mins) // 4
            bounds_min = mins + diff
            bounds_max = maxs - diff

            CC_box[bounds_min[0]:bounds_max[0],
                   bounds_min[1]:bounds_max[1],
                   bounds_min[2]:bounds_max[2]] = 1

            if len(bbox_threshold) != 6:
                raise IOError('bbox_threshold should have 6 float values')

            mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box,
                                                 bbox_threshold,
                                                 return_cfa=True)

            save_nifti(cc_mask_path, mask_cc_part.astype(np.uint8), affine)
            logging.info('CC mask saved as {0}'.format(cc_mask_path))

            mean_signal = np.mean(data[mask_cc_part], axis=0)
            mask_noise = binary_dilation(mask, iterations=10)
            mask_noise[..., :mask_noise.shape[-1]//2] = 1
            mask_noise = ~mask_noise

            save_nifti(mask_noise_path, mask_noise.astype(np.uint8), affine)
            logging.info('Mask noise saved as {0}'.format(mask_noise_path))

            noise_std = np.std(data[mask_noise, :])
            logging.info('Noise standard deviation sigma= ' + str(noise_std))

            idx = np.sum(gtab.bvecs, axis=-1) == 0
            gtab.bvecs[idx] = np.inf
            axis_X = np.argmin(
                np.sum((gtab.bvecs-np.array([1, 0, 0])) ** 2, axis=-1))
            axis_Y = np.argmin(
                np.sum((gtab.bvecs-np.array([0, 1, 0])) ** 2, axis=-1))
            axis_Z = np.argmin(
                np.sum((gtab.bvecs-np.array([0, 0, 1])) ** 2, axis=-1))

            SNR_output = []
            SNR_directions = []
            for direction in ['b0', axis_X, axis_Y, axis_Z]:
                if direction == 'b0':
                    SNR = mean_signal[0]/noise_std
                    logging.info("SNR for the b=0 image is : " + str(SNR))
                else:
                    SNR_directions.append(direction)
                    SNR = mean_signal[direction]/noise_std
                    logging.info("SNR for direction " + str(direction) +
                                 " " + str(gtab.bvecs[direction]) + " is : " +
                                 str(SNR))
                SNR_output.append(SNR)

            data = []
            data.append({
                        'data': str(SNR_output[0]) + ' ' + str(SNR_output[1]) +
                        ' ' + str(SNR_output[2]) + ' ' + str(SNR_output[3]),
                        'directions': 'b0' + ' ' + str(SNR_directions[0]) +
                        ' ' + str(SNR_directions[1]) + ' ' +
                        str(SNR_directions[2])
                        })

            with open(os.path.join(out_dir, out_path), 'w') as myfile:
                json.dump(data, myfile)
Example 10
def main():

    parser = buildArgsParser()
    args = parser.parse_args()

    image = nib.load(args.image)
    affine = image.get_affine()
    data = image.get_data()

    if args.savename is None:
        temp, _ = str.split(os.path.basename(args.image), '.', 1)
        filename = os.path.dirname(os.path.realpath(args.image)) + '/' + temp

    else:
        filename, _ = str.split(args.savename, '.', 1)

    # If the file already exists, check whether the user wants to overwrite
    # it. If not, simply exit.
    filename_CC = filename + '_mask_CC.nii.gz'
    if os.path.exists(filename_CC):

        if not args.overwrite:
            raise ValueError("File " + filename_CC +
                             " already exists. Use -f option to overwrite.")

        print(filename_CC, "already exists and will be overwritten.")

    if args.mask is not None:
        mask = nib.load(args.mask).get_data()
    else:
        b0_mask, mask = median_otsu(data)
        nib.save(nib.Nifti1Image(mask.astype('int16'), affine),
                 filename + '_mask.nii.gz')

    # Rough box estimation of the corpus callosum, since we do not want to
    # include noise that appears on the boundaries of the skull
    CC_box = np.zeros_like(data[..., 0])

    if args.loc is None:
        mins, maxs = bounding_box(mask)
        mins = np.array(mins)
        maxs = np.array(maxs)
        diff = (maxs - mins) // 4
        bounds_min = mins + diff

        # Min in z goes down to the neck, so we need to further restrict the
        # bounding box to avoid picking up random voxels at the bottom
        bounds_min[2] = np.floor(1.5 * bounds_min[2])

        bounds_max = maxs - diff
    else:
        bounds_min = np.array(literal_eval(args.loc))[0::2]
        bounds_max = np.array(literal_eval(args.loc))[1::2]

    CC_box[bounds_min[0]:bounds_max[0], bounds_min[1]:bounds_max[1],
           bounds_min[2]:bounds_max[2]] = 1

    threshold = np.array(literal_eval(args.t))

    print("Threshold in RGB is :", threshold)
    print("ROI used spans from :", bounds_min, "to", bounds_max)

    if args.is_rgb is True:

        # The example works on threshold between 0 and 1, so we divide the RGB
        # by 255, which is the max value
        data = data.astype('float32') / 255.
        CC_mask = segment_from_RGB(data, CC_box, threshold)

    else:
        CC_mask = segment_from_dwi(image, args.bvals, args.bvecs, CC_box,
                                   threshold, mask, filename, args.overwrite)

    nib.save(nib.Nifti1Image(CC_mask, affine), filename_CC)
    print("Mask of the corpus callosum was saved as", filename_CC)
Example 11
    def show(self, rescale=False, fps=10, output=''):
        self._visuVolume = self._qVolume.astype("uint8")

        # Scale data direction wise or globally
        if rescale:
            log.info("Scaling data for every direction individually...")
            for i in range(0, self._qVolume.shape[2]):
                diff_dir_image = np.copy(self._qVolume[:, :, i].astype(float))
                mini, maxi = np.amin(diff_dir_image), np.amax(diff_dir_image)
                self._visuVolume[:, :, i] = (255.0 * (diff_dir_image - mini) /
                                             (maxi - mini)).astype("uint8")
        else:
            image = np.copy(self._qVolume.astype(float))
            mini, maxi = np.amin(image), np.amax(image)
            self._visuVolume = (255.0 * (image - mini) /
                                (maxi - mini)).astype("uint8")

        if output:
            # Output the animation
            images = self._visuVolume
            crop_image = np.copy(images)

            # Cropping volume
            t = 0.05
            log.info("Cropping volume in a bounding box from pixels below " +
                     str(int(t * 100.0)) + " % intensity threshold ...")
            crop_image[crop_image <= int(t * 255.0)] = 0
            min_bounds, max_bounds = mask.bounding_box(crop_image)
            images = mask.crop(images, min_bounds, max_bounds)

            # Swap axes for gif writer
            images = np.swapaxes(images, 0, 2)
            images = np.swapaxes(images, 1, 2)

            if output.find(".gif") < 0:
                output = output + ".gif"
            imageio.mimwrite(output, images)
            log.info(output + " file created.")
        else:
            # Initialize figure
            fig = plt.figure()

            # Add current frame to figure
            plt.subplot(121)
            self._currentFrame = plt.imshow(self._visuVolume[:, :, 0],
                                            cmap='gray')
            if not rescale:
                plt.colorbar()
            self._currentFrame.set_interpolation('nearest')
            plt.axis('off')

            # Add text display to figure
            plt.subplot(122)
            plt.text(0.5,
                     0.7,
                     "Data : " + str(self._data.shape) + "\n" +
                     "Q-Space volume : " + str(self._qVolume.shape),
                     horizontalalignment='center',
                     verticalalignment='center')
            self._currentText = plt.text(
                0.5,
                0.5,
                "Position , action='store_true': Value, Mean\n" +
                str(self._currentPosition),
                horizontalalignment='center',
                verticalalignment='center',
                fontsize='x-large')
            plt.axis('off')

            # Connect the callback functions
            fig.canvas.mpl_connect('motion_notify_event', self._onMove)
            fig.canvas.mpl_connect('button_press_event', self._onClick)
            fig.canvas.mpl_connect('scroll_event', self._onScroll)

            # Create the animation in the figure
            anim = animation.FuncAnimation(fig,
                                           self._updateImage,
                                           frames=self._qVolume.shape[2],
                                           interval=1000.0 / fps)

            plt.show()
Example 12
    def run(self, data_files, bvals_files, bvecs_files, mask_files,
            bbox_threshold=[0.6, 1, 0, 0.1, 0, 0.1], out_dir='',
            out_file='product.json', out_mask_cc='cc.nii.gz',
            out_mask_noise='mask_noise.nii.gz'):
        """Compute the signal-to-noise ratio in the corpus callosum.

        Parameters
        ----------
        data_files : string
            Path to the dwi.nii.gz file. This path may contain wildcards to
            process multiple inputs at once.
        bvals_files : string
            Path of bvals.
        bvecs_files : string
            Path of bvecs.
        mask_files : string
            Path of the brain mask file.
        bbox_threshold : variable float, optional
            Threshold for the bounding box, values separated by commas, e.g.
            [0.6,1,0,0.1,0,0.1]. (default (0.6, 1, 0, 0.1, 0, 0.1))
        out_dir : string, optional
            Where the resulting file will be saved. (default '')
        out_file : string, optional
            Name of the result file to be saved. (default 'product.json')
        out_mask_cc : string, optional
            Name of the CC mask volume to be saved (default 'cc.nii.gz')
        out_mask_noise : string, optional
            Name of the mask noise volume to be saved
            (default 'mask_noise.nii.gz')

        """
        io_it = self.get_io_iterator()

        for dwi_path, bvals_path, bvecs_path, mask_path, out_path, \
                cc_mask_path, mask_noise_path in io_it:
            data, affine = load_nifti(dwi_path)
            bvals, bvecs = read_bvals_bvecs(bvals_path, bvecs_path)
            gtab = gradient_table(bvals=bvals, bvecs=bvecs)

            logging.info('Computing brain mask...')
            _, calc_mask = median_otsu(data)

            mask, affine = load_nifti(mask_path)
            mask = np.array(calc_mask == mask.astype(bool)).astype(int)

            logging.info('Computing tensors...')
            tenmodel = TensorModel(gtab)
            tensorfit = tenmodel.fit(data, mask=mask)

            logging.info(
                'Computing worst-case/best-case SNR using the CC...')

            if np.ndim(data) == 4:
                CC_box = np.zeros_like(data[..., 0])
            elif np.ndim(data) == 3:
                CC_box = np.zeros_like(data)
            else:
                raise IOError('DWI data has invalid dimensions')

            mins, maxs = bounding_box(mask)
            mins = np.array(mins)
            maxs = np.array(maxs)
            diff = (maxs - mins) // 4
            bounds_min = mins + diff
            bounds_max = maxs - diff

            CC_box[bounds_min[0]:bounds_max[0],
                   bounds_min[1]:bounds_max[1],
                   bounds_min[2]:bounds_max[2]] = 1

            if len(bbox_threshold) != 6:
                raise IOError('bbox_threshold should have 6 float values')

            mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box,
                                                 bbox_threshold,
                                                 return_cfa=True)

            save_nifti(cc_mask_path, mask_cc_part.astype(np.uint8), affine)
            logging.info('CC mask saved as {0}'.format(cc_mask_path))

            mean_signal = np.mean(data[mask_cc_part], axis=0)
            mask_noise = binary_dilation(mask, iterations=10)
            mask_noise[..., :mask_noise.shape[-1]//2] = 1
            mask_noise = ~mask_noise

            save_nifti(mask_noise_path, mask_noise.astype(np.uint8), affine)
            logging.info('Mask noise saved as {0}'.format(mask_noise_path))

            noise_std = np.std(data[mask_noise, :])
            logging.info('Noise standard deviation sigma= ' + str(noise_std))

            idx = np.sum(gtab.bvecs, axis=-1) == 0
            gtab.bvecs[idx] = np.inf
            axis_X = np.argmin(
                np.sum((gtab.bvecs-np.array([1, 0, 0])) ** 2, axis=-1))
            axis_Y = np.argmin(
                np.sum((gtab.bvecs-np.array([0, 1, 0])) ** 2, axis=-1))
            axis_Z = np.argmin(
                np.sum((gtab.bvecs-np.array([0, 0, 1])) ** 2, axis=-1))

            SNR_output = []
            SNR_directions = []
            for direction in ['b0', axis_X, axis_Y, axis_Z]:
                if direction == 'b0':
                    SNR = mean_signal[0]/noise_std
                    logging.info("SNR for the b=0 image is : " + str(SNR))
                else:
                    SNR_directions.append(direction)
                    SNR = mean_signal[direction]/noise_std
                    logging.info("SNR for direction " + str(direction) +
                                 " " + str(gtab.bvecs[direction]) + " is : " +
                                 str(SNR))
                SNR_output.append(SNR)

            data = []
            data.append({
                        'data': str(SNR_output[0]) + ' ' + str(SNR_output[1]) +
                        ' ' + str(SNR_output[2]) + ' ' + str(SNR_output[3]),
                        'directions': 'b0' + ' ' + str(SNR_directions[0]) +
                        ' ' + str(SNR_directions[1]) + ' ' +
                        str(SNR_directions[2])
                        })

            with open(os.path.join(out_dir, out_path), 'w') as myfile:
                json.dump(data, myfile)
Example 13
Then, as assurance, we want just RED voxels in the CC (there could be
noisy red voxels around the brain mask and we don't want those). Unless the brain
acquisition was badly aligned, the CC is always close to the mid-sagittal slice.

The following lines perform these two operations and then save the computed mask.
"""

print("Computing worst-case/best-case SNR using the corpus callosum...")
from dipy.segment.mask import segment_from_cfa
from dipy.segment.mask import bounding_box

threshold = (0.6, 1, 0, 0.1, 0, 0.1)
CC_box = np.zeros_like(data[..., 0])

mins, maxs = bounding_box(mask)
mins = np.array(mins)
maxs = np.array(maxs)
diff = (maxs - mins) // 4
bounds_min = mins + diff
bounds_max = maxs - diff

CC_box[bounds_min[0] : bounds_max[0], bounds_min[1] : bounds_max[1], bounds_min[2] : bounds_max[2]] = 1

mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box, threshold, return_cfa=True)

cfa_img = nib.Nifti1Image((cfa * 255).astype(np.uint8), affine)
mask_cc_part_img = nib.Nifti1Image(mask_cc_part.astype(np.uint8), affine)
nib.save(mask_cc_part_img, "mask_CC_part.nii.gz")

import matplotlib.pyplot as plt
Example 14
    def run(self,
            data_file,
            data_bvals,
            data_bvecs,
            mask=None,
            bbox_threshold=(0.6, 1, 0, 0.1, 0, 0.1),
            out_dir='',
            out_file='product.json',
            out_mask_cc='cc.nii.gz',
            out_mask_noise='mask_noise.nii.gz'):
        """ Workflow for computing the signal-to-noise ratio in the
            corpus callosum

        Parameters
        ----------
        data_file : string
            Path to the dwi.nii.gz file. This path may contain wildcards to
            process multiple inputs at once.
        data_bvals : string
            Path of bvals.
        data_bvecs : string
            Path of bvecs.
        mask : string, optional
            Path of mask if desired. (default None)
        bbox_threshold : string, optional
            Threshold for the bounding box, values separated by commas, e.g.
            [0.6,1,0,0.1,0,0.1]. (default (0.6, 1, 0, 0.1, 0, 0.1))
        out_dir : string, optional
            Where the resulting file will be saved. (default '')
        out_file : string, optional
            Name of the result file to be saved. (default 'product.json')
        out_mask_cc : string, optional
            Name of the CC mask volume to be saved (default 'cc.nii.gz')
        out_mask_noise : string, optional
            Name of the mask noise volume to be saved
            (default 'mask_noise.nii.gz')
        """

        if not isinstance(bbox_threshold, tuple):
            b = bbox_threshold.replace("[", "")
            b = b.replace("]", "")
            b = b.replace("(", "")
            b = b.replace(")", "")
            b = b.replace(" ", "")
            b = b.split(",")
            for i in range(len(b)):
                b[i] = float(b[i])
            bbox_threshold = tuple(b)

        io_it = self.get_io_iterator()

        for data_path, data_bvals_path, data_bvecs_path, out_path, \
                cc_mask_path, mask_noise_path in io_it:
            img = nib.load('{0}'.format(data_path))
            bvals, bvecs = read_bvals_bvecs('{0}'.format(data_bvals_path),
                                            '{0}'.format(data_bvecs_path))
            gtab = gradient_table(bvals, bvecs)

            data = img.get_data()
            affine = img.affine

            logging.info('Computing brain mask...')
            b0_mask, calc_mask = median_otsu(data)

            if mask is None:
                mask = calc_mask
            else:
                mask = nib.load(mask).get_data().astype(bool)
                mask = np.array(calc_mask == mask).astype(int)

            logging.info('Computing tensors...')
            tenmodel = TensorModel(gtab)
            tensorfit = tenmodel.fit(data, mask=mask)

            logging.info('Computing worst-case/best-case SNR using the CC...')
            threshold = bbox_threshold

            if np.ndim(data) == 4:
                CC_box = np.zeros_like(data[..., 0])
            elif np.ndim(data) == 3:
                CC_box = np.zeros_like(data)
            else:
                raise IOError('DWI data has invalid dimensions')

            mins, maxs = bounding_box(mask)
            mins = np.array(mins)
            maxs = np.array(maxs)
            diff = (maxs - mins) // 4
            bounds_min = mins + diff
            bounds_max = maxs - diff

            CC_box[bounds_min[0]:bounds_max[0], bounds_min[1]:bounds_max[1],
                   bounds_min[2]:bounds_max[2]] = 1

            mask_cc_part, cfa = segment_from_cfa(tensorfit,
                                                 CC_box,
                                                 threshold,
                                                 return_cfa=True)

            cfa_img = nib.Nifti1Image((cfa * 255).astype(np.uint8), affine)
            mask_cc_part_img = nib.Nifti1Image(mask_cc_part.astype(np.uint8),
                                               affine)
            nib.save(mask_cc_part_img, cc_mask_path)
            logging.info('CC mask saved as {0}'.format(cc_mask_path))

            mean_signal = np.mean(data[mask_cc_part], axis=0)
            mask_noise = binary_dilation(mask, iterations=10)
            mask_noise[..., :mask_noise.shape[-1] // 2] = 1
            mask_noise = ~mask_noise
            mask_noise_img = nib.Nifti1Image(mask_noise.astype(np.uint8),
                                             affine)
            nib.save(mask_noise_img, mask_noise_path)
            logging.info('Mask noise saved as {0}'.format(mask_noise_path))

            noise_std = np.std(data[mask_noise, :])
            logging.info('Noise standard deviation sigma= ' + str(noise_std))

            idx = np.sum(gtab.bvecs, axis=-1) == 0
            gtab.bvecs[idx] = np.inf
            axis_X = np.argmin(
                np.sum((gtab.bvecs - np.array([1, 0, 0]))**2, axis=-1))
            axis_Y = np.argmin(
                np.sum((gtab.bvecs - np.array([0, 1, 0]))**2, axis=-1))
            axis_Z = np.argmin(
                np.sum((gtab.bvecs - np.array([0, 0, 1]))**2, axis=-1))

            SNR_output = []
            SNR_directions = []
            for direction in ['b0', axis_X, axis_Y, axis_Z]:
                if direction == 'b0':
                    SNR = mean_signal[0] / noise_std
                    logging.info("SNR for the b=0 image is : " + str(SNR))
                else:
                    SNR_directions.append(direction)
                    SNR = mean_signal[direction] / noise_std
                    logging.info("SNR for direction " + str(direction) + " " +
                                 str(gtab.bvecs[direction]) + " is : " +
                                 str(SNR))
                SNR_output.append(SNR)

            data = []
            data.append({
                'data':
                str(SNR_output[0]) + ' ' + str(SNR_output[1]) + ' ' +
                str(SNR_output[2]) + ' ' + str(SNR_output[3]),
                'directions':
                'b0' + ' ' + str(SNR_directions[0]) + ' ' +
                str(SNR_directions[1]) + ' ' + str(SNR_directions[2])
            })

            with open(os.path.join(out_dir, out_file), 'w') as myfile:
                json.dump(data, myfile)
Example 15
    def get_bounded_image(self, vol):
        min_indices, max_indices = bounding_box(vol)
        bounded_image = crop(vol, min_indices, max_indices)
        return bounded_image
Example 16
def crop_using_mask(data, mask):
    mins, maxs = bounding_box(mask)
    cropped_data = crop(data, mins, maxs)
    return cropped_data