Example #1
def test_mask():
    vol = np.zeros((30, 30, 30))
    vol[15, 15, 15] = 1
    struct = generate_binary_structure(3, 1)
    voln = binary_dilation(vol, structure=struct, iterations=4).astype('f4')
    initial = np.sum(voln > 0)
    mask = voln.copy()
    thresh = otsu(mask)
    mask = mask > thresh
    initial_otsu = np.sum(mask > 0)
    assert_equal(initial_otsu, initial)

    mins, maxs = bounding_box(mask)
    voln_crop = crop(mask, mins, maxs)
    initial_crop = np.sum(voln_crop > 0)
    assert_equal(initial_crop, initial)

    applymask(voln, mask)
    final = np.sum(voln > 0)
    assert_equal(final, initial)

    # Test multi_median.
    median_test = np.arange(25).reshape(5, 5)
    median_control = median_test.copy()
    medianradius = 3
    median_test = multi_median(median_test, medianradius, 3)

    medarr = np.ones_like(median_control.shape) * ((medianradius * 2) + 1)
    median_filter(median_control, medarr, output=median_control)
    median_filter(median_control, medarr, output=median_control)
    median_filter(median_control, medarr, output=median_control)
    assert_equal(median_test, median_control)
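For reference, a minimal standalone sketch of the same masking pipeline outside the test harness. It assumes the usual scipy/dipy module layout for the imports (`otsu` lives in `dipy.segment.threshold`); the volume size and dilation count are illustrative only.

import numpy as np
from scipy.ndimage import binary_dilation, generate_binary_structure
from dipy.segment.mask import applymask, bounding_box, crop
from dipy.segment.threshold import otsu

# Synthetic blob: one voxel dilated four times.
vol = np.zeros((30, 30, 30))
vol[15, 15, 15] = 1
blob = binary_dilation(vol, generate_binary_structure(3, 1),
                       iterations=4).astype('f4')

# Threshold with Otsu, crop to the bounding box, and mask, as in the test.
mask = blob > otsu(blob)
mins, maxs = bounding_box(mask)
cropped = crop(blob, mins, maxs)
masked = applymask(blob, mask)
print(int(mask.sum()), cropped.shape, masked.shape)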
Example #3
def compute_masks_crop_bet(reference_scan, other_scan1, ref_scan_path,
                           ref_dir_path, other_dir_path, starting_dir):
    # Use bet for generating brain masks for each of the scans

    # Get the mask of the reference scan
    os.chdir(ref_dir_path)
    subprocess.call([
        "bet",
        os.path.basename(ref_scan_path), "Brain_temp", "-m", "-n", "-R", "-f",
        "0.2", "-t"
    ])
    reference_scan_mask = nib.load("Brain_temp_mask.nii.gz")
    reference_scan_mask = reference_scan_mask.get_data()
    # Delete the created files
    os.remove('Brain_temp.nii.gz')
    os.remove('Brain_temp_mask.nii.gz')

    # Go back to the original directory. We do this because the specified
    # file paths are not required to be absolute
    os.chdir(starting_dir)

    # Similarly get the masks of the other scans
    os.chdir(other_dir_path)
    subprocess.call([
        "bet", "Full_Registered_Scan.nii.gz", "Brain_temp", "-m", "-n", "-R",
        "-f", "0.2", "-t"
    ])
    other_scan1_mask = nib.load("Brain_temp_mask.nii.gz")
    other_scan1_mask = other_scan1_mask.get_data()
    os.remove('Brain_temp.nii.gz')
    os.remove('Brain_temp_mask.nii.gz')

    # Get the intersection of the masks
    mask_intersection = np.logical_and(reference_scan_mask, other_scan1_mask)

    # Apply the combined mask to the scans
    reference_scan_brain = applymask(reference_scan, mask_intersection)
    other_scan1_brain = applymask(other_scan1, mask_intersection)

    # Crop the scans using the intersected mask
    (mins, maxs) = bounding_box(mask_intersection)
    reference_scan_brain = crop(reference_scan_brain, mins, maxs)

    return (reference_scan_brain, other_scan1_brain)
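A hypothetical invocation of the helper above, assuming FSL's `bet` binary is on the PATH; every path below is a placeholder. Note that the function returns while the working directory is still `other_dir_path`, so the caller may want to change back afterwards.

import os
import nibabel as nib

starting_dir = os.getcwd()
ref_path = 'subject/ref/T1.nii.gz'  # placeholder
reference_scan = nib.load(ref_path).get_data()
other_scan1 = nib.load(
    'subject/other/Full_Registered_Scan.nii.gz').get_data()  # placeholder

ref_brain, other_brain = compute_masks_crop_bet(
    reference_scan, other_scan1, ref_path,
    'subject/ref', 'subject/other', starting_dir)
os.chdir(starting_dir)  # the helper leaves the cwd in other_dir_path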
Example #4
def fit_from_model(model, data, mask=None, nbr_processes=None):
    """Fit the model to data

    Parameters
    ----------
    model : a model instance
        `model` will be used to fit the data.
    data : np.ndarray (4d)
        Diffusion data.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    nbr_processes : int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    fit_array : np.ndarray
        Array containing the fit
    """
    data_shape = data.shape
    if mask is None:
        mask = np.sum(data, axis=3).astype(bool)
    else:
        data = applymask(data, mask)

    nbr_processes = multiprocessing.cpu_count() if nbr_processes is None \
        or nbr_processes <= 0 else nbr_processes

    # Ravel the first 3 dimensions while keeping the 4th intact, like a list
    # of 1D time series voxels, then split it into nbr_processes chunks.
    data = data.ravel().reshape(np.prod(data_shape[0:3]), data_shape[3])
    chunks = np.array_split(data, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        fit_from_model_parallel,
        zip(itertools.repeat(model), chunks, np.arange(len(chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunk together in the original shape.
    fit_array = np.zeros((np.prod(data_shape[0:3]), ), dtype='object')
    for i, fit in results:
        fit_array[chunk_len[i]:chunk_len[i + 1]] = fit
    fit_array = MultiVoxelFit(model, fit_array.reshape(data_shape[0:3]), mask)

    return fit_array
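A hedged usage sketch for `fit_from_model`, fitting a DIPY `TensorModel` in parallel. The file names are placeholders, and `fit_from_model_parallel` is assumed to be the module-level worker the pool maps over (it is not shown in this example).

import nibabel as nib
import dipy.reconst.dti as dti
from dipy.core.gradients import gradient_table
from dipy.io import read_bvals_bvecs

img = nib.load('dwi.nii.gz')  # placeholder path
data = img.get_data()
bvals, bvecs = read_bvals_bvecs('dwi.bval', 'dwi.bvec')  # placeholders
gtab = gradient_table(bvals, bvecs)

tenmodel = dti.TensorModel(gtab)
fit = fit_from_model(tenmodel, data, nbr_processes=4)
fa = fit.fa  # MultiVoxelFit forwards the metrics of the per-voxel fits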
Example #5
def peaks_from_nifti(fdwi, fbvec=None, fbval=None, mask=None):

    if '.' not in fdwi:
        fbase = fdwi
        fdwi = fdwi+".nii.gz"
        if not fbval:
            fbval = fbase+".bval"
        if not fbvec:
            fbvec = fbase+".bvec"
    print(fdwi)
    img = nib.load(fdwi)
    data = img.get_data()
    zooms = img.get_header().get_zooms()[:3]
    affine = img.get_affine()
    bval, bvec = dio.read_bvals_bvecs(fbval, fbvec)
    gtab = dgrad.gradient_table(bval, bvec)


    if not mask:
        print('generate mask')
        maskdata, mask = median_otsu(data, 3, 1, False, vol_idx=range(10, 50), dilate=2)

    else:
        mask_img = nib.load(mask)
        mask = mask_img.get_data()

        from dipy.segment.mask import applymask
        maskdata = applymask(data, mask)

    print(maskdata.shape, mask.shape)


    from dipy.reconst.shm import QballModel, CsaOdfModel
    model = QballModel(gtab, 6)

    sphere = get_sphere('symmetric724')

    print "fit Qball peaks"
    proc_num = multiprocessing.cpu_count()-1
    print "peaks_from_model using core# =" + str(proc_num)

    peaks = peaks_from_model(model=model, data=maskdata, relative_peak_threshold=.5,
                            min_separation_angle=25,
        sphere=sphere, mask=mask, parallel=True, nbr_processes=proc_num)

    return peaks
Example #6
def save_nifti(imgdata, header, affine, mask=None, nam=None, pth=None):
    # Apply mask
    from dipy.segment.mask import applymask
    if not mask:
        data = imgdata
        print('Mask image not found. Using implicit mask instead.')
    elif os.path.isfile(mask):
        mask = nib.load(mask)
        try:
            maskdata = mask.get_fdata()
        except AttributeError:
            # older nibabel versions only provide get_data()
            maskdata = mask.get_data()
        print('Applying mask to %s' % nam)
        data = applymask(imgdata, maskdata)
    else:
        data = imgdata
        print('Mask image not found. Using implicit mask instead.')

    # Modify header
    header['descrip'] = "DTI-NODDI " + nam
    # Save nifti
    newImg = nib.Nifti1Image(data.astype(np.float64), affine, header=header)
    nib.save(newImg, pth + "DTINODDI_" + nam + '.nii.gz')
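A hypothetical call, with `odi_map` standing in for a NODDI-style metric computed elsewhere; all paths are made up. Because `pth` is concatenated directly into the output name, it needs a trailing separator.

import nibabel as nib
import numpy as np

src = nib.load('subject_dwi.nii.gz')       # placeholder
odi_map = np.random.rand(*src.shape[:3])   # stand-in for a real ODI map
save_nifti(odi_map, src.header.copy(), src.affine,
           mask='brain_mask.nii.gz', nam='ODI', pth='out/')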
Example #7
def compute_ssst_frf(data,
                     bvals,
                     bvecs,
                     mask=None,
                     mask_wm=None,
                     fa_thresh=0.7,
                     min_fa_thresh=0.5,
                     min_nvox=300,
                     roi_radii=10,
                     roi_center=None,
                     force_b0_threshold=False):
    """Compute a single-shell (under b=1500), single-tissue single Fiber
    Response Function from a DWI volume.
    A DTI fit is made, and voxels containing a single fiber population are
    found using a threshold on the FA.

    Parameters
    ----------
    data : ndarray
        4D Input diffusion volume with shape (X, Y, Z, N)
    bvals : ndarray
        1D bvals array with shape (N,)
    bvecs : ndarray
        2D bvecs array with shape (N, 3)
    mask : ndarray, optional
        3D mask with shape (X,Y,Z)
        Binary mask. Only the data inside the mask will be used for
        computations and reconstruction. Useful if no white matter mask is
        available.
    mask_wm : ndarray, optional
        3D mask with shape (X,Y,Z)
        Binary white matter mask. Only the data inside this mask and above the
        threshold defined by fa_thresh will be used to estimate the fiber
        response function.
    fa_thresh : float, optional
        Use this threshold as the initial threshold to select single fiber
        voxels. Defaults to 0.7
    min_fa_thresh : float, optional
        Minimal value that will be tried when looking for single fiber voxels.
        Defaults to 0.5
    min_nvox : int, optional
        Minimal number of voxels needing to be identified as single fiber
        voxels in the automatic estimation. Defaults to 300.
    roi_radii : int or array-like (3,), optional
        Use those radii to select a cuboid roi to estimate the FRF. The roi
        will be a cuboid spanning from the middle of the volume in each
        direction with the different radii. Defaults to 10.
    roi_center : tuple(3), optional
        Use this center to span the roi of size roi_radius (center of the
        3D volume).
    force_b0_threshold : bool, optional
        If set, will continue even if the minimum bvalue is suspiciously high.

    Returns
    -------
    full_response : ndarray
        Fiber Response Function, with shape (4,)

    Raises
    ------
    ValueError
        If less than `min_nvox` voxels were found with sufficient FA to
        estimate the FRF.
    """
    if min_fa_thresh < 0.4:
        logging.warning(
            "Minimal FA threshold ({:.2f}) seems really small. "
            "Make sure it makes sense for this dataset.".format(min_fa_thresh))

    if not is_normalized_bvecs(bvecs):
        logging.warning("Your b-vectors do not seem normalized...")
        bvecs = normalize_bvecs(bvecs)

    check_b0_threshold(force_b0_threshold, bvals.min())

    gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())

    if mask is not None:
        data = applymask(data, mask)

    if mask_wm is not None:
        data = applymask(data, mask_wm)
    else:
        logging.warning(
            "No white matter mask specified! Only mask will be used "
            "(if it has been supplied). \nBe *VERY* careful about the "
            "estimation of the fiber response function to ensure no invalid "
            "voxel was used.")

    # Iteratively try to fit at least min_nvox voxels, lowering the FA
    # threshold when it fails. Fail if the FA threshold drops below
    # min_fa_thresh. We use an epsilon since the -= 0.05 might incur
    # numerical imprecision.
    nvox = 0
    while nvox < min_nvox and fa_thresh >= min_fa_thresh - 0.00001:
        mask = mask_for_response_ssst(gtab,
                                      data,
                                      roi_center=roi_center,
                                      roi_radii=roi_radii,
                                      fa_thr=fa_thresh)
        nvox = np.sum(mask)
        response, ratio = response_from_mask_ssst(gtab, data, mask)

        logging.debug(
            "Number of indices is {:d} with threshold of {:.2f}".format(
                nvox, fa_thresh))
        fa_thresh -= 0.05

    if nvox < min_nvox:
        raise ValueError(
            "Could not find at least {:d} voxels with sufficient FA "
            "to estimate the FRF!".format(min_nvox))

    logging.debug("Found {:d} voxels with FA threshold {:.2f} for "
                  "FRF estimation".format(nvox, fa_thresh + 0.05))
    logging.debug("FRF eigenvalues: {}".format(str(response[0])))
    logging.debug("Ratio for smallest to largest eigen value "
                  "is {:.3f}".format(ratio))
    logging.debug("Mean of the b=0 signal for voxels used "
                  "for FRF: {}".format(response[1]))

    full_response = np.array(
        [response[0][0], response[0][1], response[0][2], response[1]])

    return full_response
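A hedged sketch of calling `compute_ssst_frf` on a DWI volume; the file names are placeholders and the optional masks may be omitted.

import nibabel as nib
import numpy as np
from dipy.io import read_bvals_bvecs

data = nib.load('dwi.nii.gz').get_fdata()                # placeholder
bvals, bvecs = read_bvals_bvecs('dwi.bval', 'dwi.bvec')  # placeholders
mask = nib.load('brain_mask.nii.gz').get_fdata().astype(bool)

frf = compute_ssst_frf(data, bvals, bvecs, mask=mask)
np.savetxt('frf.txt', frf)  # three eigenvalues plus the mean b=0 signal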
Example #8
def compCsdPeaks(basename, output, mask=None):
    home = os.getcwd()

    fbase = basename

    fdwi = fbase+".nii.gz"
    fbval = fbase+".bval"
    fbvec = fbase+".bvec"

    print(fdwi, fbval, fbvec)

    img = nib.load(fdwi)
    data = img.get_data()
    zooms = img.get_header().get_zooms()[:3]
    affine = img.get_affine()

    # reslice image into 1x1x1 iso voxel

#    new_zooms = (1., 1., 1.)
#    data, affine = resample(data, affine, zooms, new_zooms)
#    img = nib.Nifti1Image(data, affine)
#
#    print data.shape
#    print img.get_header().get_zooms()
#    print "###"
#
#    nib.save(img, 'C5_iso.nii.gz')

    bval, bvec = dio.read_bvals_bvecs(fbval, fbvec)
    # invert the bvec y-component (column 1) for GE scanner data
    bvec[:, 1] *= -1
    gtab = dgrad.gradient_table(bval, bvec)

    if mask is None:
        print('generate mask')
        maskdata, mask = median_otsu(data, 3, 1, False,
                                     vol_idx=range(10, 50), dilate=2)
    else:
        mask = nib.load(mask).get_data()
        maskdata = applymask(data, mask)



#    tenmodel = dti.TensorModel(gtab)
#    tenfit = tenmodel.fit(data)
#    print('Computing anisotropy measures (FA, MD, RGB)')
#
#
#    FA = fractional_anisotropy(tenfit.evals)
#    FA[np.isnan(FA)] = 0
#
#    fa_img = nib.Nifti1Image(FA.astype(np.float32), img.get_affine())
#    nib.save(fa_img, 'FA.nii.gz')
#
#    return





    # estimate the response function; the ratio should be ~0.2
    response, ratio = auto_response(gtab, maskdata, roi_radius=10, fa_thr=0.7)
    print(response, ratio)



    # reconstruct csd model
    print("estimate csd_model")
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    #a_data = maskdata[40:80, 40:80, 60:61]
    #c_data = maskdata[40:80, 59:60, 50:80]
    #s_data = maskdata[59:60, 40:70, 30:80]
    #data_small = a_data
    #
#    evals = response[0]
#    evecs = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]).T
    #sphere = get_sphere('symmetric362')
    #csd_fit = csd_model.fit(data_small)
    #csd_odf = csd_fit.odf(sphere)
    #
    #
    #fodf_spheres = fvtk.sphere_funcs(csd_odf, sphere, scale=1, norm=False)
    ##fodf_spheres.GetProperty().SetOpacity(0.4)
    ##
    #fvtk.add(ren, fodf_spheres)
    ##fvtk.add(ren, fodf_peaks)
    #fvtk.show(ren)
    #
    #sys.exit()

    # fit csd peaks
    print "fit csd peaks"
    print "peaks_from_model using core# =" + str(multiprocessing.cpu_count())

    sphere = get_sphere('symmetric724')
    csd_peaks = peaks_from_model(model=csd_model,
                                 data=data,
                                 sphere=sphere,
                                 mask=mask,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=True, nbr_processes=10)

    #fodf_peaks = fvtk.peaks(csd_peaks.peak_dirs, csd_peaks.peak_values, scale=1)

#    fd, fname = mkstemp()
#    pickle.save_pickle(fname, csd_peaks)
#
#    os.close(fd)

    #pickle.dump(csd_peaks, open("csd.p", "wb"))


    with open(output, 'wb') as fout:
        cPickle.dump(csd_peaks, fout, -1)


    print "done writing to file %s"% (output)
    return csd_peaks
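A hypothetical call; `basename` is the shared prefix of the .nii.gz/.bval/.bvec files and `output` is where the peaks pickle is written. All paths are placeholders.

# The mask argument is optional; without it a median_otsu mask is generated.
csd_peaks = compCsdPeaks('subject/dwi', 'subject/csd_peaks.p',
                         mask='subject/brain_mask.nii.gz')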
Example #9
def convert_sh_basis(shm_coeff,
                     sphere,
                     mask=None,
                     input_basis='descoteaux07',
                     nbr_processes=None):
    """Converts spherical harmonic coefficients between two bases

    Parameters
    ----------
    shm_coeff : np.ndarray
        Spherical harmonic coefficients
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    input_basis : str, optional
        Type of spherical harmonic basis used for `shm_coeff`. Either
        `descoteaux07` or `tournier07`.
        Default: `descoteaux07`
    nbr_processes: int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    shm_coeff_array : np.ndarray
        Spherical harmonic coefficients in the desired basis.
    """
    output_basis = 'descoteaux07' if input_basis == 'tournier07' else 'tournier07'

    sh_order = order_from_ncoef(shm_coeff.shape[-1])
    B_in, _ = sh_to_sf_matrix(sphere, sh_order, input_basis)
    _, invB_out = sh_to_sf_matrix(sphere, sh_order, output_basis)

    data_shape = shm_coeff.shape
    if mask is not None:
        shm_coeff = applymask(shm_coeff, mask)

    nbr_processes = multiprocessing.cpu_count() if nbr_processes is None \
        or nbr_processes < 0 else nbr_processes

    # Ravel the first 3 dimensions while keeping the 4th intact, like a list
    # of 1D time series voxels, then split it into nbr_processes chunks.
    shm_coeff = shm_coeff.ravel().reshape(np.prod(data_shape[0:3]),
                                          data_shape[3])
    shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        convert_sh_basis_parallel,
        zip(shm_coeff_chunks, itertools.repeat(B_in),
            itertools.repeat(invB_out), np.arange(len(shm_coeff_chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunk together in the original shape.
    shm_coeff_array = np.zeros((np.prod(data_shape[0:3]), data_shape[3]))
    for i, new_shm_coeff in results:
        shm_coeff_array[chunk_len[i]:chunk_len[i + 1], :] = new_shm_coeff
    shm_coeff_array = shm_coeff_array.reshape(data_shape[0:3] +
                                              (data_shape[3], ))

    return shm_coeff_array
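A hedged usage sketch: converting a `descoteaux07` SH volume to `tournier07`. The file names are placeholders, and `convert_sh_basis_parallel` is assumed to be the module-level worker the pool maps over.

import nibabel as nib
from dipy.data import get_sphere

sh_img = nib.load('fodf_descoteaux.nii.gz')  # placeholder
converted = convert_sh_basis(sh_img.get_data(), get_sphere('symmetric724'),
                             input_basis='descoteaux07', nbr_processes=4)
nib.save(nib.Nifti1Image(converted.astype('float32'), sh_img.affine),
         'fodf_tournier.nii.gz')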
Example #10
def maps_from_sh(shm_coeff,
                 peak_dirs,
                 peak_values,
                 peak_indices,
                 sphere,
                 mask=None,
                 gfa_thr=0,
                 sh_basis_type='descoteaux07',
                 nbr_processes=None):
    """Computes maps from given SH coefficients and peaks

    Parameters
    ----------
    shm_coeff : np.ndarray
        Spherical harmonic coefficients
    peak_dirs : np.ndarray
        Peak directions
    peak_values : np.ndarray
        Peak values
    peak_indices : np.ndarray
        Peak indices
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    gfa_thr : float, optional
        Voxels with gfa less than `gfa_thr` are skipped for all metrics, except
        `rgb_map`.
        Default: 0
    sh_basis_type : str, optional
        Type of spherical harmonic basis used for `shm_coeff`. Either
        `descoteaux07` or `tournier07`.
        Default: `descoteaux07`
    nbr_processes: int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    tuple of np.ndarray
        nufo_map, afd_max, afd_sum, rgb_map, gfa, qa
    """
    sh_order = order_from_ncoef(shm_coeff.shape[-1])
    B, _ = sh_to_sf_matrix(sphere, sh_order, sh_basis_type)

    data_shape = shm_coeff.shape
    if mask is not None:
        shm_coeff = applymask(shm_coeff, mask)
        peak_dirs = applymask(peak_dirs, mask)
        peak_values = applymask(peak_values, mask)
        peak_indices = applymask(peak_indices, mask)

    nbr_processes = multiprocessing.cpu_count() if nbr_processes is None \
        or nbr_processes < 0 else nbr_processes

    npeaks = peak_values.shape[3]
    # Ravel the first 3 dimensions while keeping the 4th intact, like a list
    # of 1D time series voxels, then split it into nbr_processes chunks.
    shm_coeff = shm_coeff.ravel().reshape(np.prod(data_shape[0:3]),
                                          data_shape[3])
    peak_dirs = peak_dirs.ravel().reshape(
        (np.prod(data_shape[0:3]), npeaks, 3))
    peak_values = peak_values.ravel().reshape(np.prod(data_shape[0:3]), npeaks)
    peak_indices = peak_indices.ravel().reshape(np.prod(data_shape[0:3]),
                                                npeaks)
    shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes)
    peak_dirs_chunks = np.array_split(peak_dirs, nbr_processes)
    peak_values_chunks = np.array_split(peak_values, nbr_processes)
    peak_indices_chunks = np.array_split(peak_indices, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        maps_from_sh_parallel,
        zip(shm_coeff_chunks, peak_dirs_chunks, peak_values_chunks,
            peak_indices_chunks, itertools.repeat(B), itertools.repeat(sphere),
            itertools.repeat(gfa_thr), np.arange(len(shm_coeff_chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunk together in the original shape.
    nufo_map_array = np.zeros((np.prod(data_shape[0:3])))
    afd_max_array = np.zeros((np.prod(data_shape[0:3])))
    afd_sum_array = np.zeros((np.prod(data_shape[0:3])))
    rgb_map_array = np.zeros((np.prod(data_shape[0:3]), 3))
    gfa_map_array = np.zeros((np.prod(data_shape[0:3])))
    qa_map_array = np.zeros((np.prod(data_shape[0:3]), npeaks))
    for i, nufo_map, afd_max, afd_sum, rgb_map, gfa_map, qa_map in results:
        nufo_map_array[chunk_len[i]:chunk_len[i + 1]] = nufo_map
        afd_max_array[chunk_len[i]:chunk_len[i + 1]] = afd_max
        afd_sum_array[chunk_len[i]:chunk_len[i + 1]] = afd_sum
        rgb_map_array[chunk_len[i]:chunk_len[i + 1], :] = rgb_map
        gfa_map_array[chunk_len[i]:chunk_len[i + 1]] = gfa_map
        qa_map_array[chunk_len[i]:chunk_len[i + 1], :] = qa_map
    nufo_map_array = nufo_map_array.reshape(data_shape[0:3])
    afd_max_array = afd_max_array.reshape(data_shape[0:3])
    afd_sum_array = afd_sum_array.reshape(data_shape[0:3])
    rgb_map_array = rgb_map_array.reshape(data_shape[0:3] + (3, ))
    gfa_map_array = gfa_map_array.reshape(data_shape[0:3])
    qa_map_array = qa_map_array.reshape(data_shape[0:3] + (npeaks, ))

    afd_unique = np.unique(afd_max_array)
    if np.array_equal(np.array([0, 1]), afd_unique) \
        or np.array_equal(np.array([1]), afd_unique):
        logging.warning('All AFD_max values are 1. The peaks seem normalized.')

    return (nufo_map_array, afd_max_array, afd_sum_array, rgb_map_array,
            gfa_map_array, qa_map_array)
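A hedged sketch of feeding `maps_from_sh` with the outputs of a prior `peaks_from_sh` call (see Example #16 below); `shm_coeff` and `mask` are assumed to have been loaded elsewhere, and `gfa_thr` is chosen arbitrarily for illustration.

from dipy.data import get_sphere

sphere = get_sphere('symmetric724')
# shm_coeff, mask: loaded elsewhere; peaks computed by peaks_from_sh
peak_dirs, peak_values, peak_indices = peaks_from_sh(shm_coeff, sphere,
                                                     mask=mask)
nufo, afd_max, afd_sum, rgb, gfa, qa = maps_from_sh(
    shm_coeff, peak_dirs, peak_values, peak_indices,
    sphere, mask=mask, gfa_thr=0.1)  # gfa_thr is illustrative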
Example #11
    # wenlin made this change - add the runno name to each animal's output
    #    print('DTI duration %.3f' % (duration1,))
    print(runno + ' DTI duration %.3f' % (duration1, ))
    # response: 3.96154132e-04, 9.23377324e-05, 9.23377324e-05

    # replace CSA with CSD
    # Build Brain Mask
    t2 = time()
    myroi = 120
    # bm = np.where(labels == myroi, False, True)
    roimask = (labels == myroi) * 1
    # Compute odfs in Brain Mask
    t2 = time()
    # add fa_thresh=0.5 for wm
    response, ratio, nvl = auto_response(gtab,
                                         applymask(data, roimask),
                                         fa_thresh=0.5,
                                         roi_radius=radius,
                                         return_number_of_voxels=True)
    print(
        'We use the roi_radius={},\nand the response is {},\nthe ratio is {},\nusing {} of voxels'
        .format(radius, response, ratio, nvl))
    # np.savetxt(outpath+runno+'.txt', response)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=4)

    # csd_peaks is 4D, with every voxel holding a set of SH coefficients
    '''
    csd_peaks = peaks_from_model(csd_model, data, sphere=peaks.default_sphere,
                                 relative_peak_threshold=0.5, min_separation_angle=60, mask=bm,
                                 return_odf=True, return_sh=True, parallel=True, sh_order=4,
                                 sh_basis_type='tournier07',
                                 nbr_processes=multiprocessing.cpu_count())
    '''
Example #12
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    if args.isVerbose:
        logging.basicConfig(level=logging.DEBUG)

    if (args.hsv_sat is not None or
       args.hsv_value is not None) and \
       args.scale_colors is False:
        logging.warning("Percentage option will be not used because "
                        "--scale_colors option is not activated.")

    if args.scale_colors:
        if args.hsv_sat is None:
            args.hsv_sat = 0.75
        if args.hsv_value is None:
            args.hsv_value = 0.75

    if not os.path.isfile(args.fodf_file):
        parser.error('"{0}" must be a file!'.format(args.fodf_file))

    if args.mask and not os.path.isfile(args.mask):
        parser.error('"{0}" must be a file!'.format(args.mask))

    if os.path.isfile(args.rgb_file):
        if args.isForce:
            logging.info('Overwriting "{0}".'.format(args.rgb_file))
        else:
            parser.error(
                '"{0}" already exists! Use -f to overwrite it.'.format(
                    args.rgb_file))

    fodf = nib.load(args.fodf_file)
    fodf_data = fodf.get_data()

    if args.mask:
        wm = nib.load(args.mask)
        fodf_data = applymask(fodf_data, wm.get_data())

    sphere = get_sphere('repulsion724')
    SH = SphericalHarmonics(fodf_data, args.basis, sphere)

    rgb = np.zeros(fodf.shape[0:3] + (3, ))
    indices = np.argwhere(np.any(fodf.get_data(), axis=3))
    max_sf = 0

    for ind in indices:
        ind = tuple(ind)

        SF = SH.get_SF(fodf_data[ind])
        # set min to 0
        SF = SF.clip(min=0)

        sum_sf = np.sum(SF)
        max_sf = np.maximum(max_sf, sum_sf)

        if sum_sf > 0:
            rgb[ind] = np.sum(np.abs(sphere.vertices) * SF, axis=0)
            rgb[ind] /= np.linalg.norm(rgb[ind])
            rgb[ind] *= sum_sf

    rgb /= max_sf

    if args.scale_colors:
        for ind in indices:
            ind = tuple(ind)
            if np.sum(rgb[ind]) > 0:
                tmpHSV = np.array(
                    colorsys.rgb_to_hsv(rgb[ind][0], rgb[ind][1], rgb[ind][2]))

                tmpHSV[1] = args.hsv_sat
                tmpHSV[2] = args.hsv_value

                rgb[ind] = np.array(
                    colorsys.hsv_to_rgb(tmpHSV[0], tmpHSV[1], tmpHSV[2]))

    rgb *= 255
    nib.Nifti1Image(rgb.astype('uint8'),
                    fodf.get_affine()).to_filename(args.rgb_file)
Example #13
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs])
    assert_outputs_exists(parser, args, [args.frf_file])

    vol = nib.load(args.input)
    data = vol.get_data()

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    check_b0_threshold(args, bvals.min())
    gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())

    if args.min_fa_thresh < 0.4:
        logging.warning(
            'Minimal FA threshold ({}) seems really small. Make sure it '
            'makes sense for this dataset.'.format(args.min_fa_thresh))

    if args.mask:
        mask = nib.load(args.mask).get_data().astype(bool)
        data = applymask(data, mask)

    if args.mask_wm:
        wm_mask = nib.load(args.mask_wm).get_data().astype('bool')
    else:
        wm_mask = np.ones_like(data[..., 0], dtype=bool)
        logging.warning(
            'No white matter mask specified! mask_data will be used instead, '
            'if it has been supplied. \nBe *VERY* careful about the '
            'estimation of the fiber response function to ensure no invalid '
            'voxel was used.')

    data_in_wm = applymask(data, wm_mask)

    fa_thresh = args.fa_thresh
    # Iteratively try to fit at least 300 voxels, lowering the FA threshold
    # when it fails. Fail if the FA threshold drops below the minimum
    # threshold. We use an epsilon since the -= 0.05 might incur numerical
    # imprecision.
    nvox = 0
    while nvox < args.min_nvox and fa_thresh >= args.min_fa_thresh - 0.00001:
        response, ratio, nvox = auto_response(gtab,
                                              data_in_wm,
                                              roi_center=args.roi_center,
                                              roi_radius=args.roi_radius,
                                              fa_thr=fa_thresh,
                                              return_number_of_voxels=True)

        logging.debug('Number of indices is %s with threshold of %s', nvox,
                      fa_thresh)
        fa_thresh -= 0.05

    if nvox < args.min_nvox:
        raise ValueError(
            "Could not find at least {} voxels with sufficient FA "
            "to estimate the FRF!".format(args.min_nvox))

    logging.debug("Found %i voxels with FA threshold %f for FRF estimation",
                  nvox, fa_thresh + 0.05)
    logging.debug("FRF eigenvalues: %s", str(response[0]))
    logging.debug("Ratio for smallest to largest eigen value is %f", ratio)
    logging.debug("Mean of the b=0 signal for voxels used for FRF: %f",
                  response[1])

    full_response = np.array(
        [response[0][0], response[0][1], response[0][2], response[1]])

    np.savetxt(args.frf_file, full_response)
Example #14
def main():
    params = readArgs()
    # read in from the command line
    read_args = params.collect_args()
    params.check_args(read_args)

    # get img obj
    dwi_img = nib.load(params.dwi_)
    mask_img = nib.load(params.mask_)

    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(params.bval_, params.bvec_)

    # create the gradient table
    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs, b0_threshold=25)

    # get the data from image objects
    dwi_data = dwi_img.get_data()
    mask_data = mask_img.get_data()
    # and get affine
    img_affine = dwi_img.affine

    from dipy.data import get_sphere
    sphere = get_sphere('repulsion724')

    from dipy.segment.mask import applymask
    dwi_data = applymask(dwi_data, mask_data)

    printfl('dwi_data.shape (%d, %d, %d, %d)' % dwi_data.shape)
    printfl('\nYour bvecs look like this:{0}'.format(bvecs))
    printfl('\nYour bvals look like this:{0}\n'.format(bvals))

    from dipy.reconst.shm import anisotropic_power, sph_harm_lookup, smooth_pinv, normalize_data
    from dipy.core.sphere import HemiSphere

    smooth = 0.0
    normed_data = normalize_data(dwi_data, gtab.b0s_mask)
    normed_data = normed_data[..., np.where(1 - gtab.b0s_mask)[0]]

    from dipy.core.gradients import gradient_table_from_bvals_bvecs
    gtab2 = gradient_table_from_bvals_bvecs(
        gtab.bvals[np.where(1 - gtab.b0s_mask)[0]],
        gtab.bvecs[np.where(1 - gtab.b0s_mask)[0]])

    signal_native_pts = HemiSphere(xyz=gtab2.bvecs)
    sph_harm_basis = sph_harm_lookup.get(None)
    Ba, m, n = sph_harm_basis(params.sh_order_, signal_native_pts.theta,
                              signal_native_pts.phi)

    L = -n * (n + 1)
    invB = smooth_pinv(Ba, np.sqrt(smooth) * L)

    # fit SH basis to DWI signal
    normed_data_sh = np.dot(normed_data, invB.T)

    # power map call
    printfl("fitting power map")
    pow_map = anisotropic_power(normed_data_sh,
                                norm_factor=0.00001,
                                power=2,
                                non_negative=True)

    pow_map_img = nib.Nifti1Image(pow_map.astype(np.float32), img_affine)
    # make output name
    out_name = ''.join(
        [params.output_, '_powMap_sh',
         str(params.sh_order_), '.nii.gz'])

    printfl("writing power map to: {}".format(out_name))
    nib.save(pow_map_img, out_name)
Example #15
    def main(self):

        self.imgFile = str(self.imgFile)
        self.maskFile = str(self.maskFile)

        self.mk_low_high = [
            float(x) for x in self.mk_low_high[1:-1].split(',')
        ]
        self.mk_low_high.sort()
        self.fa_low_high = [
            float(x) for x in self.fa_low_high[1:-1].split(',')
        ]
        self.fa_low_high.sort()
        self.md_low_high = [
            float(x) for x in self.md_low_high[1:-1].split(',')
        ]
        self.md_low_high.sort()
        hdr = None
        affine = None

        if not self.out_dir:
            self.out_dir = os.path.dirname(self.imgFile)

        outPrefix = os.path.join(self.out_dir,
                                 os.path.basename(self.imgFile).split('.')[0])

        if self.imgFile.endswith('.nii.gz') or self.imgFile.endswith('.nii'):
            bvals, bvecs = read_bvals_bvecs(self.bvalFile, self.bvecFile)
            outFormat = '.nii.gz'

            img = nib.load(self.imgFile)
            data = img.get_data()

            affine = img.affine
            grad_axis = 3
            if len(data.shape) != 4:
                raise AttributeError('Not a valid dwi, check dimension')

        elif self.imgFile.endswith('.nrrd') or self.imgFile.endswith('.nhdr'):

            img = nrrd.read(self.imgFile)
            data = img[0]
            hdr = img[1]

            outFormat = '.nrrd'

            bvals, bvecs, b_max, grad_axis, N = nrrd_bvals_bvecs(hdr)

            # put the gradients along last axis
            if grad_axis != 3:
                data = np.moveaxis(data, grad_axis, 3)

        # give the user the liberty to specify different file formats for dwi and mask
        if self.maskFile.endswith('.nii.gz') or self.maskFile.endswith('.nii'):
            mask_data = nib.load(self.maskFile).get_data()

        elif self.maskFile.endswith('.nrrd') or self.maskFile.endswith(
                '.nhdr'):
            mask_data = nrrd.read(self.maskFile)[0]

        data = applymask(data, mask_data)

        gtab = gradient_table(bvals, bvecs)

        dtimodel = dti.TensorModel(gtab)
        dtifit = dtimodel.fit(data, mask_data)
        evals = dtifit.evals
        fa = dtifit.fa
        md = dtifit.md
        ad = dtifit.ad
        rd = dtifit.rd
        evals_zero = evals < 0.
        evals_zero_mask = (evals_zero[..., 0] | evals_zero[..., 1]
                           | evals_zero[..., 2]) * 1

        mkFlag = check_multi_b(gtab, n_bvals=3)
        if mkFlag:
            dkimodel = dki.DiffusionKurtosisModel(gtab)
            dkifit = dkimodel.fit(data, mask_data)
            mk = dkifit.mk(
                0, 3)  # http://nipy.org/dipy/examples_built/reconst_dki.html

        else:
            warnings.warn(
                "DIPY DKI requires at least 3 b-shells (which can include b=0), "
                "kurtosis quality cannot be computed.")

        fa_mask = mask_calc(fa, self.fa_low_high)
        md_mask = mask_calc(md, self.md_low_high)

        where_b0s = np.where(bvals == 0)[0]
        where_dwi = np.where(bvals != 0)[0]
        bse_data = data[..., where_b0s].mean(-1)
        b0File = outPrefix + '_b0' + outFormat
        save_map(b0File, bse_data, affine, hdr)

        # prevent division by zero during normalization
        bse_data[bse_data < 1] = 1.
        extend_bse = np.expand_dims(bse_data, grad_axis)
        extend_bse = np.repeat(extend_bse, len(where_dwi), grad_axis)
        curtail_dwi = np.take(data, where_dwi, axis=grad_axis)

        # 1 / b0 * min(b0 - Gi)
        minOverGrads = np.min(extend_bse - curtail_dwi,
                              axis=grad_axis) / bse_data

        # another way to prevent division by zero: 1/b0 * min(b0-Gi) with condition at b0~eps
        # minOverGrads = np.min(extend_bse - curtail_dwi, axis=grad_axis) / (bse_data + eps)
        # minOverGrads[(bse_data < eps) & (minOverGrads < 5 * eps)] = 0.
        # minOverGrads[(bse_data < eps) & (minOverGrads > 5 * eps)] = 10.

        minOverGradsNegativeMask = (minOverGrads < 0) * 1

        # compute histograms
        print('\nminOverGrads (b0-Gi)/b0 histogram')
        bins = [-inf, 0, inf]
        negative, _ = hist_calc(minOverGrads, bins)

        print('\nevals<0 histogram')
        bins = [-inf, 0, inf]
        hist_calc(evals, bins)

        print('\nfractional anisotropy histogram')
        bins = form_bins(self.fa_low_high)
        hist_calc(fa, bins)

        print('\nmean diffusivity histogram')
        bins = form_bins(self.md_low_high)
        hist_calc(md, bins)

        if mkFlag:
            print('\nmean kurtosis mask')
            bins = form_bins(self.mk_low_high)
            hist_calc(mk, bins)

        # save histograms
        print('\nCreating minOverGrads image ...')
        save_map(outPrefix + '_minOverGrads' + outFormat, minOverGrads, affine,
                 hdr)
        print('\nCreating minOverGrads<0 mask ...')
        save_map(outPrefix + '_minOverGradsMask' + outFormat,
                 minOverGradsNegativeMask.astype('short'), affine, hdr)

        print('\nCreating evals<0 mask ...')
        save_map(outPrefix + '_evalsZeroMask' + outFormat,
                 evals_zero_mask.astype('short'), affine, hdr)

        if mkFlag:
            mk_mask = mask_calc(mk, self.mk_low_high)
            print('\nCreating mk image ...')
            save_map(outPrefix + '_MK' + outFormat, mk, affine, hdr)
            print('Creating mk out of range mask ...')
            save_map(outPrefix + '_MK_mask' + outFormat,
                     mk_mask.astype('short'), affine, hdr)
        else:
            mk = np.zeros(fa.shape)

        print('\nCreating fa image ...')
        save_map(outPrefix + '_FA' + outFormat, fa, affine, hdr)
        print('Creating fa out of range mask ...')
        save_map(outPrefix + '_FA_mask' + outFormat, fa_mask.astype('short'),
                 affine, hdr)

        print('\nCreating md image ....')
        save_map(outPrefix + '_MD' + outFormat, md, affine, hdr)
        print('Creating md out of range mask ...')
        save_map(outPrefix + '_MD_mask' + outFormat, md_mask.astype('short'),
                 affine, hdr)

        # conclusion
        N_mask = mask_data.size
        print('\n\nConclusion: ')
        print('The masked dwi has %.5f%% voxels with values less than b0' %
              (negative * 100))
        print('The masked dwi has %.5f%% voxels with negative eigen value' %
              (evals_zero_mask.sum() / N_mask * 100))
        print('The masked dwi has %.5f%% voxels with FA out of [%f,%f]' %
              (fa_mask.sum() / N_mask * 100, self.fa_low_high[0],
               self.fa_low_high[1]))
        print('The masked dwi has %.5f%% voxels with MD out of [%f,%f]' %
              (md_mask.sum() / N_mask * 100, self.md_low_high[0],
               self.md_low_high[1]))
        if mkFlag:
            print('The masked dwi has %.5f%% voxels with MK out of [%f,%f]' %
                  (mk_mask.sum() / N_mask * 100, self.mk_low_high[0],
                   self.mk_low_high[1]))

        # perform roi based analysis
        if self.template and self.labelMap:
            antsReg(b0File, self.maskFile, self.template, outPrefix)
            warp = outPrefix + '1Warp.nii.gz'
            trans = outPrefix + '0GenericAffine.mat'
            outLabelMapFile = outPrefix + '_labelMap.nii.gz'
            applyXform(self.labelMap,
                       b0File,
                       warp,
                       trans,
                       outLabelMapFile,
                       interp='NearestNeighbor')
            rm = local['rm']
            rm(warp, trans, outPrefix + 'Warped.nii.gz',
               outPrefix + '1InverseWarp.nii.gz',
               outPrefix + 'InverseWarped.nii.gz')

            outLabelMap = nib.load(outLabelMapFile).get_data()
            labels = np.unique(outLabelMap)[1:]
            label2name = parse_labels(labels,
                                      self.lut._path if self.lut else None)

            print('Creating ROI based statistics ...')
            stat_file = outPrefix + f'_{self.name}_stat.csv'

            df = pd.DataFrame(columns=[
                'region',
                'FA_mean',
                'FA_std',
                'MD_mean',
                'MD_std',
                'AD_mean',
                'AD_std',
                'RD_mean',
                'RD_std',
                'total_{min_i(b0-Gi)<0}',
                'total_evals<0',
                'MK_mean',
                'MK_std',
            ])

            for i, label in enumerate(label2name.keys()):
                roi = outLabelMap == int(label)

                properties = [
                    num2str(x) for x in [
                        fa[roi > 0].mean(), fa[roi > 0].std(), md[roi > 0].
                        mean(), md[roi > 0].std(), ad[roi > 0].mean(), ad[
                            roi > 0].std(), rd[roi > 0].mean(),
                        rd[roi > 0].std(), minOverGradsNegativeMask[
                            roi > 0].sum(), evals_zero_mask[roi > 0].sum(), mk[
                                roi > 0].mean(), mk[roi > 0].std()
                    ]
                ]

                df.loc[i] = [label2name[label]] + properties

            df = df.set_index('region')
            # print(df)
            df.to_csv(stat_file)
            print('See ', os.path.abspath(stat_file))
Example #16
def peaks_from_sh(shm_coeff,
                  sphere,
                  mask=None,
                  relative_peak_threshold=0.5,
                  absolute_threshold=0,
                  min_separation_angle=25,
                  normalize_peaks=False,
                  npeaks=5,
                  sh_basis_type='descoteaux07',
                  nbr_processes=None):
    """Computes peaks from given spherical harmonic coefficients

    Parameters
    ----------
    shm_coeff : np.ndarray
        Spherical harmonic coefficients
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    relative_peak_threshold : float, optional
        Only return peaks greater than ``relative_peak_threshold * m`` where m
        is the largest peak.
        Default: 0.5
    absolute_threshold : float, optional
        Absolute threshold on fODF amplitude. This value should be set to
        approximately 1.5 to 2 times the maximum fODF amplitude in isotropic
        voxels (ex. ventricles). `scil_compute_fodf_max_in_ventricles.py`
        can be used to find the maximal value.
        Default: 0
    min_separation_angle : float in [0, 90], optional
        The minimum distance between directions. If two peaks are too close
        only the larger of the two is returned.
        Default: 25
    normalize_peaks : bool, optional
        If true, all peak values are calculated relative to `max(odf)`.
    npeaks : int, optional
        Maximum number of peaks found (default 5 peaks).
    sh_basis_type : str, optional
        Type of spherical harmonic basis used for `shm_coeff`. Either
        `descoteaux07` or `tournier07`.
        Default: `descoteaux07`
    nbr_processes: int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    tuple of np.ndarray
        peak_dirs, peak_values, peak_indices
    """
    sh_order = order_from_ncoef(shm_coeff.shape[-1])
    B, _ = sh_to_sf_matrix(sphere, sh_order, sh_basis_type)

    data_shape = shm_coeff.shape
    if mask is not None:
        shm_coeff = applymask(shm_coeff, mask)

    nbr_processes = multiprocessing.cpu_count() if nbr_processes is None \
        or nbr_processes < 0 else nbr_processes

    # Ravel the first 3 dimensions while keeping the 4th intact, like a list
    # of 1D time series voxels, then split it into nbr_processes chunks.
    shm_coeff = shm_coeff.ravel().reshape(np.prod(data_shape[0:3]),
                                          data_shape[3])
    chunks = np.array_split(shm_coeff, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        peaks_from_sh_parallel,
        zip(chunks, itertools.repeat(B), itertools.repeat(sphere),
            itertools.repeat(relative_peak_threshold),
            itertools.repeat(absolute_threshold),
            itertools.repeat(min_separation_angle), itertools.repeat(npeaks),
            itertools.repeat(normalize_peaks), np.arange(len(chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunk together in the original shape.
    peak_dirs_array = np.zeros((np.prod(data_shape[0:3]), npeaks, 3))
    peak_values_array = np.zeros((np.prod(data_shape[0:3]), npeaks))
    peak_indices_array = np.zeros((np.prod(data_shape[0:3]), npeaks))
    for i, peak_dirs, peak_values, peak_indices in results:
        peak_dirs_array[chunk_len[i]:chunk_len[i + 1], :] = peak_dirs
        peak_values_array[chunk_len[i]:chunk_len[i + 1], :] = peak_values
        peak_indices_array[chunk_len[i]:chunk_len[i + 1], :] = peak_indices
    peak_dirs_array = peak_dirs_array.reshape(data_shape[0:3] + (npeaks, 3))
    peak_values_array = peak_values_array.reshape(data_shape[0:3] + (npeaks, ))
    peak_indices_array = peak_indices_array.reshape(data_shape[0:3] +
                                                    (npeaks, ))

    return peak_dirs_array, peak_values_array, peak_indices_array
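A hedged sketch of extracting peaks from an SH coefficient volume with the function above; the file name is a placeholder and the thresholds mirror the defaults.

import nibabel as nib
from dipy.data import get_sphere

sh_img = nib.load('fodf.nii.gz')  # placeholder
peak_dirs, peak_values, peak_indices = peaks_from_sh(
    sh_img.get_data(), get_sphere('symmetric724'),
    relative_peak_threshold=0.5, min_separation_angle=25, npeaks=5)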
Example #17
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    if not args.not_all:
        args.fodf = args.fodf or 'fodf.nii.gz'
        args.peaks = args.peaks or 'peaks.nii.gz'
        args.peak_indices = args.peak_indices or 'peak_indices.nii.gz'

    arglist = [args.fodf, args.peaks, args.peak_indices]
    if args.not_all and not any(arglist):
        parser.error('When using --not_all, you need to specify at least '
                     'one file to output.')

    assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs])
    assert_outputs_exists(parser, args, arglist)

    nbr_processes = args.nbr_processes
    parallel = True
    if nbr_processes <= 0:
        nbr_processes = None
    elif nbr_processes == 1:
        parallel = False

    # Check for FRF filename
    base_odf_name, _ = split_name_with_nii(args.fodf)
    frf_filename = base_odf_name + '_frf.txt'
    if os.path.isfile(frf_filename) and not args.overwrite:
        parser.error('Cannot save frf file, "{0}" already exists. '
                     'Use -f to overwrite.'.format(frf_filename))

    vol = nib.load(args.input)
    data = vol.get_data()

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    if args.mask_wm is not None:
        wm_mask = nib.load(args.mask_wm).get_data().astype('bool')
    else:
        wm_mask = np.ones_like(data[..., 0], dtype=bool)
        logging.info(
            'No white matter mask specified! mask_data will be used instead, '
            'if it has been supplied. \nBe *VERY* careful about the '
            'estimation of the fiber response function for the CSD.')

    data_in_wm = applymask(data, wm_mask)

    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    if bvals.min() != 0:
        if bvals.min() > 20:
            raise ValueError(
                'The minimal bvalue is greater than 20. This is highly '
                'suspicious. Please check your data to ensure everything is '
                'correct.\nValue found: {}'.format(bvals.min()))
        else:
            logging.warning(
                'Warning: no b=0 image. Setting b0_threshold to '
                'bvals.min() = %s', bvals.min())
            gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())
    else:
        gtab = gradient_table(bvals, bvecs)

    if args.mask is None:
        mask = None
    else:
        mask = nib.load(args.mask).get_data().astype(bool)

    # Raise a warning for the sh order if there are not enough DWIs
    if data.shape[-1] < (args.sh_order + 1) * (args.sh_order + 2) / 2:
        warnings.warn(
            'We recommend having at least {} unique DWI volumes, but you '
            'currently have {} volumes. Try lowering the parameter '
            '--sh_order in case of non convergence.'.format(
                (args.sh_order + 1) * (args.sh_order + 2) / 2,
                data.shape[-1]))
    fa_thresh = args.fa_thresh

    # If threshold is too high, try lower until enough indices are found
    # estimating a response function with fa < 0.5 does not make sense
    nvox = 0
    while nvox < 300 and fa_thresh > 0.5:
        response, ratio, nvox = auto_response(gtab,
                                              data_in_wm,
                                              roi_center=args.roi_center,
                                              roi_radius=args.roi_radius,
                                              fa_thr=fa_thresh,
                                              return_number_of_voxels=True)

        logging.info('Number of indices is %s with threshold of %s', nvox,
                     fa_thresh)
        fa_thresh -= 0.05

        if fa_thresh <= 0:
            raise ValueError(
                'Could not find at least 300 voxels for estimating the frf!')

    logging.info('Found %s valid voxels for frf estimation.', nvox)

    response = list(response)
    logging.info('Response function is %s', response)

    if args.frf is not None:
        l01 = np.array(literal_eval(args.frf), dtype=np.float64)
        if not args.no_factor:
            l01 *= 10**-4

        response[0] = np.array([l01[0], l01[1], l01[1]])
        ratio = l01[1] / l01[0]

    logging.info("Eigenvalues for the frf of the input data are: %s",
                 response[0])
    logging.info("Ratio for smallest to largest eigen value is %s", ratio)
    np.savetxt(frf_filename, response[0])

    if not args.frf_only:
        reg_sphere = get_sphere('symmetric362')
        peaks_sphere = get_sphere('symmetric724')

        csd_model = ConstrainedSphericalDeconvModel(gtab,
                                                    response,
                                                    reg_sphere=reg_sphere,
                                                    sh_order=args.sh_order)

        peaks_csd = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=peaks_sphere,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     mask=mask,
                                     return_sh=True,
                                     sh_basis_type=args.basis,
                                     sh_order=args.sh_order,
                                     normalize_peaks=True,
                                     parallel=parallel,
                                     nbr_processes=nbr_processes)

        if args.fodf:
            nib.save(
                nib.Nifti1Image(peaks_csd.shm_coeff.astype(np.float32),
                                vol.affine), args.fodf)

        if args.peaks:
            nib.save(
                nib.Nifti1Image(reshape_peaks_for_visualization(peaks_csd),
                                vol.affine), args.peaks)

        if args.peak_indices:
            nib.save(nib.Nifti1Image(peaks_csd.peak_indices, vol.affine),
                     args.peak_indices)
Example #18
img = nib.load(fdwi)
img_data = img.get_data()
# Load the mask
mask = nib.load(fmask)
mask_data = mask.get_data()

# load bvals, bvecs and gradient files
bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
gtab = gradient_table(bvals, bvecs)

# Apply the mask to the volume
mask_boolean = mask_data > 0.01
mins, maxs = bounding_box(mask_boolean)
mask_boolean = crop(mask_boolean, mins, maxs)
cropped_volume = crop(img_data, mins, maxs)
data = applymask(cropped_volume, mask_boolean)

fw_runner = FreewaterRunner(data, gtab)
fw_runner.LOG = True  # turn on logging for this example
fw_runner.run_model(num_iter=100, dt=0.001)

# save the loss function to the working directory
#freewater_runner.plot_loss()

# Save the free water map somewhere
fw_file = output_directory + "/freewater.nii.gz"
nib.save(nib.Nifti1Image(fw_runner.get_fw_map(), img.affine), fw_file)

fw_md_file = output_directory + "/freewater_md.nii.gz"
nib.save(nib.Nifti1Image(fw_runner.get_fw_md(), img.affine), fw_md_file)
Example #19
if __name__ == "__main__":

    # Loading values, vectors, image and mask
    sphere = get_sphere('symmetric362')
    print "loading bval/bvec files"
    bvals, bvecs = read_bvals_bvecs("tp3_data//bvals2000", "tp3_data//bvecs2000")
    gtab = gradient_table(bvals, bvecs)

    print "loading nifti files"
    img = nib.load("tp3_data//dwi2000.nii.gz")
    affine = img.get_affine()
    data = img.get_data()
    mask = nib.load("tp3_data//_binary_mask.nii.gz").get_data()

    # Apply mask
    data_in_wm = applymask(data, mask)
    response, ratio = auto_response(gtab, data_in_wm)

    # Computing ODF
    print "computing fODF... please wait an hour"
    csd_model = ConstrainedSphericalDeconvModel(gtab, response, reg_sphere=sphere)
    peaks_csd = peaks_from_model(model=csd_model,
                                 data=data,
                                 sphere=sphere,
                                 relative_peak_threshold=.25,
                                 min_separation_angle=25,
                                 mask=mask,
                                 normalize_peaks=True,
                                 parallel=True)
    # Saving files
    print "saving files"