Example #1
def initial_params(data, bvecs, bvals, model, mask=None, params_file='temp'):
    """
    Determine the initial values for fitting the isotropic diffusion model.
    This only works on the models that fit to the relative diffusion signal.

    Parameters
    ----------
    data: 4 dimensional array
        Diffusion MRI data
    bvecs: 2 dimensional array
        All the b vectors
    bvals: 1 dimensional array
        All b values
    model: callable
        Isotropic model function handle
    mask: 3 dimensional array
        Mask of the data
    params_file: obj or str
        File handle of the param_files containing the tensor parameters.

    Returns
    -------
    bounds: list
        A list containing the bounds for each parameter for least squares
        fitting.
    initial: list
        A list containing the initial values for each parameter for least
        squares fitting.
    """
    dti_mod = dti.TensorModel(data, bvecs, bvals, mask=mask,
                              params_file=params_file)

    d = dti_mod.mean_diffusivity[np.where(mask)]

    # Find initial noise floor
    _, b_inds, _, _ = ozu.separate_bvals(bvals)
    b0_data = data[np.where(mask)][:, b_inds[0]]
    #nf = np.std(b0_data, -1)/np.mean(b0_data, -1)
    nf = np.min(data[np.where(mask)], -1)
    if model == single_exp_rs:
        bounds = [(0, 4)]
        initial = d

    elif model == single_exp_nf_rs:
        bounds = [(0, 10000), (0, 4)]
        initial = np.concatenate([nf[..., None],
                                  np.ones(d[..., None].shape)], -1)

    elif model == bi_exp_rs:
        bounds = [(0, 1), (0, 4), (0, 4)]
        initial = np.concatenate([0.5 * np.ones((len(d), 1)), d[..., None],
                                  d[..., None]], -1)
    elif model == bi_exp_nf_rs:
        bounds = [(0, 10000), (0, 1), (0, 4), (0, 4)]
        initial = np.concatenate([nf[..., None], 0.5 * np.ones((len(d), 1)),
                                  d[..., None], d[..., None]], -1)
    return bounds, initial
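To make the output shapes concrete, here is a minimal numpy-only sketch; `d` and `nf` are mock stand-ins for the per-voxel mean diffusivities and noise-floor estimates computed above:

import numpy as np

# Mock per-voxel values standing in for the tensor mean diffusivities (d)
# and the noise-floor estimates (nf) from the function above.
d = np.array([0.7, 0.9, 1.1])
nf = np.array([120.0, 95.0, 140.0])

# bi_exp_nf_rs case: one bounds tuple per parameter, one row per voxel.
bounds = [(0, 10000), (0, 1), (0, 4), (0, 4)]
initial = np.concatenate([nf[..., None], 0.5 * np.ones((len(d), 1)),
                          d[..., None], d[..., None]], -1)
print(initial.shape)  # (3, 4): [noise floor, fraction, d1, d2] per voxel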
Example #2
File: io.py  Project: zhangerjun/osmosis
def rm_ventricles(wm_data_file, bvals, bvecs, data, data_path):
    """
    Removes the ventricles from the white matter mask and saves a new
    white matter mask.

    Parameters
    ----------
    wm_data_file: obj
        File handle for the white matter mask
    bvals: 1 dimensional array
        All b values
    bvecs: 2 dimensional array
        All the b vectors
    data: 4 dimensional array
        The diffusion data
    data_path: str
        Path to the data directory with all the sub files.

    Returns
    -------
    new_wm_mask: 3 dimensional array
        A new white matter mask with the ventricles removed.
    """
    # Separate b-values and find the indices corresponding to the b0 and
    # bNk measurements where N = the lowest b-value other than 0
    wm_data = wm_data_file.get_data()
    bval_list, b_inds, unique_b, bvals_scaled = ozu.separate_bvals(bvals)
    all_b_idx = np.where(bvals_scaled != 0)
    bNk_b0_inds = np.concatenate((b_inds[0], b_inds[1]))

    # Fit a tensor model
    tm = dti.TensorModel(data[..., bNk_b0_inds],
                         bvecs[:, bNk_b0_inds],
                         bvals[bNk_b0_inds],
                         mask=wm_data,
                         params_file="temp")

    # Find the median, and 25th and 75th percentiles of mean diffusivities
    md_median = np.median(tm.mean_diffusivity[np.where(wm_data)])
    q1 = stats.scoreatpercentile(tm.mean_diffusivity[np.where(wm_data)], 25)
    q3 = stats.scoreatpercentile(tm.mean_diffusivity[np.where(wm_data)], 75)

    # Exclude voxels with MDs above median + 2*interquartile range
    md_exclude = md_median + 2 * (q3 - q1)
    md_include = np.where(tm.mean_diffusivity[np.where(wm_data)] < md_exclude)
    new_wm_mask = np.zeros(wm_data.shape)
    new_wm_mask[np.where(wm_data)[0][md_include],
                np.where(wm_data)[1][md_include],
                np.where(wm_data)[2][md_include]] = 1

    wm = ni.Nifti1Image(new_wm_mask, wm_data_file.get_affine())
    ni.save(wm, os.path.join(data_path, 'wm_mask_no_vent.nii.gz'))

    return new_wm_mask
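The exclusion rule above is a simple robust-outlier cutoff. A self-contained sketch of the same thresholding on synthetic mean diffusivities (numpy/scipy only; all values made up):

import numpy as np
from scipy import stats

# Synthetic mean diffusivities: mostly white-matter-like values plus a few
# high outliers standing in for ventricle voxels.
md = np.concatenate([np.random.normal(0.8, 0.05, 500), [2.8, 3.0, 3.1]])

md_median = np.median(md)
q1 = stats.scoreatpercentile(md, 25)
q3 = stats.scoreatpercentile(md, 75)

# Same rule as above: drop voxels above median + 2 * interquartile range.
md_exclude = md_median + 2 * (q3 - q1)
# Counts the excluded voxels (the planted outliers plus any extreme tail values).
print(np.sum(md >= md_exclude))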
Example #3
def sph_cc_ineq(cod_single_mod,
                cod_multi_mod,
                bvals,
                single_thresh,
                multi_thresh,
                tol=0.1):
    """
    Helper function that finds the indices of the voxels satisfying the given
    COD inequalities/equalities and separates the b values.
    
    Parameters
    ----------
    cod_single_mod: 1 dimensional array
        Coefficient of determination at each voxel for the single fODF model
    cod_multi_mod: 1 dimensional array
        Coefficient of determination at each voxel for the multi fODF model
    bvals: 1 dimensional array
        All b values
    single_thresh: int
        Coefficient of determination threshold for the single fODF model
    multi_thresh: int
        Coefficient of determination threshold for the multi fODF model
    tol: float
        Tolerance
        
    Returns
    -------
    inds: 1 dimensional array
        Indices indicating the voxels within the COD inequality/equality
    b_inds: list
        List of indices corresponding to each b value
    all_b_inds: 1 dimensional array
        Indices corresponding to the non-zero b values
    """

    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    all_b_inds = np.where(rounded_bvals != 0)

    if single_thresh > multi_thresh:
        inds = np.where((cod_single_mod > single_thresh)
                        & (cod_multi_mod < multi_thresh)
                        & (cod_multi_mod > multi_thresh - tol))
    elif single_thresh < multi_thresh:
        inds = np.where((cod_single_mod < single_thresh)
                        & (cod_single_mod > single_thresh - tol)
                        & (cod_multi_mod > multi_thresh))
    elif single_thresh == multi_thresh:
        inds = np.where((cod_single_mod < single_thresh + tol / 2)
                        & (cod_single_mod > single_thresh - tol / 2)
                        & (cod_multi_mod < multi_thresh + tol / 2)
                        & (cod_multi_mod > multi_thresh - tol / 2))

    return np.squeeze(inds), b_inds, all_b_inds
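A toy run of the first branch, with made-up COD arrays, shows which voxels the inequality selects:

import numpy as np

# Made-up CODs for four voxels.
cod_single = np.array([0.85, 0.40, 0.55, 0.90])
cod_multi = np.array([0.30, 0.70, 0.32, 0.28])
single_thresh, multi_thresh, tol = 0.8, 0.35, 0.1

# single_thresh > multi_thresh branch: the single fODF model fits well
# while the multi fODF model sits just below its threshold.
inds = np.where((cod_single > single_thresh)
                & (cod_multi < multi_thresh)
                & (cod_multi > multi_thresh - tol))
print(np.squeeze(inds))  # -> [0 3]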
Example #4
def rm_ventricles(wm_data_file, bvals, bvecs, data, data_path):
    """
    Removes the ventricles from the white matter mask and saves a new
    white matter mask.

    Parameters
    ----------
    wm_data_file: obj
        File handle for the white matter mask
    bvals: 1 dimensional array
        All b values
    bvecs: 2 dimensional array
        All the b vectors
    data: 4 dimensional array
        The diffusion data
    data_path: str
        Path to the data directory with all the sub files.

    Returns
    -------
    new_wm_mask: 3 dimensional array
        A new white matter mask with the ventricles removed.
    """
    # Separate b-values and find the indices corresponding to the b0 and
    # bNk measurements where N = the lowest b-value other than 0
    wm_data = wm_data_file.get_data()
    bval_list, b_inds, unique_b, bvals_scaled = ozu.separate_bvals(bvals)
    all_b_idx = np.where(bvals_scaled != 0)
    bNk_b0_inds = np.concatenate((b_inds[0], b_inds[1]))

    # Fit a tensor model
    tm = dti.TensorModel(data[..., bNk_b0_inds], bvecs[:, bNk_b0_inds],
                         bvals[bNk_b0_inds], mask=wm_data,
                         params_file="temp")

    # Find the median, and 25th and 75th percentiles of mean diffusivities
    md_median = np.median(tm.mean_diffusivity[np.where(wm_data)])
    q1 = stats.scoreatpercentile(tm.mean_diffusivity[np.where(wm_data)], 25)
    q3 = stats.scoreatpercentile(tm.mean_diffusivity[np.where(wm_data)], 75)

    # Exclude voxels with MDs above median + 2*interquartile range
    md_exclude = md_median + 2 * (q3 - q1)
    md_include = np.where(tm.mean_diffusivity[np.where(wm_data)] < md_exclude)
    new_wm_mask = np.zeros(wm_data.shape)
    new_wm_mask[np.where(wm_data)[0][md_include],
                np.where(wm_data)[1][md_include],
                np.where(wm_data)[2][md_include]] = 1

    wm = ni.Nifti1Image(new_wm_mask, wm_data_file.get_affine())
    ni.save(wm, os.path.join(data_path, 'wm_mask_no_vent.nii.gz'))

    return new_wm_mask
Example #5
def sph_cc_ineq(cod_single_mod, cod_multi_mod, bvals, single_thresh,
                multi_thresh, tol=0.1):
    """
    Helper function that finds the indices of the voxels satisfying the given
    COD inequalities/equalities and separates the b values.
    
    Parameters
    ----------
    cod_single_mod: 1 dimensional array
        Coefficient of determination at each voxel for the single fODF model
    cod_multi_mod: 1 dimensional array
        Coefficient of determination at each voxel for the multi fODF model
    bvals: 1 dimensional array
        All b values
    single_thresh: int
        Coefficient of determination threshold for the single fODF model
    multi_thresh: int
        Coefficient of determination threshold for the multi fODF model
    tol: float
        Tolerance
        
    Returns
    -------
    inds: 1 dimensional array
        Indices indicating the voxels within the COD inequality/equality
    b_inds: list
        List of indices corresponding to each b value
    all_b_inds: 1 dimensional array
        Indices corresponding to the non-zero b values
    """
    
    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    all_b_inds = np.where(rounded_bvals != 0)
    
    if single_thresh > multi_thresh:
        inds = np.where((cod_single_mod > single_thresh) &
                        (cod_multi_mod < multi_thresh) &
                        (cod_multi_mod > multi_thresh - tol))
    elif single_thresh < multi_thresh:
        inds = np.where((cod_single_mod < single_thresh) &
                        (cod_single_mod > single_thresh - tol) &
                        (cod_multi_mod > multi_thresh))
    elif single_thresh == multi_thresh:
        inds = np.where((cod_single_mod < single_thresh + tol/2) &
                        (cod_single_mod > single_thresh - tol/2) &
                        (cod_multi_mod < multi_thresh + tol/2) &
                        (cod_multi_mod > multi_thresh - tol/2))
        
    return np.squeeze(inds), b_inds, all_b_inds
Example #6
def test_separate_bvals():
    bvals_t = np.array([5, 5, 10, 2010, 1005, 950, 1950, 1000])
    unique_b_t = np.array([0,1000,2000])
    bvals_scaled_t = np.array([0, 0, 0, 2000, 1000, 1000, 2000, 1000])
    bval_list_t = [(np.array([0, 0, 0]))]
    bval_list_t.append(np.array([1000, 1000, 1000]))
    bval_list_t.append(np.array([2000, 2000]))
    bval_ind_t = [np.array([0,1,2]), np.array([4,5,7]), np.array([3,6])]
    
    bval_list, bval_ind, unique_b, bvals_scaled = ozu.separate_bvals(bvals_t)
    npt.assert_equal(unique_b, unique_b_t)
    npt.assert_equal(bvals_scaled, bvals_scaled_t)
    
    for i in np.arange(len(bval_list)):
        npt.assert_equal(np.squeeze(bval_list)[i], bval_list_t[i])
        npt.assert_equal(np.squeeze(bval_ind)[i], bval_ind_t[i])
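This test pins down the expected behavior: b values are rounded to the nearest multiple of 1000 and grouped by shell. A minimal reimplementation consistent with the test (the real ozu.separate_bvals may differ in details, e.g. the `mode` argument used in other examples):

import numpy as np

def separate_bvals_sketch(bvals, factor=1000.):
    # Round b values to the nearest multiple of `factor`, then group the
    # rounded values and their indices per unique (rounded) b value.
    bvals_scaled = factor * np.round(np.asarray(bvals) / factor)
    unique_b = np.unique(bvals_scaled)
    bval_list = [bvals_scaled[bvals_scaled == ub] for ub in unique_b]
    bval_ind = [np.where(bvals_scaled == ub)[0] for ub in unique_b]
    return bval_list, bval_ind, unique_b, bvals_scaled

bvals_t = np.array([5, 5, 10, 2010, 1005, 950, 1950, 1000])
_, _, unique_b, scaled = separate_bvals_sketch(bvals_t)
print(unique_b)  # [   0. 1000. 2000.]
print(scaled)    # [   0.    0.    0. 2000. 1000. 1000. 2000. 1000.]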
Example #7
def test_separate_bvals():
    bvals_t = np.array([5, 5, 10, 2010, 1005, 950, 1950, 1000])
    unique_b_t = np.array([0, 1000, 2000])
    bvals_scaled_t = np.array([0, 0, 0, 2000, 1000, 1000, 2000, 1000])
    bval_list_t = [(np.array([0, 0, 0]))]
    bval_list_t.append(np.array([1000, 1000, 1000]))
    bval_list_t.append(np.array([2000, 2000]))
    bval_ind_t = [np.array([0, 1, 2]), np.array([4, 5, 7]), np.array([3, 6])]

    bval_list, bval_ind, unique_b, bvals_scaled = ozu.separate_bvals(bvals_t)
    npt.assert_equal(unique_b, unique_b_t)
    npt.assert_equal(bvals_scaled, bvals_scaled_t)

    for i in np.arange(len(bval_list)):
        npt.assert_equal(np.squeeze(bval_list)[i], bval_list_t[i])
        npt.assert_equal(np.squeeze(bval_ind)[i], bval_ind_t[i])
Example #8
def scat_prop_snr(log_prop, data, bvals, mask):
    """
    Displays a scatter density plot of SNR versus the slope of the desired property
    
    Parameters
    ----------
    log_prop: list
        List of all the log of the desired property values
    data: 4 dimensional array
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    mask: 3 dimensional array
        Brain mask of the data
    """    
    bval_list, bval_ind, unique_b, _ = ozu.separate_bvals(bvals)
    
    if 0 in unique_b:
        unique_b = unique_b[1:]
    
    bsnr = snr.b_snr(data, bvals, unique_b[0], mask)[np.where(mask)]
    ls_fit_prop = ls_fit_b(log_prop, unique_b)
    
    mpl.scatter_density(bsnr, ls_fit_prop[0, :])
Example #9
def slope(data, bvals, bvecs, prop, mask='None', saved_file='yes'):
    """
    Calculates and displays the slopes of a first-order least squares solution
    fitted to the log of either the fractional anisotropy or the mean
    diffusivity of the tensor model across the brain at different b values.
    
    Parameters
    ----------
    data: 4 dimensional array or Nifti1Image
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    bvecs: 2 dimensional array
        All the b vectors
    prop: str
        String indicating the property to be analyzed
        'FA': Fractional anisotropy
        'MD': Mean diffusivity
    mask: 3 dimensional array or Nifti1Image
        Brain mask of the data
    saved_file: str
        Indicates whether or not you want the function to create or use saved
        parameter files
        'no': Function will not create or use saved files
        'yes': Function will create or use saved files
        
    Returns
    -------
    slopeProp_all: 3 dimensional array
        Slope of the desired property across b values at each voxel
    log_prop: list
        Log of the property values at each b value
    ls_fit: list
        Least squares solutions for the property across b values
    unique_b: 1 dimensional array
        The unique b values
    """
    
    # Making sure inputs are all in the right format for calculations
    data, mask = obtain_data(data, mask)
        
    # Separate b values
    bval_list, bval_ind, unique_b, _ = ozu.separate_bvals(bvals)
    idx_array = np.arange(len(unique_b))
    
    new_bval_list = []
    for bi in idx_array:
        new_bval_list.append(bvals[bval_ind[bi]])
    
    # Add b = 0 values and indices to the other b values for tensor calculation
    bval_ind_wb0, bvals_wb0 = include_b0vals(idx_array, bval_ind, new_bval_list)
        
    # Find the property values for each grouped b values
    idx_mask = np.where(mask)
    log_prop = log_prop_vals(prop, saved_file, data, bvecs, idx_mask,
                             idx_array, bval_ind_wb0, bvals_wb0, mask)
        
    # Fit a first order least squares solution to the specified property versus
    # b value to obtain slopes
    ls_fit = ls_fit_b(log_prop, unique_b/1000)
    
    # Display slopes of property on a mosaic
    slopeProp_all = disp_slopes(mask, ls_fit, prop)

    # Save all the values
    np.save('slope{0}_all.npy'.format(prop), slopeProp_all)
    np.save('log_{0}.npy'.format(prop), log_prop)
    np.save('ls_fit_{0}.npy'.format(prop), ls_fit[0])
    np.save('unique_b.npy', unique_b)
    
    return slopeProp_all, log_prop, ls_fit, unique_b
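`ls_fit_b` is not shown in these examples. A plausible stand-in, assuming `log_prop` stacks one row per b value and a first-order fit is taken per voxel, with slopes in row 0 to match the `ls_fit[0]` indexing above:

import numpy as np

def ls_fit_b_sketch(log_prop, unique_b):
    # First-order least squares fit of log property versus b value for
    # every voxel at once; row 0 holds the slopes, row 1 the intercepts.
    A = np.vstack([unique_b, np.ones(len(unique_b))]).T
    coeffs, _, _, _ = np.linalg.lstsq(A, np.asarray(log_prop), rcond=None)
    return coeffs

# Three b values (scaled by 1000), two voxels.
log_prop = np.log([[0.30, 0.28], [0.25, 0.22], [0.21, 0.18]])
print(ls_fit_b_sketch(log_prop, np.array([1.0, 2.0, 3.0]))[0])  # slopes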
Example #10
def across_sph_cc(vol_b_list,
                  bvals,
                  bvecs,
                  mask,
                  cod_single_mod=None,
                  cod_multi_mod=None,
                  single_thresh=None,
                  multi_thresh=None,
                  idx=None,
                  vol_mp_single=None,
                  tol=0.1,
                  n=20):
    """
    Calculates the spherical cross correlation at a certain index for all b values fit
    together and b values fit separately.
    
    Parameters
    ----------
    vol_b_list: list
        List of the model parameters for each voxel for an fODF fit to each b value.
    bvals: 1 dimensional array
        All b values
    bvecs: 2 dimensional array
        All the b vectors
    mask: 3 dimensional array
        Brain mask of the data
    cod_single_mod: 1 dimensional array
        Coefficient of determination at each voxel for the single fODF model
    cod_multi_mod: 1 dimensional array
        Coefficient of determination at each voxel for the multi fODF model
    single_thresh: int
        Coefficient of determination threshold for the single fODF model
    multi_thresh: int
        Coefficient of determination threshold for the multi fODF model
    idx: int
        Index into the indices indicating the voxels included in the COD (in)equality
    vol_mp_single: 2 dimensional array
        Model parameters from fitting a single fODF to each voxel
    tol: float
        Tolerance for the COD (in)equality
    n: int
        Integer indicating the number of directions to divide by for spherical
        cross-correlation
    
    Returns
    -------
    deg_list: list
        List indicating the degrees included in spherical cross-correlation
    cc_list: list
        List with the cross-correlations between each combination of b values
    combos: list
        List of the b value combinations used for each cross-correlation
    idx: int
        Index into the indices indicating the voxels included in the COD (in)equality
    cod_s: float
        Coefficient of determination of single fODF model
    cod_m: float
        Coefficient of determination of multi fODF model
    """
    if (single_thresh is not None) and (multi_thresh is not None):
        # Get the indices with a desired COD.
        inds, b_inds, all_b_inds = sph_cc_ineq(cod_single_mod,
                                               cod_multi_mod,
                                               bvals,
                                               single_thresh,
                                               multi_thresh,
                                               tol=tol)
    else:
        # With no COD threshold, just find all the indices.
        inds = np.arange(int(np.sum(mask)))
        bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
        all_b_inds = np.where(rounded_bvals != 0)

    if idx is None:
        # If a specific index is not given, just pick a random one.
        ri = np.random.randint(0, len(inds))
        idx = inds[ri]

    data_list = []
    bvecs_b_list = []
    deg_list = []
    cc_list = []

    pool = np.arange(len(vol_b_list))

    for ii in pool:
        # Just get the data (model parameters) and bvecs within the chosen voxel (idx)
        # for each b value and mirror them.
        data_list.append(
            np.concatenate((vol_b_list[ii][np.where(mask)][idx],
                            vol_b_list[ii][np.where(mask)][idx]), -1))
        bvecs_b_list.append(
            np.squeeze(
                np.concatenate(
                    (bvecs[:, b_inds[ii + 1]], -1 * bvecs[:, b_inds[ii + 1]]),
                    -1)).T)
    if vol_mp_single is None:
        # Make combinations of b values for spherical cross correlation between b values
        combos = list(itertools.combinations(pool, 2))
        this_iter = np.arange(len(combos))
    else:
        # No need for combos since we compare between the single fODF and the multi fODF
        combos = None
        this_iter = np.arange(len(vol_b_list))
        bvecs_all = np.squeeze(
            np.concatenate((bvecs[:, all_b_inds], -1 * bvecs[:, all_b_inds]),
                           -1)).T
        data_all = np.concatenate((vol_mp_single[np.where(mask)][idx],
                                   vol_mp_single[np.where(mask)][idx]), -1)

    for itr in this_iter:
        if vol_mp_single is None:
            # Inputs are data and bvecs from two different b values that you want to find the
            # spherical cross-correlation between
            inputs = [
                np.squeeze(data_list[combos[itr][0]]),
                np.squeeze(data_list[combos[itr][1]]),
                bvecs_b_list[combos[itr][0]], bvecs_b_list[combos[itr][1]]
            ]
        else:
            # Inputs are the data and bvecs from one b value and the single fODF
            inputs = [
                np.squeeze(data_all),
                np.squeeze(data_list[itr]), bvecs_all, bvecs_b_list[itr]
            ]
        # Put the inputs into the spherical cross-correlation function
        deg, cc = ozu.sph_cc(*inputs, n=n)
        deg_list.append(deg)
        cc_list.append(cc)

    if (single_thresh is not None) and (multi_thresh is not None):
        # Because sometimes it's nice to know what the actual CODs are
        cod_s = cod_single_mod[idx]
        cod_m = cod_multi_mod[idx]
    else:
        cod_s = None
        cod_m = None

    return deg_list, cc_list, combos, idx, cod_s, cod_m
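The concatenation with `-1 * bvecs` above exploits the antipodal symmetry of diffusion measurements: each direction set is mirrored before the spherical cross-correlation. In isolation:

import numpy as np

# b vectors come as a (3, N) array; mirroring doubles N, and the transpose
# yields the (2N, 3) layout passed to the cross-correlation above.
bvecs_b = np.random.randn(3, 4)
bvecs_b /= np.linalg.norm(bvecs_b, axis=0)
mirrored = np.concatenate((bvecs_b, -1 * bvecs_b), -1).T
print(mirrored.shape)  # (8, 3)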
Example #11
def kfold_xval_MD_mod(data,
                      bvals,
                      bvecs,
                      mask,
                      func,
                      n,
                      factor=1000,
                      initial="preset",
                      bounds="preset",
                      params_file='temp',
                      signal="relative_signal"):
    """
    Finds the parameters of the given function to the given data
    that minimizes the sum squared errors using kfold cross validation.

    Parameters
    ----------
    data: 4 dimensional array
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    bvecs: 3 dimensional array
        All the b vectors
    mask: 3 dimensional array
        Brain mask of the data
    func: function handle
        Mean model to perform kfold cross-validation on.
    initial: tuple
        Initial values for the parameters.
    n: int
        Integer indicating the percent of vertices that you want to predict
    factor: int
        Integer indicating the scaling factor for the b values
    bounds: list
        List containing tuples indicating the bounds for each parameter in
        the mean model function.

    Returns
    -------
    cod: 1 dimensional array
        Coefficient of determination between data and predicted values
    predicted: 2 dimensional array
        Predicted mean for the vertices left out of the fit
    """
    if isinstance(func, str):
        # Grab the function handle for the desired isotropic model
        func = globals()[func]

    # Get the initial values for the desired isotropic model
    if (bounds == "preset") | (initial == "preset"):
        all_params = initial_params(data,
                                    bvecs,
                                    bvals,
                                    func,
                                    mask=mask,
                                    params_file=params_file)
        if bounds == "preset":
            bounds = all_params[0]
        if initial == "preset":
            func_initial = all_params[1]
    else:
        this_initial = initial

    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    all_b_idx, b0_inds = _diffusion_inds(bvals, b_inds, rounded_bvals)

    b_scaled = bvals / factor
    flat_data = data[np.where(mask)]

    # Pre-allocate outputs
    ss_err = np.zeros(int(np.sum(mask)))
    predict_out = np.zeros((int(np.sum(mask)), len(all_b_idx)))

    # Setting up for creating combinations of directions for kfold cross
    # validation:

    # Number of directions to leave out at a time.
    num_choose = (n / 100.) * len(all_b_idx)

    # Find the indices to all the non-b = 0 directions and shuffle them.
    vec_pool = np.arange(len(all_b_idx))
    np.random.shuffle(vec_pool)
    all_inc_0 = np.arange(len(rounded_bvals))

    # Start cross-validation
    for combo_num in np.arange(np.floor(100. / n)):
        (si, vec_combo, vec_combo_rm0, vec_pool_inds, these_bvecs, these_bvals,
         this_data, these_inc0) = ozu.create_combos(bvecs, bvals, data,
                                                    all_b_idx,
                                                    np.arange(len(all_b_idx)),
                                                    all_b_idx, vec_pool,
                                                    num_choose, combo_num)

        these_b = b_scaled[vec_combo]  # b values to predict
        for vox in np.arange(np.sum(mask)).astype(int):
            s0 = np.mean(flat_data[vox, b0_inds], -1)

            if initial == "preset":
                this_initial = func_initial[vox]

            input_signal = flat_data[vox, these_inc0] / s0

            if signal == "log":
                input_signal = np.log(input_signal)

            # Fit mean model to part of the data
            if bounds is None:
                params, _ = opt.leastsq(err_func,
                                        this_initial,
                                        args=(b_scaled[these_inc0],
                                              input_signal, func))
            else:
                lsq_b_out = lsq.leastsqbound(err_func,
                                             this_initial,
                                             args=(b_scaled[these_inc0],
                                                   input_signal, func),
                                             bounds=bounds)
                params = lsq_b_out[0]
            # Predict the mean values of the left-out b values using the
            # parameters fit to the rest.
            predict_out[vox, vec_combo_rm0] = func(these_b, *params)

    # Find the relative diffusion signal.
    s0 = np.mean(flat_data[:, b0_inds], -1).astype(float)
    input_signal = flat_data[:, all_b_idx] / s0[..., None]
    if signal == "log":
        input_signal = np.log(input_signal)
    cod = ozu.coeff_of_determination(input_signal, predict_out, axis=-1)

    return cod, predict_out
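`ozu.create_combos` is not shown here. A sketch of the leave-n-percent-out bookkeeping it presumably performs, given the `num_choose` and `vec_pool` setup above (direction count made up):

import numpy as np

# With n = 10, each of the floor(100 / 10) = 10 folds leaves out 10% of
# the shuffled non-b0 directions.
n = 10
all_b_idx = np.arange(90)                      # stand-in: 90 directions
num_choose = int((n / 100.) * len(all_b_idx))  # 9 directions per fold
vec_pool = np.arange(len(all_b_idx))
np.random.shuffle(vec_pool)
folds = [vec_pool[k * num_choose:(k + 1) * num_choose]
         for k in np.arange(int(np.floor(100. / n)))]
print([len(f) for f in folds])  # ten folds of nine directions each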
Example #12
def place_files(file_names, mask_vox_num, expected_file_num, mask_data,
                data, bvals, file_path=os.getcwd(), vol=False,
                f_type="npy", save=False):
    """
    Aggregates the sub data files produced by parallelization.  Assumes that
    the sub files are named in the format:
    (file name)_(number of sub_file).(file_type)

    Parameters
    ----------
    file_names: list
        List of strings indicating the base file names for each output data
        aggregation
    mask_vox_num: int
        Number of voxels in each sub file
    expected_file_num: int
        Expected number of sub files
    mask_data: 3 dimensional array
        White matter mask
    data: 4 dimensional array
        The diffusion data
    bvals: 1 dimensional array
        The b-values used for the diffusion data
    file_path: str
        Path to the directory with all the sub files.  Default is the current
        directory
    vol: bool
        Whether or not the sub files are in volumes and whether the output
        files are saved as volumes as well
    f_type: str
        String indicating the type of file the sub files are saved as
    save: bool
        Whether or not to save the output aggregations/volumes

    Returns
    -------
    missing_files_list: list
        For each file name, the sub files missing from the aggregation
    aggre_list: list
        List with all the aggregations/volumes
    """
    files = os.listdir(file_path)

    # Get data and indices
    mask_idx = np.where(mask_data)

    bval_list, b_inds, unique_b, bvals_scaled = ozu.separate_bvals(bvals)
    all_b_idx = np.where(bvals_scaled != 0)

    # Remove voxels from the mask that contain zero signal values and turn the
    # mask into linear form.
    S0 = np.mean(data[..., b_inds[0]],-1)
    pre_mask = np.array(mask_data, dtype=bool)
    ravel_mask = np.ravel(pre_mask).astype(int)
    ravel_mask[np.where(ravel_mask)[0][np.where(S0[pre_mask] == 0)]] = 2
    ravel_mask = ravel_mask[np.where(ravel_mask != 0)]
    ravel_mask[np.where(ravel_mask == 2)] = 0
    ravel_mask = ravel_mask.astype(bool)

    aggre_list = []
    missing_files_list = []
    for fn in file_names:
        count = 0
        # Keep track of files in case there are any missing ones
        i_track = np.ones(expected_file_num)

        # If you don't want to put the voxels back into a volume, just
        # preallocate enough for each voxel included in the mask.

        for f_idx in np.arange(len(files)):
            this_file = files[f_idx]
            if this_file.endswith(f_type):

                if f_type == "npy":
                    sub_data = np.load(os.path.join(file_path, this_file))

                elif f_type == "nii.gz":
                    sub_data = ni.load(os.path.join(file_path,
                                        this_file)).get_data()

                # If the name of this file is equal to the file name that you
                # want to aggregate, find the voxels corresponding to its
                # location in the given mask.
                if this_file[0:len(fn)] == fn:
                    if count == 0:
                        if len(sub_data.shape) == 1:
                            num_dirs = 1

                        else:
                            num_dirs = sub_data.shape[-1]

                        if vol is False:
                            aggre = np.squeeze(ozu.nans((int(np.sum(mask_data)),
                                                               ) + (num_dirs,)))
                        else:
                            aggre = np.squeeze(ozu.nans(mask_data.shape +
                                                        (num_dirs,)))

                    count = count + 1
                    # Find the location in this aggregate for this file piece.
                    i = int(this_file.split(".")[0][len(fn):])
                    low = i*mask_vox_num
                    high = np.min([(i+1) * mask_vox_num,
                                int(np.sum(mask_data))])

                    # If you don't have a volume input and don't want a volume
                    # output, just aggregate the output files.
                    if vol is False:
                        if sub_data.shape[0] > aggre[low:high][ravel_mask[
                                                      low:high]].shape[0]:
                            aggre[low:high][ravel_mask[low:high]] = np.squeeze(
                                                sub_data)[ravel_mask[low:high]]
                        else:
                            aggre[low:high][ravel_mask[low:high]] = np.squeeze(
                                                                      sub_data)
                    else:
                        mask = np.zeros(mask_data.shape)
                        mask[mask_idx[0][low:high][ravel_mask[low:high]],
                             mask_idx[1][low:high][ravel_mask[low:high]],
                             mask_idx[2][low:high][ravel_mask[low:high]]] = 1
                        aggre[np.where(mask)] = sub_data
                    # If the file is present, change its index within the
                    # tracking array to 0.
                    i_track[i] = 0

        missing_files_list.append(np.squeeze(np.where(i_track)))
        aggre_list.append(aggre)

        if save is True:
            if vol is False:
                np.save("aggre_%s.npy"%fn, aggre)
            else:
                # The original code referenced an undefined `mask_data_file`
                # here; an identity affine is assumed as a placeholder.
                aff = np.eye(4)
                ni.Nifti1Image(aggre, aff).to_filename("vol_%s.nii.gz" % fn)

    return missing_files_list, aggre_list
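Each sub file's numeric suffix selects a contiguous block of mask voxels. With a hypothetical file name and the slicing used above:

# Hypothetical sub file name following the (file name)_(number).(file_type)
# convention assumed by place_files; fn must include the trailing underscore.
this_file = "cod_multi_7.npy"
fn = "cod_multi_"
mask_vox_num = 2000

i = int(this_file.split(".")[0][len(fn):])            # -> 7
low, high = i * mask_vox_num, (i + 1) * mask_vox_num  # voxels 14000:16000
print(i, low, high)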
Example #13
def isotropic_params(data,
                     bvals,
                     bvecs,
                     mask,
                     func,
                     factor=1000,
                     initial="preset",
                     bounds="preset",
                     params_file='temp',
                     signal="relative_signal"):
    """
    Finds the parameters of the given function to the given data
    that minimizes the sum squared errors.

    Parameters
    ----------
    data: 4 dimensional array
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    bvecs: 2 dimensional array
        All the b vectors
    mask: 3 dimensional array
        Brain mask of the data
    func: str or callable
        String indicating the mean model function to perform kfold
        cross-validation on.
    initial: tuple
        Initial values for the parameters.
    factor: int
        Integer indicating the scaling factor for the b values
    bounds: list
        List containing tuples indicating the bounds for each parameter in
        the mean model function.

    Returns
    -------
    param_out: 2 dimensional array
        Parameters that minimize the residuals
    fit_out: 2 dimensional array
        Model fitted means
    cod: 1 dimensional array
        Coefficient of determination between the model fitted means and the
        actual means
    """
    if isinstance(func, str):
        # Grab the function handle for the desired mean model
        func = globals()[func]

    # Get the initial values for the desired mean model
    if (bounds == "preset") | (initial == "preset"):
        all_params = initial_params(data,
                                    bvecs,
                                    bvals,
                                    func,
                                    mask=mask,
                                    params_file=params_file)
    if bounds == "preset":
        bounds = all_params[0]
    if initial == "preset":
        func_initial = all_params[1]
    else:
        this_initial = initial

    # Separate b values and grab their indices
    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    all_b_idx, b0_inds = _diffusion_inds(bvals, b_inds, rounded_bvals)

    # Divide the b values by a scaling factor first.
    b = bvals[all_b_idx] / factor
    flat_data = data[np.where(mask)]

    # Get the number of inputs to the mean diffusivity function
    param_num = len(inspect.getargspec(func)[0])

    # Pre-allocate the outputs:
    param_out = np.zeros((int(np.sum(mask)), param_num - 1))
    cod = ozu.nans(np.sum(mask))
    fit_out = ozu.nans(cod.shape + (len(all_b_idx), ))

    prog_bar = ozu.ProgressBar(flat_data.shape[0])

    for vox in np.arange(np.sum(mask)).astype(int):
        prog_bar.animate(vox)
        s0 = np.mean(flat_data[vox, b0_inds], -1)

        if initial == "preset":
            this_initial = func_initial[vox]

        input_signal = flat_data[vox, all_b_idx] / s0
        if signal == "log":
            input_signal = np.log(input_signal)

        if bounds is None:
            params, _ = opt.leastsq(err_func,
                                    this_initial,
                                    args=(b, input_signal, func))
        else:
            lsq_b_out = lsq.leastsqbound(err_func,
                                         this_initial,
                                         args=(b, input_signal, func),
                                         bounds=bounds)
            params = lsq_b_out[0]

        param_out[vox] = np.squeeze(params)
        fit_out[vox] = func(b, *params)
        cod[vox] = ozu.coeff_of_determination(input_signal, fit_out[vox])

    return param_out, fit_out, cod
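`err_func` and the isotropic model functions are defined elsewhere in the module. A plausible sketch, assuming `err_func` returns the residuals minimized by opt.leastsq and that single_exp_rs is a single-exponential relative-signal model:

import numpy as np

def err_func_sketch(params, b, measured, func):
    # Residuals between the measured relative signal and the model.
    return measured - func(b, *params)

def single_exp_rs_sketch(b, D):
    # Single-exponential decay of the relative diffusion signal.
    return np.exp(-b * D)

b = np.array([1.0, 2.0, 3.0])
signal = np.exp(-0.8 * b)
print(err_func_sketch([0.8], b, signal, single_exp_rs_sketch))  # ~[0. 0. 0.]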
Example #14
if __name__ == "__main__":
    sid = "103414"
    hcp_path = "/biac4/wandell/data/klchan13/hcp_data_q3"
    data_path = os.path.join(hcp_path, "%s/T1w/Diffusion" % sid)

    data_file = nib.load(os.path.join(data_path, "data.nii.gz"))
    wm_data_file = nib.load(os.path.join(data_path, "wm_mask_no_vent.nii.gz"))

    data = data_file.get_data()
    wm_data = wm_data_file.get_data()
    wm_idx = np.where(wm_data == 1)

    bvals = np.loadtxt(os.path.join(data_path, "bvals"))
    bvecs = np.loadtxt(os.path.join(data_path, "bvecs"))

    bval_list, b_inds, unique_b, bvals_scaled = ozu.separate_bvals(bvals)
    all_b_idx = np.where(bvals_scaled != 0)

    ad_rd = np.loadtxt(os.path.join(data_path, "ad_rd_%s.txt" % sid))
    ad = {1000: ad_rd[0, 0], 2000: ad_rd[0, 1], 3000: ad_rd[0, 2]}
    rd = {1000: ad_rd[1, 0], 2000: ad_rd[1, 1], 3000: ad_rd[1, 2]}

    b_inds_w0 = np.concatenate((b_inds[0], b_inds[3]))
    actual, predicted = pn.kfold_xval_gen(
        dti.TensorModel, data[..., b_inds_w0], bvecs[:, b_inds_w0], bvals[b_inds_w0], 10, mask=wm_data
    )
    cod = ozu.coeff_of_determination(actual, predicted)
    np.save(os.path.join(data_path, "dtm_predict_b3k.npy"), predicted)
    np.save(os.path.join(data_path, "dtm_cod_b3k.npy"), cod)
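`ozu.coeff_of_determination` is used throughout these examples. A sketch assuming the conventional R-squared definition reported as a percentage (the real helper may differ):

import numpy as np

def coeff_of_determination_sketch(actual, predicted, axis=-1):
    # 100 * (1 - SS_err / SS_tot) along the last axis by default.
    ss_err = np.sum((actual - predicted) ** 2, axis=axis)
    demeaned = actual - np.mean(actual, axis=axis, keepdims=True)
    ss_tot = np.sum(demeaned ** 2, axis=axis)
    return 100 * (1 - ss_err / ss_tot)

actual = np.array([[1.0, 2.0, 3.0, 4.0]])
noisy = actual + np.array([[0.1, -0.1, 0.1, -0.1]])
print(coeff_of_determination_sketch(actual, noisy))  # close to 100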
Example #15
def isotropic_params(data, bvals, bvecs, mask, func, factor=1000,
                     initial="preset", bounds="preset", params_file='temp',
                     signal="relative_signal"):
    """
    Finds the parameters of the given function to the given data
    that minimizes the sum squared errors.

    Parameters
    ----------
    data: 4 dimensional array
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    bvecs: 2 dimensional array
        All the b vectors
    mask: 3 dimensional array
        Brain mask of the data
    func: str or callable
        String indicating the mean model function to perform kfold
        cross-validation on.
    initial: tuple
        Initial values for the parameters.
    factor: int
        Integer indicating the scaling factor for the b values
    bounds: list
        List containing tuples indicating the bounds for each parameter in
        the mean model function.

    Returns
    -------
    param_out: 2 dimensional array
        Parameters that minimize the residuals
    fit_out: 2 dimensional array
        Model fitted means
    cod: 1 dimensional array
        Coefficient of determination between the model fitted means and the
        actual means
    """
    if isinstance(func, str):
        # Grab the function handle for the desired mean model
        func = globals()[func]

    # Get the initial values for the desired mean model
    if (bounds == "preset") | (initial == "preset"):
        all_params = initial_params(data, bvecs, bvals, func, mask=mask,
                                    params_file=params_file)
    if bounds == "preset":
        bounds = all_params[0]
    if initial == "preset":
        func_initial = all_params[1]
    else:
        this_initial = initial

    # Separate b values and grab their indices
    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    all_b_idx, b0_inds = _diffusion_inds(bvals, b_inds, rounded_bvals)

    # Divide the b values by a scaling factor first.
    b = bvals[all_b_idx]/factor
    flat_data = data[np.where(mask)]

    # Get the number of inputs to the mean diffusivity function
    param_num = len(inspect.getargspec(func)[0])

    # Pre-allocate the outputs:
    param_out = np.zeros((int(np.sum(mask)), param_num - 1))
    cod = ozu.nans(np.sum(mask))
    fit_out = ozu.nans(cod.shape + (len(all_b_idx),))

    for vox in np.arange(np.sum(mask)).astype(int):
        s0 = np.mean(flat_data[vox, b0_inds], -1)

        if initial == "preset":
            this_initial = func_initial[vox]

        input_signal = flat_data[vox, all_b_idx]/s0
        if signal == "log":
            input_signal = np.log(input_signal)

        if bounds is None:
            params, _ = opt.leastsq(err_func, this_initial,
                                    args=(b, input_signal, func))
        else:
            lsq_b_out = lsq.leastsqbound(err_func, this_initial,
                                         args=(b, input_signal, func),
                                         bounds=bounds)
            params = lsq_b_out[0]

        param_out[vox] = np.squeeze(params)
        fit_out[vox] = func(b, *params)
        cod[vox] = ozu.coeff_of_determination(input_signal, fit_out[vox])

    return param_out, fit_out, cod
Example #16
    low = i * 2000
    # Make sure not to go over the edge of the mask:
    high = np.min([(i + 1) * 2000, int(np.sum(wm_data))])
    
    # Preserve memory by getting rid of this data:
    del wm_data

    # Now set the mask:
    mask = np.zeros(wm_data_file.shape)
    mask[wm_idx[0][low:high], wm_idx[1][low:high], wm_idx[2][low:high]] = 1

    # Predict 10% (n = 10)
    ad = {1000: 1.6386920952169737, 2000: 1.2919249903637751,
          3000: 0.99962593218241236}
    rd = {1000: 0.33450124887561905, 2000: 0.28377379537043729,
          3000: 0.24611723207420028}
    
    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    _, b_inds_rm0, _, _ = ozu.separate_bvals(bvals, mode = "remove0")
    all_b_idx = np.where(rounded_bvals != 0)
    
    mod_full = sfm.SparseDeconvolutionModelMultiB(data, bvecs, bvals,
                                                  mask=mask,
                                                  params_file="temp",
                                                  solver="nnls",
                                                  mean="mean_model",
                                                  axial_diffusivity=ad,
                                                  radial_diffusivity=rd)
    sig_out, new_params = mod_full.fit_flat_rel_sig_avg
    for idx in np.arange(1, len(unique_b)):
        these_b_inds = np.concatenate((b_inds[0], b_inds[idx]))
        b_mod = sfm.SparseDeconvolutionModelMultiB(data[..., these_b_inds],
                                                   bvecs[:, these_b_inds],
                                                   bvals[these_b_inds],
                                                   mask=mask,
                                                   params_file="temp",
                                                   solver="nnls",
                                                   mean="mean_model",
                                                   axial_diffusivity=ad,
                                                   radial_diffusivity=rd)
Example #17
File: io.py  Project: zhangerjun/osmosis
def place_files(file_names,
                mask_vox_num,
                expected_file_num,
                mask_data,
                data,
                bvals,
                file_path=os.getcwd(),
                vol=False,
                f_type="npy",
                save=False):
    """
    Aggregates the sub data files produced by parallelization.  Assumes that
    the sub files are named in the format:
    (file name)_(number of sub_file).(file_type)

    Parameters
    ----------
    file_names: list
        List of strings indicating the base file names for each output data
        aggregation
    mask_vox_num: int
        Number of voxels in each sub file
    expected_file_num: int
        Expected number of sub files
    mask_data: 3 dimensional array
        White matter mask
    data: 4 dimensional array
        The diffusion data
    bvals: 1 dimensional array
        The b-values used for the diffusion data
    file_path: str
        Path to the directory with all the sub files.  Default is the current
        directory
    vol: bool
        Whether or not the sub files are in volumes and whether the output
        files are saved as volumes as well
    f_type: str
        String indicating the type of file the sub files are saved as
    save: bool
        Whether or not to save the output aggregations/volumes

    Returns
    -------
    missing_files_list: list
        For each file name, the sub files missing from the aggregation
    aggre_list: list
        List with all the aggregations/volumes
    """
    files = os.listdir(file_path)

    # Get data and indices
    mask_idx = np.where(mask_data)

    bval_list, b_inds, unique_b, bvals_scaled = ozu.separate_bvals(bvals)
    all_b_idx = np.where(bvals_scaled != 0)

    # Remove voxels from the mask that contain zero signal values and turn the
    # mask into linear form.
    S0 = np.mean(data[..., b_inds[0]], -1)
    pre_mask = np.array(mask_data, dtype=bool)
    ravel_mask = np.ravel(pre_mask).astype(int)
    ravel_mask[np.where(ravel_mask)[0][np.where(S0[pre_mask] == 0)]] = 2
    ravel_mask = ravel_mask[np.where(ravel_mask != 0)]
    ravel_mask[np.where(ravel_mask == 2)] = 0
    ravel_mask = ravel_mask.astype(bool)

    aggre_list = []
    missing_files_list = []
    for fn in file_names:
        count = 0
        # Keep track of files in case there are any missing ones
        i_track = np.ones(expected_file_num)

        # If you don't want to put the voxels back into a volume, just
        # preallocate enough for each voxel included in the mask.

        for f_idx in np.arange(len(files)):
            this_file = files[f_idx]
            if this_file.endswith(f_type):

                if f_type == "npy":
                    sub_data = np.load(os.path.join(file_path, this_file))

                elif f_type == "nii.gz":
                    sub_data = ni.load(os.path.join(file_path,
                                                    this_file)).get_data()

                # If the name of this file is equal to the file name that you
                # want to aggregate, find the voxels corresponding to its
                # location in the given mask.
                if this_file[0:len(fn)] == fn:
                    if count == 0:
                        if len(sub_data.shape) == 1:
                            num_dirs = 1

                        else:
                            num_dirs = sub_data.shape[-1]

                        if vol is False:
                            aggre = np.squeeze(
                                ozu.nans((int(np.sum(mask_data)), ) +
                                         (num_dirs, )))
                        else:
                            aggre = np.squeeze(
                                ozu.nans(mask_data.shape + (num_dirs, )))

                    count = count + 1
                    # Find the location in this aggregate for this file piece.
                    i = int(this_file.split(".")[0][len(fn):])
                    low = i * mask_vox_num
                    high = np.min([(i + 1) * mask_vox_num,
                                   int(np.sum(mask_data))])

                    # If you don't have a volume input and don't want a volume
                    # output, just aggregate the output files.
                    if vol is False:
                        if sub_data.shape[0] > aggre[low:high][
                                ravel_mask[low:high]].shape[0]:
                            aggre[low:high][ravel_mask[low:high]] = np.squeeze(
                                sub_data)[ravel_mask[low:high]]
                        else:
                            aggre[low:high][ravel_mask[low:high]] = np.squeeze(
                                sub_data)
                    else:
                        mask = np.zeros(mask_data.shape)
                        mask[mask_idx[0][low:high][ravel_mask[low:high]],
                             mask_idx[1][low:high][ravel_mask[low:high]],
                             mask_idx[2][low:high][ravel_mask[low:high]]] = 1
                        aggre[np.where(mask)] = sub_data
                    # If the file is present, change its index within the
                    # tracking array to 0.
                    i_track[i] = 0

        missing_files_list.append(np.squeeze(np.where(i_track)))
        aggre_list.append(aggre)

        if save is True:
            if vol is False:
                np.save("aggre_%s.npy" % fn, aggre)
            else:
                # The original code referenced an undefined `mask_data_file`
                # here; an identity affine is assumed as a placeholder.
                aff = np.eye(4)
                ni.Nifti1Image(aggre, aff).to_filename("vol_%s.nii.gz" % fn)

    return missing_files_list, aggre_list
Example #18
def kfold_xval_MD_mod(data, bvals, bvecs, mask, func, n, factor=1000,
                      initial="preset", bounds="preset", params_file='temp',
                      signal="relative_signal"):
    """
    Finds the parameters of the given function to the given data
    that minimizes the sum squared errors using kfold cross validation.

    Parameters
    ----------
    data: 4 dimensional array
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    bvecs: 3 dimensional array
        All the b vectors
    mask: 3 dimensional array
        Brain mask of the data
    func: function handle
        Mean model to perform kfold cross-validation on.
    initial: tuple
        Initial values for the parameters.
    n: int
        Integer indicating the percent of vertices that you want to predict
    factor: int
        Integer indicating the scaling factor for the b values
    bounds: list
        List containing tuples indicating the bounds for each parameter in
        the mean model function.

    Returns
    -------
    cod: 1 dimensional array
        Coefficient of determination between data and predicted values
    predicted: 2 dimensional array
        Predicted mean for the vertices left out of the fit
    """
    if isinstance(func, str):
        # Grab the function handle for the desired mean model
        func = globals()[func]

    # Get the initial values for the desired mean model
    if (bounds == "preset") | (initial == "preset"):
        all_params = initial_params(data, bvecs, bvals, func, mask=mask,
                                    params_file=params_file)
    if bounds == "preset":
        bounds = all_params[0]
    if initial == "preset":
        func_initial = all_params[1]
    else:
        this_initial = initial

    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    all_b_idx, b0_inds = _diffusion_inds(bvals, b_inds, rounded_bvals)

    b_scaled = bvals/factor
    flat_data = data[np.where(mask)]

    # Pre-allocate outputs
    ss_err = np.zeros(int(np.sum(mask)))
    predict_out = np.zeros((int(np.sum(mask)),len(all_b_idx)))

    # Setting up for creating combinations of directions for kfold cross
    # validation:

    # Number of directions to leave out at a time.
    num_choose = (n/100.)*len(all_b_idx)

    # Find the indices to all the non-b = 0 directions and shuffle them.
    vec_pool = np.arange(len(all_b_idx))
    np.random.shuffle(vec_pool)
    all_inc_0 = np.arange(len(rounded_bvals))

    # Start cross-validation
    for combo_num in np.arange(np.floor(100./n)):
        (si, vec_combo, vec_combo_rm0,
         vec_pool_inds, these_bvecs, these_bvals,
         this_data, these_inc0) = ozu.create_combos(bvecs, bvals, data,
                                                    all_b_idx,
                                                    np.arange(len(all_b_idx)),
                                                    all_b_idx, vec_pool,
                                                    num_choose, combo_num)
        this_flat_data = this_data[np.where(mask)]

        for vox in np.arange(np.sum(mask)).astype(int):
            s0 = np.mean(flat_data[vox, b0_inds], -1)
            these_b = b_scaled[vec_combo] # b values to predict

            if initial == "preset":
                this_initial = func_initial[vox]

            input_signal = flat_data[vox, these_inc0]/s0

            if signal == "log":
                input_signal = np.log(input_signal)

            # Fit mean model to part of the data
            if bounds is None:
                params, _ = opt.leastsq(err_func, this_initial,
                                        args=(b_scaled[these_inc0],
                                              input_signal, func))
            else:
                lsq_b_out = lsq.leastsqbound(err_func, this_initial,
                                             args=(b_scaled[these_inc0],
                                                   input_signal, func),
                                             bounds=bounds)
                params = lsq_b_out[0]
            # Predict the mean values of the left out b values using the
            # parameters from fitting to part of the b values.
            predict_out[vox, vec_combo_rm0] = func(these_b, *params)

    # Find the relative diffusion signal.
    s0 = np.mean(flat_data[:, b0_inds], -1).astype(float)
    input_signal = flat_data[:, all_b_idx]/s0[..., None]
    if signal == "log":
        input_signal = np.log(input_signal)
    cod = ozu.coeff_of_determination(input_signal, predict_out, axis=-1)

    return cod, predict_out
Example #19
if __name__ == "__main__":
    sid = "103414"
    hcp_path = '/biac4/wandell/data/klchan13/hcp_data_q3'
    data_path = os.path.join(hcp_path, "%s/T1w/Diffusion" % sid)

    data_file = nib.load(os.path.join(data_path, "data.nii.gz"))
    wm_data_file = nib.load(os.path.join(data_path, "wm_mask_no_vent.nii.gz"))

    data = data_file.get_data()
    wm_data = wm_data_file.get_data()
    wm_idx = np.where(wm_data == 1)

    bvals = np.loadtxt(os.path.join(data_path, "bvals"))
    bvecs = np.loadtxt(os.path.join(data_path, "bvecs"))

    bval_list, b_inds, unique_b, bvals_scaled = ozu.separate_bvals(bvals)
    all_b_idx = np.where(bvals_scaled != 0)

    ad_rd = np.loadtxt(os.path.join(data_path, "ad_rd_%s.txt" % sid))
    ad = {1000: ad_rd[0, 0], 2000: ad_rd[0, 1], 3000: ad_rd[0, 2]}
    rd = {1000: ad_rd[1, 0], 2000: ad_rd[1, 1], 3000: ad_rd[1, 2]}

    b_inds_w0 = np.concatenate((b_inds[0], b_inds[3]))
    actual, predicted = pn.kfold_xval_gen(dti.TensorModel,
                                          data[..., b_inds_w0],
                                          bvecs[:, b_inds_w0],
                                          bvals[b_inds_w0],
                                          10,
                                          mask=wm_data)
    cod = ozu.coeff_of_determination(actual, predicted)
    np.save(os.path.join(data_path, "dtm_predict_b3k.npy"), predicted)
    np.save(os.path.join(data_path, "dtm_cod_b3k.npy"), cod)
Example #20
    mask = np.zeros(wm_data_file.shape)
    mask[wm_idx[0][low:high], wm_idx[1][low:high], wm_idx[2][low:high]] = 1

    # Predict 10% (n = 10)
    ad = {
        1000: 1.6386920952169737,
        2000: 1.2919249903637751,
        3000: 0.99962593218241236
    }
    rd = {
        1000: 0.33450124887561905,
        2000: 0.28377379537043729,
        3000: 0.24611723207420028
    }

    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    _, b_inds_rm0, _, _ = ozu.separate_bvals(bvals, mode="remove0")
    all_b_idx = np.where(rounded_bvals != 0)

    mod_full = sfm.SparseDeconvolutionModelMultiB(data,
                                                  bvecs,
                                                  bvals,
                                                  mask=mask,
                                                  params_file="temp",
                                                  solver="nnls",
                                                  mean="mean_model",
                                                  axial_diffusivity=ad,
                                                  radial_diffusivity=rd)
    sig_out, new_params = mod_full.fit_flat_rel_sig_avg
    for idx in np.arange(1, len(unique_b)):
        these_b_inds = np.concatenate((b_inds[0], b_inds[idx]))
Example #21
def across_sph_cc(vol_b_list, bvals, bvecs, mask, cod_single_mod=None,
                  cod_multi_mod=None, single_thresh=None, multi_thresh=None,
                  idx=None, vol_mp_single=None, tol=0.1, n=20):
    """
    Calculates the spherical cross correlation at a certain index for all b values fit
    together and b values fit separately.
    
    Parameters
    ----------
    vol_b_list: list
        List of the model parameters for each voxel for an fODF fit to each b value.
    bvals: 1 dimensional array
        All b values
    bvecs: 2 dimensional array
        All the b vectors
    mask: 3 dimensional array
        Brain mask of the data
    cod_single_mod: 1 dimensional array
        Coefficient of determination at each voxel for the single fODF model
    cod_multi_mod: 1 dimensional array
        Coefficient of determination at each voxel for the multi fODF model
    single_thresh: int
        Coefficient of determination threshold for the single fODF model
    multi_thresh: int
        Coefficient of determination threshold for the multi fODF model
    idx: int
        Index into the indices indicating the voxels included in the COD (in)equality
    vol_mp_single: 2 dimensional array
        Model parameters from fitting a single fODF to each voxel
    tol: float
        Tolerance for the COD (in)equality
    n: int
        Integer indicating the number of directions to divide by for spherical
        cross-correlation
    
    Returns
    -------
    deg_list: list
        List indicating the degrees included in spherical cross-correlation
    cc_list: list
        List with the cross-correlations between each combination of b values
    combos: list
        List of the b value combinations used for each cross-correlation
    idx: int
        Index into the indices indicating the voxels included in the COD (in)equality
    cod_s: float
        Coefficient of determination of single fODF model
    cod_m: float
        Coefficient of determination of multi fODF model
    """
    if (single_thresh is not None) and (multi_thresh is not None):
        # Get the indices with a desired COD.
        inds, b_inds, all_b_inds = sph_cc_ineq(cod_single_mod, cod_multi_mod,
                                               bvals, single_thresh,
                                               multi_thresh, tol=tol)
    else:
        # With no COD threshold, just find all the indices.
        inds = np.arange(int(np.sum(mask)))
        bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
        all_b_inds = np.where(rounded_bvals != 0)
    
    if idx is None:
        # If a specific index is not given, just pick a random one.
        ri = np.random.randint(0, len(inds))
        idx = inds[ri]
    
    data_list = []
    bvecs_b_list = []
    deg_list = []
    cc_list = []
    
    pool = np.arange(len(vol_b_list))
    
    for ii in pool:
        # Just get the data (model parameters) and bvecs within the chosen voxel (idx)
        # for each b value and mirror them.
        data_list.append(np.concatenate((vol_b_list[ii][np.where(mask)][idx],
                                         vol_b_list[ii][np.where(mask)][idx]), -1))
        bvecs_b_list.append(np.squeeze(np.concatenate((bvecs[:, b_inds[ii+1]],
                                           -1*bvecs[:, b_inds[ii+1]]), -1)).T)
    if vol_mp_single is None:
        # Make combinations of b values for spherical cross correlation between b values
        combos = list(itertools.combinations(pool, 2))
        this_iter = np.arange(len(combos))
    else:
        # No need for combos since we compare between the single fODF and the multi fODF
        combos = None
        this_iter = np.arange(len(vol_b_list))
        bvecs_all = np.squeeze(np.concatenate((bvecs[:, all_b_inds],
                               -1*bvecs[:, all_b_inds]), -1)).T
        data_all = np.concatenate((vol_mp_single[np.where(mask)][idx],
                                   vol_mp_single[np.where(mask)][idx]), -1)
    
    for itr in this_iter:
        if vol_mp_single is None:
            # Inputs are data and bvecs from two different b values that you want to find the
            # spherical cross-correlation between
            inputs = [np.squeeze(data_list[combos[itr][0]]), np.squeeze(data_list[combos[itr][1]]),
                      bvecs_b_list[combos[itr][0]], bvecs_b_list[combos[itr][1]]]
        else:
            # Inputs are the data and bvecs from one b value and the single fODF
            inputs = [np.squeeze(data_all), np.squeeze(data_list[itr]), bvecs_all, bvecs_b_list[itr]]
        # Put the inputs into the spherical cross-correlation function
        deg, cc = ozu.sph_cc(*inputs, n=n)
        deg_list.append(deg)
        cc_list.append(cc)
    
    if (single_thresh is not None) and (multi_thresh is not None):
        # Because sometimes it's nice to know what the actual CODs are
        cod_s = cod_single_mod[idx]
        cod_m = cod_multi_mod[idx]
    else:
        cod_s = None
        cod_m = None

    return deg_list, cc_list, combos, idx, cod_s, cod_m
Example #22
data_t[:, :, :, 10:13] = np.squeeze(500 + abs(np.random.randn(2, 2, 2, 3) * 200))  # For b=2

# Mock mask to be used in most tests
mask_t = np.zeros([2, 2, 2])
mask_t[:, :, 1] = 1

ad = {1000: 1.6386920952169737, 2000: 1.2919249903637751,
      3000: 0.99962593218241236}
rd = {1000: 0.33450124887561905, 2000: 0.28377379537043729,
      3000: 0.24611723207420028}

data_path = os.path.join(osmosis.__path__[0], 'data')

data_pv = nib.load(os.path.join(data_path, "red_data.nii.gz")).get_data()
bvals_pv = np.loadtxt(os.path.join(data_path, "bvals"))
bvecs_pv = np.loadtxt(os.path.join(data_path, "bvecs"))

bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals_pv)
all_b_inds = np.where(rounded_bvals != 0)

mask_pv = np.zeros(data_pv.shape[0:3])
mask_pv[0, 0, 0:2] = 1

actual_all = np.squeeze(data_pv[np.where(mask_pv)][:, all_b_inds])

np.random.seed(1975)

def test_regressors():
    full_mod_t = sfm.SparseDeconvolutionModelMultiB(data_t, bvecs_t, bvals_t,
                                                    mask=mask_t,
                                                    axial_diffusivity=ad,
                                                    radial_diffusivity=rd,
                                                    params_file="temp")
Example #23
def initial_params(data, bvecs, bvals, model, mask=None, params_file='temp'):
    """
    Determine the initial values for fitting the isotropic diffusion model.
    This only works on the models that fit to the relative diffusion signal.

    Parameters
    ----------
    data: 4 dimensional array
        Diffusion MRI data
    bvecs: 2 dimensional array
        All the b vectors
    bvals: 1 dimensional array
        All b values
    model: callable
        Isotropic model function handle
    mask: 3 dimensional array
        Mask of the data
    params_file: obj or str
        File handle of the param_files containing the tensor parameters.

    Returns
    -------
    bounds: list
        A list containing the bounds for each parameter for least squares
        fitting.
    initial: list
        A list containing the initial values for each parameter for least
        squares fitting.
    """
    dti_mod = dti.TensorModel(data,
                              bvecs,
                              bvals,
                              mask=mask,
                              params_file=params_file)

    d = dti_mod.mean_diffusivity[np.where(mask)]

    # Find initial noise floor
    _, b_inds, _, _ = ozu.separate_bvals(bvals)
    b0_data = data[np.where(mask)][:, b_inds[0]]
    #nf = np.std(b0_data, -1)/np.mean(b0_data, -1)
    nf = np.min(data[np.where(mask)], -1)
    if model == single_exp_rs:
        bounds = [(0, 4)]
        initial = d

    elif model == single_exp_nf_rs:
        bounds = [(0, 10000), (0, 4)]
        initial = np.concatenate(
            [nf[..., None], np.ones(d[..., None].shape)], -1)

    elif model == bi_exp_rs:
        bounds = [(0, 1), (0, 4), (0, 4)]
        initial = np.concatenate(
            [0.5 * np.ones((len(d), 1)), d[..., None], d[..., None]], -1)
    elif model == bi_exp_nf_rs:
        bounds = [(0, 10000), (0, 1), (0, 4), (0, 4)]
        initial = np.concatenate([nf[..., None], 0.5 * np.ones((len(d), 1)),
                                  d[..., None], d[..., None]], -1)
    return bounds, initial