Example #1
def test_coeff_of_determination():
    """
    Test the calculation of the coefficient of determination
    """
    # These are two corner cases that should lead to a nan answer:
    data = np.zeros(10)
    model = np.zeros(10)
    npt.assert_equal(np.isnan(ozu.coeff_of_determination(data, model)), True)

    data = np.zeros(10)
    model = np.random.randn(10)
    npt.assert_equal(np.isnan(ozu.coeff_of_determination(data, model)), True)

    # This should be perfect:
    data = np.random.randn(10)
    model = np.copy(data)
    npt.assert_equal(ozu.coeff_of_determination(data, model), 1)
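
The tests above pin down the corner cases, but the page never shows the
function itself. A minimal sketch that is consistent with all three
assertions, assuming the usual R^2 = 1 - SSE/SST definition plus an explicit
nan for zero-variance data (the real ozu.coeff_of_determination lives in
osmosis.utils and may differ in detail):

import numpy as np

def coeff_of_determination(data, model, axis=-1):
    # Sum of squared errors between the model and the data:
    ss_err = np.sum((data - model) ** 2, axis=axis)
    # Total sum of squares around the data mean:
    demeaned = data - np.mean(data, axis=axis, keepdims=True)
    ss_tot = np.sum(demeaned ** 2, axis=axis)
    with np.errstate(divide="ignore", invalid="ignore"):
        cod = 1 - ss_err / ss_tot
    # Zero-variance data (both corner cases above) must give nan, not +/-inf:
    return np.where(ss_tot == 0, np.nan, cod)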
Example #2
def test_coeff_of_determination():
    """
    Test the calculation of the coefficient of determination
    """
    # These are two corner cases that should lead to a nan answer:
    data = np.zeros(10)
    model = np.zeros(10)
    npt.assert_equal(np.isnan(ozu.coeff_of_determination(data, model)), True)

    data = np.zeros(10)
    model = np.random.randn(10)
    npt.assert_equal(np.isnan(ozu.coeff_of_determination(data, model)), True)

    # This should be perfect:
    data = np.random.randn(10)
    model = np.copy(data)
    npt.assert_equal(ozu.coeff_of_determination(data, model), 1)
Example #3
def coeff_of_determination(model1, model2):
    """
    Calculate the voxel-wise coefficient of determination between one model's
    fit and the other model's signal, averaged across both comparisons.
    """
    out = ozu.nans(model1.shape[:-1])
    
    sig1 = model1.signal[model1.mask]
    sig2 = model2.signal[model2.mask]
    fit1 = model1.fit[model1.mask]
    fit2 = model2.fit[model2.mask]

    fit1_R_sq = ozu.coeff_of_determination(fit1, sig2, axis=-1)
    fit2_R_sq = ozu.coeff_of_determination(fit2, sig1, axis=-1)

    # Average in each element:
    fit_R_sq = np.mean([fit1_R_sq, fit2_R_sq], 0)

    out[model1.mask] = fit_R_sq

    return out
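
In other words, this helper computes the symmetric cross-prediction accuracy
$\bar{R}^2 = \frac{1}{2}\left(R^2(fit_1, sig_2) + R^2(fit_2, sig_1)\right)$
in each voxel of the mask, so that neither model is favored by being scored
against its own signal.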
Example #4
def coeff_of_determination(model1, model2):
    """
    Calculate the voxel-wise coefficient of determination between one model's
    fit and the other model's signal, averaged across both comparisons.
    """
    out = ozu.nans(model1.shape[:-1])

    sig1 = model1.signal[model1.mask]
    sig2 = model2.signal[model2.mask]
    fit1 = model1.fit[model1.mask]
    fit2 = model2.fit[model2.mask]

    fit1_R_sq = ozu.coeff_of_determination(fit1, sig2, axis=-1)
    fit2_R_sq = ozu.coeff_of_determination(fit2, sig1, axis=-1)

    # Average in each element:
    fit_R_sq = np.mean([fit1_R_sq, fit2_R_sq], 0)

    out[model1.mask] = fit_R_sq

    return out
Example #5
if __name__ == "__main__":
    sid = "103414"
    hcp_path = "/biac4/wandell/data/klchan13/hcp_data_q3"
    data_path = os.path.join(hcp_path, "%s/T1w/Diffusion" % sid)

    data_file = nib.load(os.path.join(data_path, "data.nii.gz"))
    wm_data_file = nib.load(os.path.join(data_path, "wm_mask_no_vent.nii.gz"))

    data = data_file.get_data()
    wm_data = wm_data_file.get_data()
    wm_idx = np.where(wm_data == 1)

    bvals = np.loadtxt(os.path.join(data_path, "bvals"))
    bvecs = np.loadtxt(os.path.join(data_path, "bvecs"))

    bval_list, b_inds, unique_b, bvals_scaled = ozu.separate_bvals(bvals)
    all_b_idx = np.where(bvals_scaled != 0)

    ad_rd = np.loadtxt(os.path.join(data_path, "ad_rd_%s.txt" % sid))
    ad = {1000: ad_rd[0, 0], 2000: ad_rd[0, 1], 3000: ad_rd[0, 2]}
    rd = {1000: ad_rd[1, 0], 2000: ad_rd[1, 1], 3000: ad_rd[1, 2]}

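    # Assumption: b_inds[0] picks out the b=0 volumes and b_inds[3] the
    # b=3000 shell, which would match the "b3k" output filenames below.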
    b_inds_w0 = np.concatenate((b_inds[0], b_inds[3]))
    actual, predicted = pn.kfold_xval_gen(
        dti.TensorModel, data[..., b_inds_w0], bvecs[:, b_inds_w0], bvals[b_inds_w0], 10, mask=wm_data
    )
    cod = ozu.coeff_of_determination(actual, predicted)
    np.save(os.path.join(data_path, "dtm_predict_b3k.npy"), predicted)
    np.save(os.path.join(data_path, "dtm_cod_b3k.npy"), cod)
Example #6
def kfold_xval_MD_mod(data, bvals, bvecs, mask, func, n, factor=1000,
                      initial="preset", bounds="preset", params_file='temp',
                      signal="relative_signal"):
    """
    Fits the given function to the given data by minimizing the sum of
    squared errors, using k-fold cross-validation.

    Parameters
    ----------
    data: 4 dimensional array
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    bvecs: 3 dimensional array
        All the b vectors
    mask: 3 dimensional array
        Brain mask of the data
    func: function handle
        Mean model to perform kfold cross-validation on.
    initial: tuple
        Initial values for the parameters.
    n: int
        Integer indicating the percent of vertices that you want to predict
    factor: int
        Integer indicating the scaling factor for the b values
    bounds: list
        List containing tuples indicating the bounds for each parameter in
        the mean model function.

    Returns
    -------
    cod: 1 dimensional array
        Coefficient of determination between data and predicted values
    predicted: 2 dimensional array
        Predicted mean for the vertices left out of the fit
    """
    if isinstance(func, str):
        # Grab the function handle for the desired mean model
        func = globals()[func]

    # Get the initial values for the desired mean model
    if (bounds == "preset") or (initial == "preset"):
        all_params = initial_params(data, bvecs, bvals, func, mask=mask,
                                    params_file=params_file)
    if bounds == "preset":
        bounds = all_params[0]
    if initial == "preset":
        func_initial = all_params[1]
    else:
        this_initial = initial

    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    all_b_idx, b0_inds = _diffusion_inds(bvals, b_inds, rounded_bvals)

    b_scaled = bvals/factor
    flat_data = data[np.where(mask)]

    # Pre-allocate outputs
    ss_err = np.zeros(int(np.sum(mask)))
    predict_out = np.zeros((int(np.sum(mask)), len(all_b_idx)))

    # Setting up for creating combinations of directions for kfold cross
    # validation:

    # Number of directions to leave out at a time.
    num_choose = (n/100.)*len(all_b_idx)

    # Find the indices to all the non-b = 0 directions and shuffle them.
    vec_pool = np.arange(len(all_b_idx))
    np.random.shuffle(vec_pool)
    all_inc_0 = np.arange(len(rounded_bvals))

    # Start cross-validation
    for combo_num in np.arange(np.floor(100./n)):
        (si, vec_combo, vec_combo_rm0,
         vec_pool_inds, these_bvecs, these_bvals,
         this_data, these_inc0) = ozu.create_combos(bvecs, bvals, data,
                                                    all_b_idx,
                                                    np.arange(len(all_b_idx)),
                                                    all_b_idx, vec_pool,
                                                    num_choose, combo_num)
        this_flat_data = this_data[np.where(mask)]

        for vox in np.arange(np.sum(mask)).astype(int):
            s0 = np.mean(flat_data[vox, b0_inds], -1)
            these_b = b_scaled[vec_combo] # b values to predict

            if initial == "preset":
                this_initial = func_initial[vox]

            input_signal = flat_data[vox, these_inc0]/s0

            if signal == "log":
                input_signal = np.log(input_signal)

            # Fit the mean model to the part of the data kept in this fold:
            if bounds is None:
                params, _ = opt.leastsq(err_func, this_initial,
                                        args=(b_scaled[these_inc0],
                                              input_signal, func))
            else:
                lsq_b_out = lsq.leastsqbound(err_func, this_initial,
                                             args=(b_scaled[these_inc0],
                                                   input_signal, func),
                                             bounds=bounds)
                params = lsq_b_out[0]
            # Predict the mean values of the left-out b values using the
            # parameters fit to the rest of the data.
            predict_out[vox, vec_combo_rm0] = func(these_b, *params)

    # Find the relative diffusion signal.
    s0 = np.mean(flat_data[:, b0_inds], -1).astype(float)
    input_signal = flat_data[:, all_b_idx]/s0[..., None]
    if signal == "log":
        input_signal = np.log(input_signal)
    cod = ozu.coeff_of_determination(input_signal, predict_out, axis=-1)

    return cod, predict_out
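
The fold bookkeeping above is delegated to ozu.create_combos; a
self-contained sketch of the same leave-n%-out scheme, with illustrative
names that are not part of osmosis:

import numpy as np

n = 10                                   # leave out 10% of directions per fold
all_b_idx = np.arange(90)                # stand-in for the non-b=0 indices
vec_pool = np.arange(len(all_b_idx))
np.random.shuffle(vec_pool)

num_choose = int((n / 100.) * len(all_b_idx))
for fold in range(int(100. / n)):
    left_out = vec_pool[fold * num_choose:(fold + 1) * num_choose]
    kept = np.setdiff1d(np.arange(len(all_b_idx)), left_out)
    # ... fit the mean model on `kept`, then predict the directions in
    # `left_out`, exactly as the voxel loop above does.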
Example #7
def isotropic_params(data, bvals, bvecs, mask, func, factor=1000,
                       initial="preset", bounds="preset", params_file='temp',
                       signal="relative_signal"):
    """
    Fits the given function to the given data by minimizing the sum of
    squared errors.

    Parameters
    ----------
    data: 4 dimensional array
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    mask: 3 dimensional array
        Brain mask of the data
    func: str or callable
        The mean model function to fit, or its name as a string.
    initial: tuple
        Initial values for the parameters.
    factor: int
        Integer indicating the scaling factor for the b values
    bounds: list
        List containing tuples indicating the bounds for each parameter in
        the mean model function.

    Returns
    -------
    param_out: 2 dimensional array
        Parameters that minimize the residuals
    fit_out: 2 dimensional array
        Model fitted means
    cod: 1 dimensional array
        Coefficient of determination between the model fitted means and the
        actual means
    """
    if isinstance(func, str):
        # Grab the function handle for the desired mean model
        func = globals()[func]
    

    # Get the initial values for the desired mean model
    if (bounds == "preset") or (initial == "preset"):
        all_params = initial_params(data, bvecs, bvals, func, mask=mask,
                                    params_file=params_file)
    if bounds == "preset":
        bounds = all_params[0]
    if initial == "preset":
        func_initial = all_params[1]
    else:
        this_initial = initial

    # Separate b values and grab their indices
    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    all_b_idx, b0_inds = _diffusion_inds(bvals, b_inds, rounded_bvals)

    # Divide the b values by a scaling factor first.
    b = bvals[all_b_idx]/factor
    flat_data = data[np.where(mask)]

    # Get the number of inputs to the mean diffusivity function
    param_num = len(inspect.getargspec(func)[0])

    # Pre-allocate the outputs:
    param_out = np.zeros((int(np.sum(mask)), param_num - 1))
    cod = ozu.nans(np.sum(mask))
    fit_out = ozu.nans(cod.shape + (len(all_b_idx),))

    for vox in np.arange(np.sum(mask)).astype(int):
        s0 = np.mean(flat_data[vox, b0_inds], -1)

        if initial == "preset":
            this_initial = func_initial[vox]

        input_signal = flat_data[vox, all_b_idx]/s0
        if signal == "log":
            input_signal = np.log(input_signal)

        if bounds is None:
            params, _ = opt.leastsq(err_func, this_initial,
                                    args=(b, input_signal, func))
        else:
            lsq_b_out = lsq.leastsqbound(err_func, this_initial,
                                         args=(b, input_signal, func),
                                         bounds=bounds)
            params = lsq_b_out[0]

        param_out[vox] = np.squeeze(params)
        fit_out[vox] = func(b, *params)
        cod[vox] = ozu.coeff_of_determination(input_signal, fit_out[vox])

    return param_out, fit_out, cod
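
err_func is not shown on this page. Since scipy.optimize.leastsq calls its
first argument as err_func(params, *args) and minimizes the sum of squares of
the returned vector, a plausible reconstruction is:

def err_func(params, b, measured, func):
    # Residuals between the measured mean signal and the model prediction:
    return measured - func(b, *params)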
Example #8
def noise_ceiling(model1, model2, n_sims=1000, alpha=0.05):
    """
    Calculate the maximal model accuracy possible, given the noise in the
    signal. This is based on the method described by Kay et al. (in review).


    Parameters
    ----------
    model1, model2: two objects from a class inherited from BaseModel

    n_sims: int
       How many simulations of the signal to perform in each voxel.

    alpha: float
        The significance level for the (1 - alpha) confidence interval
        (default: 0.05).

    Returns
    -------
    coeff: The medians of the distributions of simulated signals in each voxel
    lb, ub: the (1-alpha) confidence interval boundaries on coeff

    Notes
    -----

    The following is performed on the relative signal ($\frac{S}{S_0}$):

    The idea is that noise in the signal can be computed in each voxel by
    comparing the signal in each direction as measured in two different
    measurements. The standard error of measurement for two sample points is
    calculated as follows. First we calculate the mean: 

    .. math ::

       \bar{x_i} = \frac{x_{i,1} + x_{i,2}}{2}

    Where $i$ denotes the direction within the voxel. Next, we can calculate
    the standard deviation of the noise: 

    .. math::

        \sigma^2_{noise,i} = \frac{(x_{i,1} - \bar{x_i})^2 + (x_{i,2} - \bar{x_i})^2}{2}

    Note that this is also the standard error of the measurement, since it
    implicitly contains the factor of $\sqrt{N-1} = \sqrt{1}$.

    We calculate a mean across directions:

    .. math::

        \sigma_{noise} = \sqrt{mean(\sigma^2_{noise, i})}

    Next, we calculate an estimate of the standard deviation attributable to
    the signal. This is done by first subtracting the noise variance from the
    overall data variance, while making sure that this quantity is non-negative
    and then taking the square root of the resulting quantity:
    
    .. math::

        \sigma_{signal} = \sqrt{\max(0, \mathrm{mean}(\sigma^2_{x_1}, \sigma^2_{x_2}) - \sigma^2_{noise})}

    Then, we use a Monte Carlo simulation to create a signal: we assume that
    the signal itself is generated from a Gaussian distribution with the
    variance calculated above. We add noise to this signal (zero-mean Gaussian
    with variance $\sigma^2_{noise}$) and compute the correlation between the
    noise-corrupted simulated signal and the noise-free simulated signal. The
    median of the resulting value over many simulations is the noise ceiling.
    The central $1 - \alpha$ values of the distribution represent a confidence
    interval on this value.

    This is performed over each voxel in the mask.

    """
    # Extract the relative signal
    sig1 = model1.relative_signal[model1.mask]
    sig2 = model2.relative_signal[model1.mask]

    noise_ceil_flat = np.empty(sig1.shape[0])
    ub_flat = np.empty(sig1.shape[0])
    lb_flat = np.empty(sig1.shape[0])

    for vox in xrange(sig1.shape[0]):
        sigma_noise = np.sqrt(np.mean(np.var([sig1[vox], sig2[vox]], 0)))
        mean_sig_w_noise = np.mean([sig1[vox], sig2[vox]], 0)
        var_sig_w_noise = np.var(mean_sig_w_noise)
        sigma_signal = np.sqrt(np.max([0, var_sig_w_noise - sigma_noise**2]))
        # Create the simulated signal over many iterations:
        sim_signal = sigma_signal * np.random.randn(
            sig1[vox].shape[0] * n_sims)
        sim_signal_w_noise = (
            sim_signal +
            sigma_noise * np.random.randn(sig1[vox].shape[0] * n_sims))

        # Reshape it so that you have n_sims separate simulations of this voxel:
        sim_signal = np.reshape(sim_signal, (n_sims, -1))
        sim_signal_w_noise = np.reshape(sim_signal_w_noise, (n_sims, -1))
        coeffs = ozu.coeff_of_determination(sim_signal_w_noise, sim_signal)
        sort_coeffs = np.sort(coeffs)
        lb_flat[vox] = sort_coeffs[int(alpha / 2 * coeffs.shape[-1])]
        ub_flat[vox] = sort_coeffs[int((1 - alpha / 2) * coeffs.shape[-1])]
        noise_ceil_flat[vox] = np.median(coeffs)

    out_coeffs = ozu.nans(model1.mask.shape)
    out_ub = ozu.nans(out_coeffs.shape)
    out_lb = ozu.nans(out_coeffs.shape)

    out_coeffs[model1.mask] = noise_ceil_flat
    out_lb[model1.mask] = lb_flat
    out_ub[model1.mask] = ub_flat

    return out_coeffs, out_lb, out_ub
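
A single-voxel, plain-numpy sketch of the Monte Carlo step above (the two
sigmas are made-up numbers here; the function derives them from the paired
measurements, and coeff_of_determination is taken to be the R^2 described
earlier):

import numpy as np

sigma_signal, sigma_noise = 1.0, 0.5
n_dirs, n_sims = 60, 1000

sim_signal = sigma_signal * np.random.randn(n_sims, n_dirs)
sim_noisy = sim_signal + sigma_noise * np.random.randn(n_sims, n_dirs)

ss_err = np.sum((sim_noisy - sim_signal) ** 2, axis=-1)
ss_tot = np.sum((sim_signal - sim_signal.mean(-1, keepdims=True)) ** 2,
                axis=-1)
r_sq = 1 - ss_err / ss_tot

print(np.median(r_sq))                   # the noise ceiling for this voxel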
Example #9
    if im == "bi_exp_rs":
        shorthand_im = "be"
    elif im == "single_exp_rs":
        shorthand_im = "se"

    # Predict 10% (n = 10)
    actual, predicted = pn.kfold_xval(data,
                                      bvals,
                                      bvecs,
                                      mask,
                                      ad,
                                      rd,
                                      10,
                                      fODF,
                                      mean_mod_func=im,
                                      mean="mean_model",
                                      solver="nnls")

    cod = ozu.coeff_of_determination(actual, predicted)
    np.save(
        os.path.join(data_path,
                     "sfm_predict_%s_%s%s.npy" % (fODF, shorthand_im, i)),
        predicted)
    np.save(
        os.path.join(data_path,
                     "sfm_cod_%s_%s%s.npy" % (fODF, shorthand_im, i)), cod)

    t2 = time.time()
    print("This program took %4.2f minutes to run." % ((t2 - t1) / 60.))
Example #10
    def model_params(self):
        """
        The model parameters.

        Similar to the CanonicalTensorModel, if a fit has occurred, the data
        is cached on disk as a nifti file.

        If a fit hasn't occurred yet, calling this will trigger a model fit
        and derive the parameters.

        In that case, the steps are as follows:

        1. Perform OLS fitting on all voxels in the mask, with each of the
           $\vec{b}$ combinations, choosing only sets for which all weights are
           non-negative. 

        2. Find the PDD combination that most readily explains the data
           (highest correlation coefficient between the data and the
           predicted signal). That will be the combination used to derive the
           fit for that voxel.

        """
        # The file already exists:
        if os.path.isfile(self.params_file):
            if self.verbose:
                print("Loading params from file: %s" % self.params_file)

            # Get the cached values and be done with it:
            return ni.load(self.params_file).get_data()
        else:
            # Looks like we might need to do some fitting...

            # Get the bvec weights (we don't know how many...) and the
            # isotropic weights (which are always last):
            b_w = self.ols[:, :-1, :].copy().squeeze()
            i_w = self.ols[:, -1, :].copy().squeeze()

            # nan out the places where weights are negative:
            b_w[b_w < 0] = np.nan
            i_w[i_w < 0] = np.nan

            # Weight for each canonical tensor, plus a place for the index into
            # rot_idx and one more slot for the isotropic weight (at the end)
            params = np.empty(
                (self._flat_signal.shape[0], self.n_canonicals + 2))

            if self.verbose:
                print("Fitting MultiCanonicalTensorModel:")
                prog_bar = ozu.ProgressBar(self._flat_signal.shape[0])
                this_class = str(self.__class__).split("'")[-2].split('.')[-1]
                f_name = this_class + '.' + inspect.stack()[0][3]

            # Find the best OLS solution in each voxel:
            for vox in xrange(self._flat_signal.shape[0]):
                # We do this in each voxel (instead of all at once, which is
                # possible...) to not blow up the memory:
                vox_fits = np.empty((len(self.rot_idx), len(self.b_idx)))

                for idx, rot_idx in enumerate(self.rot_idx):
                    # The constant regressor gets added in first:
                    this_relative = i_w[idx, vox] * self.regressors[0][0]
                    # And we add the different canonicals on top of that:
                    this_relative += (
                        np.dot(
                            b_w[idx, :, vox],
                            # The tensor regressors are different in cases where we
                            # are fitting to relative/attenuation signal, so grab that
                            # from the regressors attr:
                            np.array([self.regressors[1][x]
                                      for x in rot_idx])))

                    if self.mode == 'relative_signal' or self.mode == 'normalize':
                        vox_fits[idx] = this_relative * self._flat_S0[vox]
                    elif self.mode == 'signal_attenuation':
                        vox_fits[idx] = (1 -
                                         this_relative) * self._flat_S0[vox]

                # Find the predicted signal that best matches the original
                # signal attenuation. That will choose the direction for the
                # tensor we use:
                corrs = ozu.coeff_of_determination(self._flat_signal[vox],
                                                   vox_fits)

                idx = np.where(corrs == np.nanmax(corrs))[0]

                # Sometimes there is no good solution:
                if len(idx):
                    # In case more than one fits the bill, just choose the
                    # first one:
                    if len(idx) > 1:
                        idx = idx[0]

                    params[vox, :] = np.hstack([
                        idx,
                        np.array([x for x in b_w[idx, :, vox]]).squeeze(),
                        i_w[idx, vox]
                    ])
                else:
                    # In which case we set it to all nans:
                    params[vox, :] = np.hstack(
                        [np.nan, self.n_canonicals * (np.nan, ), np.nan])

                if self.verbose:
                    prog_bar.animate(vox, f_name=f_name)

            # Save the params for future use:
            out_params = ozu.nans(self.signal.shape[:3] + (params.shape[-1], ))
            out_params[self.mask] = np.array(params).squeeze()
            params_ni = ni.Nifti1Image(out_params, self.affine)
            if self.params_file != 'temp':
                if self.verbose:
                    print("Saving params to file: %s" % self.params_file)
                params_ni.to_filename(self.params_file)

            # And return the params for current use:
            return out_params
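
The per-voxel selection step boils down to "score every candidate prediction
against the measured signal and keep the argmax"; in miniature, with
illustrative arrays standing in for self.rotations and the OLS weights:

import numpy as np

signal = np.random.rand(60)              # measured signal in one voxel
vox_fits = np.random.rand(200, 60)       # one predicted signal per candidate

ss_err = np.sum((vox_fits - signal) ** 2, axis=-1)
ss_tot = np.sum((signal - signal.mean()) ** 2)
corrs = 1 - ss_err / ss_tot              # R^2 for each candidate

idx = np.where(corrs == np.nanmax(corrs))[0]
best = idx[0] if len(idx) else None      # first argmax, or None if no fit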
Example #11
def noise_ceiling(model1, model2, n_sims=1000, alpha=0.05):
    """
    Calculate the maximal model accuracy possible, given the noise in the
    signal. This is based on the method described by Kay et al. (in review).


    Parameters
    ----------
    model1, model2: two objects from a class inherited from BaseModel

    n_sims: int
       How many simulations of the signal to perform in each voxel.

    alpha: float
        The significance level for the (1 - alpha) confidence interval
        (default: 0.05).

    Returns
    -------
    coeff: The medians of the distributions of simulated signals in each voxel
    lb, ub: the (1-alpha) confidence interval boundaries on coeff

    Notes
    -----

    The following is performed on the relative signal ($\frac{S}{S_0}$):

    The idea is that noise in the signal can be computed in each voxel by
    comparing the signal in each direction as measured in two different
    measurements. The standard error of measurement for two sample points is
    calculated as follows. First we calculate the mean: 

    .. math ::

       \bar{x_i} = \frac{x_{i,1} + x_{i,2}}{2}

    Where $i$ denotes the direction within the voxel. Next, we can calculate
    the standard deviation of the noise: 

    .. math::

        \sigma^2_{noise,i} = \frac{(x_{i,1} - \bar{x_i})^2 + (x_{i,2} - \bar{x_i})^2}{2}

    Note that this is also the standard error of the measurement, since it
    implicitly contains the factor of $\sqrt{N-1} = \sqrt{1}$.

    We calculate a mean across directions:

    .. math::

        \sigma_{noise} = \sqrt{mean(\sigma^2_{noise, i})}

    Next, we calculate an estimate of the standard deviation attributable to
    the signal. This is done by first subtracting the noise variance from the
    overall data variance, while making sure that this quantity is non-negative
    and then taking the square root of the resulting quantity:
    
    .. math::

        \sigma_{signal} = \sqrt{\max(0, \mathrm{mean}(\sigma^2_{x_1}, \sigma^2_{x_2}) - \sigma^2_{noise})}

    Then, we use a Monte Carlo simulation to create a signal: we assume that
    the signal itself is generated from a Gaussian distribution with the
    variance calculated above. We add noise to this signal (zero-mean Gaussian
    with variance $\sigma^2_{noise}$) and compute the correlation between the
    noise-corrupted simulated signal and the noise-free simulated signal. The
    median of the resulting value over many simulations is the noise ceiling.
    The central $1 - \alpha$ values of the distribution represent a confidence
    interval on this value.

    This is performed over each voxel in the mask.

    """
    # Extract the relative signal 
    sig1 = model1.relative_signal[model1.mask]
    sig2 = model2.relative_signal[model1.mask]

    noise_ceil_flat = np.empty(sig1.shape[0])
    ub_flat = np.empty(sig1.shape[0])
    lb_flat = np.empty(sig1.shape[0])

    for vox in xrange(sig1.shape[0]):
        sigma_noise = np.sqrt(np.mean(np.var([sig1[vox],sig2[vox]],0)))
        mean_sig_w_noise = np.mean([sig1[vox], sig2[vox]], 0)
        var_sig_w_noise = np.var(mean_sig_w_noise)
        sigma_signal = np.sqrt(np.max([0, var_sig_w_noise - sigma_noise**2]))
        # Create the simulated signal over many iterations:
        sim_signal = sigma_signal * np.random.randn(sig1[vox].shape[0] * n_sims)
        sim_signal_w_noise = (sim_signal +
                    sigma_noise * np.random.randn(sig1[vox].shape[0] * n_sims))

        # Reshape it so that you have n_sims separate simulations of this voxel:
        sim_signal = np.reshape(sim_signal, (n_sims, -1))
        sim_signal_w_noise = np.reshape(sim_signal_w_noise, (n_sims, -1))
        coeffs = ozu.coeff_of_determination(sim_signal_w_noise, sim_signal)
        sort_coeffs = np.sort(coeffs)
        lb_flat[vox] = sort_coeffs[int(alpha/2 * coeffs.shape[-1])]
        ub_flat[vox] = sort_coeffs[int((1 - alpha/2) * coeffs.shape[-1])]
        noise_ceil_flat[vox] = np.median(coeffs)

    out_coeffs = ozu.nans(model1.mask.shape)
    out_ub = ozu.nans(out_coeffs.shape)
    out_lb = ozu.nans(out_coeffs.shape)
    
    out_coeffs[model1.mask] = noise_ceil_flat
    out_lb[model1.mask] = lb_flat
    out_ub[model1.mask] = ub_flat

    return out_coeffs, out_lb, out_ub
Example #12
    def model_params(self):
        """
        The model parameters.

        Similar to the CanonicalTensorModel, if a fit has occurred, the data
        is cached on disk as a nifti file.

        If a fit hasn't occurred yet, calling this will trigger a model fit
        and derive the parameters.

        In that case, the steps are as follows:

        1. Perform OLS fitting on all voxels in the mask, with each of the
           $\vec{b}$ combinations, choosing only sets for which all weights are
           non-negative. 

        2. Find the PDD combination that most readily explains the data
           (highest correlation coefficient between the data and the
           predicted signal). That will be the combination used to derive the
           fit for that voxel.

        """
        # The file already exists: 
        if os.path.isfile(self.params_file):
            if self.verbose:
                print("Loading params from file: %s"%self.params_file)

            # Get the cached values and be done with it:
            return ni.load(self.params_file).get_data()
        else:
            # Looks like we might need to do some fitting... 

            # Get the bvec weights (we don't know how many...) and the
            # isotropic weights (which are always last): 
            b_w = self.ols[:,:-1,:].copy().squeeze()
            i_w = self.ols[:,-1,:].copy().squeeze()

            # nan out the places where weights are negative: 
            b_w[b_w<0] = np.nan
            i_w[i_w<0] = np.nan

            # Weight for each canonical tensor, plus a place for the index into
            # rot_idx and one more slot for the isotropic weight (at the end)
            params = np.empty((self._flat_signal.shape[0],
                               self.n_canonicals + 2))

            if self.verbose:
                print("Fitting MultiCanonicalTensorModel:")
                prog_bar = ozu.ProgressBar(self._flat_signal.shape[0])
                this_class = str(self.__class__).split("'")[-2].split('.')[-1]
                f_name = this_class + '.' + inspect.stack()[0][3]

            # Find the best OLS solution in each voxel:
            for vox in xrange(self._flat_signal.shape[0]):
                # We do this in each voxel (instead of all at once, which is
                # possible...) to not blow up the memory:
                vox_fits = np.empty((len(self.rot_idx), len(self.b_idx)))
                
                for idx, rot_idx in enumerate(self.rot_idx):
                    # The constant regressor gets added in first:
                    this_relative = i_w[idx,vox] * self.regressors[0][0]
                    # And we add the different canonicals on top of that:
                    this_relative += (np.dot(b_w[idx,:,vox],
                    # The tensor regressors are different in cases where we
                    # are fitting to relative/attenuation signal, so grab that
                    # from the regressors attr:
                    np.array([self.regressors[1][x] for x in rot_idx])))

                    if self.mode == 'relative_signal' or self.mode=='normalize':
                        vox_fits[idx] = this_relative * self._flat_S0[vox]
                    elif self.mode == 'signal_attenuation':
                        vox_fits[idx] = (1 - this_relative) * self._flat_S0[vox]
                
                # Find the predicted signal that best matches the original
                # signal attenuation. That will choose the direction for the
                # tensor we use:
                corrs = ozu.coeff_of_determination(self._flat_signal[vox],
                                                   vox_fits)
                
                idx = np.where(corrs==np.nanmax(corrs))[0]

                # Sometimes there is no good solution:
                if len(idx):
                    # In case more than one fits the bill, just choose the
                    # first one:
                    if len(idx)>1:
                        idx = idx[0]
                    
                    params[vox,:] = np.hstack([idx,
                        np.array([x for x in b_w[idx,:,vox]]).squeeze(),
                        i_w[idx, vox]])
                else:
                    # In which case we set it to all nans:
                    params[vox,:] = np.hstack([np.nan,
                                               self.n_canonicals * (np.nan,),
                                               np.nan])

                if self.verbose: 
                    prog_bar.animate(vox, f_name=f_name)

            # Save the params for future use: 
            out_params = ozu.nans(self.signal.shape[:3]+
                                        (params.shape[-1],))
            out_params[self.mask] = np.array(params).squeeze()
            params_ni = ni.Nifti1Image(out_params, self.affine)
            if self.params_file != 'temp':
                if self.verbose:
                    print("Saving params to file: %s"%self.params_file)
                params_ni.to_filename(self.params_file)

            # And return the params for current use:
            return out_params
Example #13
    def model_params(self):
        """
        The model parameters.

        Similar to the TensorModel, if a fit has occurred, the data is cached
        on disk as a nifti file.

        If a fit hasn't occurred yet, calling this will trigger a model fit
        and derive the parameters.

        In that case, the steps are as follows:

        1. Perform OLS fitting on all voxels in the mask, with each of the
           $\vec{b}$. Choose only the non-negative weights. 

        2. Find the PDD that most readily explains the data (highest
           correlation coefficient between the data and the predicted signal)
           and use that one to derive the fit for that voxel.

        """

        # The file already exists: 
        if os.path.isfile(self.params_file):
            if self.verbose:
                print("Loading params from file: %s"%self.params_file)

            # Get the cached values and be done with it:
            return ni.load(self.params_file).get_data()
        else:
            # Looks like we might need to do some fitting...
            # Get the bvec weights and the isotropic weights
            b_w = self.ols[:,0,:].copy().squeeze()
            i_w = self.ols[:,1,:].copy().squeeze()

            # nan out the places where weights are negative: 
            b_w[b_w<0] = np.nan
            i_w[i_w<0] = np.nan

            params = np.empty((self._flat_signal.shape[0],3))
            if self.verbose:
                print("Fitting CanonicalTensorModel:")
                prog_bar = ozu.ProgressBar(self._flat_signal.shape[0])
                this_class = str(self.__class__).split("'")[-2].split('.')[-1]
                f_name = this_class + '.' + inspect.stack()[0][3]
            # Find the best OLS solution in each voxel:
            for vox in xrange(self._flat_signal.shape[0]):
                # We do this in each voxel (instead of all at once, which is
                # possible...) to not blow up the memory:
                vox_fits = np.empty(self.rotations.shape)
                for rot_i, rot in enumerate(self.rotations):
                    if self.mode == 'log':
                        this_sig = (np.exp(b_w[rot_i,vox] * rot +
                                    self.regressors[0][0] * i_w[rot_i,vox]) *
                                    self._flat_S0[vox])
                    else:
                        this_relative = (b_w[rot_i,vox] * rot +
                                    self.regressors[0][0] * i_w[rot_i,vox])
                        if self.mode == 'signal_attenuation':
                            this_relative = 1 - this_relative

                        this_sig = this_relative * self._flat_S0[vox]

                    vox_fits[rot_i] = this_sig
                    
                # Find the predicted signal that best matches the original
                # relative signal. That will choose the direction for the
                # tensor we use:
                corrs = ozu.coeff_of_determination(self._flat_signal[vox],
                                                   vox_fits)
                idx = np.where(corrs==np.nanmax(corrs))[0]
                
                # Sometimes there is no good solution (maybe we need to fit
                # just an isotropic to all of these?):
                if len(idx):
                    # In case more than one fits the bill, just choose the
                    # first one:
                    if len(idx)>1:
                        idx = idx[0]
                    
                    params[vox,:] = np.array([idx,
                                              b_w[idx, vox],
                                              i_w[idx, vox]]).squeeze()
                else:
                    params[vox,:] = np.array([np.nan, np.nan, np.nan])

                if self.verbose:
                    prog_bar.animate(vox, f_name=f_name)

            # Save the params for future use: 
            out_params = ozu.nans(self.signal.shape[:3] + (3,))
            out_params[self.mask] = np.array(params).squeeze()
            params_ni = ni.Nifti1Image(out_params, self.affine)
            if self.params_file != 'temp':
                if self.verbose:
                    print("Saving params to file: %s" % self.params_file)
                params_ni.to_filename(self.params_file)

            # And return the params for current use:
            return out_params
Example #14
    def model_params(self):
        """
        The model parameters.

        Similar to the TensorModel, if a fit has occurred, the data is cached
        on disk as a nifti file.

        If a fit hasn't occurred yet, calling this will trigger a model fit
        and derive the parameters.

        In that case, the steps are as follows:

        1. Perform OLS fitting on all voxels in the mask, with each of the
           $\vec{b}$. Choose only the non-negative weights. 

        2. Find the PDD that most readily explains the data (highest
           correlation coefficient between the data and the predicted signal)
           and use that one to derive the fit for that voxel.

        """

        # The file already exists:
        if os.path.isfile(self.params_file):
            if self.verbose:
                print("Loading params from file: %s" % self.params_file)

            # Get the cached values and be done with it:
            return ni.load(self.params_file).get_data()
        else:
            # Looks like we might need to do some fitting...
            # Get the bvec weights and the isotropic weights
            b_w = self.ols[:, 0, :].copy().squeeze()
            i_w = self.ols[:, 1, :].copy().squeeze()

            # nan out the places where weights are negative:
            b_w[b_w < 0] = np.nan
            i_w[i_w < 0] = np.nan

            params = np.empty((self._flat_signal.shape[0], 3))
            if self.verbose:
                print("Fitting CanonicalTensorModel:")
                prog_bar = ozu.ProgressBar(self._flat_signal.shape[0])
                this_class = str(self.__class__).split("'")[-2].split('.')[-1]
                f_name = this_class + '.' + inspect.stack()[0][3]
            # Find the best OLS solution in each voxel:
            for vox in xrange(self._flat_signal.shape[0]):
                # We do this in each voxel (instead of all at once, which is
                # possible...) to not blow up the memory:
                vox_fits = np.empty(self.rotations.shape)
                for rot_i, rot in enumerate(self.rotations):
                    if self.mode == 'log':
                        this_sig = (
                            np.exp(b_w[rot_i, vox] * rot +
                                   self.regressors[0][0] * i_w[rot_i, vox]) *
                            self._flat_S0[vox])
                    else:
                        this_relative = (
                            b_w[rot_i, vox] * rot +
                            self.regressors[0][0] * i_w[rot_i, vox])
                        if self.mode == 'signal_attenuation':
                            this_relative = 1 - this_relative

                        this_sig = this_relative * self._flat_S0[vox]

                    vox_fits[rot_i] = this_sig

                # Find the predicted signal that best matches the original
                # relative signal. That will choose the direction for the
                # tensor we use:
                corrs = ozu.coeff_of_determination(self._flat_signal[vox],
                                                   vox_fits)
                idx = np.where(corrs == np.nanmax(corrs))[0]

                # Sometimes there is no good solution (maybe we need to fit
                # just an isotropic to all of these?):
                if len(idx):
                    # In case more than one fits the bill, just choose the
                    # first one:
                    if len(idx) > 1:
                        idx = idx[0]

                    params[vox, :] = np.array(
                        [idx, b_w[idx, vox], i_w[idx, vox]]).squeeze()
                else:
                    params[vox, :] = np.array([np.nan, np.nan, np.nan])

                if self.verbose:
                    prog_bar.animate(vox, f_name=f_name)

            # Save the params for future use:
            out_params = ozu.nans(self.signal.shape[:3] + (3, ))
            out_params[self.mask] = np.array(params).squeeze()
            params_ni = ni.Nifti1Image(out_params, self.affine)
            if self.params_file != 'temp':
                if self.verbose:
                    print("Saving params to file: %s" % self.params_file)
                params_ni.to_filename(self.params_file)

            # And return the params for current use:
            return out_params
Example #15
def kfold_xval_MD_mod(data,
                      bvals,
                      bvecs,
                      mask,
                      func,
                      n,
                      factor=1000,
                      initial="preset",
                      bounds="preset",
                      params_file='temp',
                      signal="relative_signal"):
    """
    Fits the given function to the given data by minimizing the sum of
    squared errors, using k-fold cross-validation.

    Parameters
    ----------
    data: 4 dimensional array
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    bvecs: 3 dimensional array
        All the b vectors
    mask: 3 dimensional array
        Brain mask of the data
    func: function handle
        Mean model to perform kfold cross-validation on.
    initial: tuple
        Initial values for the parameters.
    n: int
        Integer indicating the percent of vertices that you want to predict
    factor: int
        Integer indicating the scaling factor for the b values
    bounds: list
        List containing tuples indicating the bounds for each parameter in
        the mean model function.

    Returns
    -------
    cod: 1 dimensional array
        Coefficient of determination between data and predicted values
    predicted: 2 dimensional array
        Predicted mean for the vertices left out of the fit
    """
    if isinstance(func, str):
        # Grab the function handle for the desired isotropic model
        func = globals()[func]

    # Get the initial values for the desired isotropic model
    if (bounds == "preset") | (initial == "preset"):
        all_params = initial_params(data,
        all_params = initial_params(data,
                                    bvecs,
                                    bvals,
                                    func,
                                    mask=mask,
                                    params_file=params_file)
        if bounds == "preset":
            bounds = all_params[0]
        if initial == "preset":
            func_initial = all_params[1]
    else:
        this_initial = initial

    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    all_b_idx, b0_inds = _diffusion_inds(bvals, b_inds, rounded_bvals)

    b_scaled = bvals / factor
    flat_data = data[np.where(mask)]

    # Pre-allocate outputs
    ss_err = np.zeros(int(np.sum(mask)))
    predict_out = np.zeros((int(np.sum(mask)), len(all_b_idx)))

    # Setting up for creating combinations of directions for kfold cross
    # validation:

    # Number of directions to leave out at a time.
    num_choose = (n / 100.) * len(all_b_idx)

    # Find the indices to all the non-b = 0 directions and shuffle them.
    vec_pool = np.arange(len(all_b_idx))
    np.random.shuffle(vec_pool)
    all_inc_0 = np.arange(len(rounded_bvals))

    # Start cross-validation
    for combo_num in np.arange(np.floor(100. / n)):
        (si, vec_combo, vec_combo_rm0, vec_pool_inds, these_bvecs, these_bvals,
         this_data, these_inc0) = ozu.create_combos(bvecs, bvals, data,
                                                    all_b_idx,
                                                    np.arange(len(all_b_idx)),
                                                    all_b_idx, vec_pool,
                                                    num_choose, combo_num)

        these_b = b_scaled[vec_combo]  # b values to predict
        for vox in np.arange(np.sum(mask)).astype(int):
            s0 = np.mean(flat_data[vox, b0_inds], -1)

            if initial == "preset":
                this_initial = func_initial[vox]

            input_signal = flat_data[vox, these_inc0] / s0

            if signal == "log":
                input_signal = np.log(input_signal)

            # Fit the mean model to the part of the data kept in this fold:
            if bounds is None:
                params, _ = opt.leastsq(err_func,
                                        this_initial,
                                        args=(b_scaled[these_inc0],
                                              input_signal, func))
            else:
                lsq_b_out = lsq.leastsqbound(err_func,
                                             this_initial,
                                             args=(b_scaled[these_inc0],
                                                   input_signal, func),
                                             bounds=bounds)
                params = lsq_b_out[0]
            predict_out[vox, vec_combo_rm0] = func(these_b, *params)

    # Find the relative diffusion signal.
    s0 = np.mean(flat_data[:, b0_inds], -1).astype(float)
    input_signal = flat_data[:, all_b_idx] / s0[..., None]
    if signal == "log":
        input_signal = np.log(input_signal)
    cod = ozu.coeff_of_determination(input_signal, predict_out, axis=-1)

    return cod, predict_out
Example #16
def isotropic_params(data,
                     bvals,
                     bvecs,
                     mask,
                     func,
                     factor=1000,
                     initial="preset",
                     bounds="preset",
                     params_file='temp',
                     signal="relative_signal"):
    """
    Fits the given function to the given data by minimizing the sum of
    squared errors.

    Parameters
    ----------
    data: 4 dimensional array
        Diffusion MRI data
    bvals: 1 dimensional array
        All b values
    mask: 3 dimensional array
        Brain mask of the data
    func: str or callable
        The mean model function to fit, or its name as a string.
    initial: tuple
        Initial values for the parameters.
    factor: int
        Integer indicating the scaling factor for the b values
    bounds: list
        List containing tuples indicating the bounds for each parameter in
        the mean model function.

    Returns
    -------
    param_out: 2 dimensional array
        Parameters that minimize the residuals
    fit_out: 2 dimensional array
        Model fitted means
    cod: 1 dimensional array
        Coefficient of determination between the model fitted means and the
        actual means
    """
    if isinstance(func, str):
        # Grab the function handle for the desired mean model
        func = globals()[func]

    # Get the initial values for the desired mean model
    if (bounds == "preset") or (initial == "preset"):
        all_params = initial_params(data,
                                    bvecs,
                                    bvals,
                                    func,
                                    mask=mask,
                                    params_file=params_file)
    if bounds == "preset":
        bounds = all_params[0]
    if initial == "preset":
        func_initial = all_params[1]
    else:
        this_initial = initial

    # Separate b values and grab their indices
    bval_list, b_inds, unique_b, rounded_bvals = ozu.separate_bvals(bvals)
    all_b_idx, b0_inds = _diffusion_inds(bvals, b_inds, rounded_bvals)

    # Divide the b values by a scaling factor first.
    b = bvals[all_b_idx] / factor
    flat_data = data[np.where(mask)]

    # Get the number of inputs to the mean diffusivity function
    param_num = len(inspect.getargspec(func)[0])

    # Pre-allocate the outputs:
    param_out = np.zeros((int(np.sum(mask)), param_num - 1))
    cod = ozu.nans(np.sum(mask))
    fit_out = ozu.nans(cod.shape + (len(all_b_idx), ))

    prog_bar = ozu.ProgressBar(flat_data.shape[0])

    for vox in np.arange(np.sum(mask)).astype(int):
        prog_bar.animate(vox)
        s0 = np.mean(flat_data[vox, b0_inds], -1)

        if initial == "preset":
            this_initial = func_initial[vox]

        input_signal = flat_data[vox, all_b_idx] / s0
        if signal == "log":
            input_signal = np.log(input_signal)

        if bounds is None:
            params, _ = opt.leastsq(err_func,
                                    this_initial,
                                    args=(b, input_signal, func))
        else:
            lsq_b_out = lsq.leastsqbound(err_func,
                                         this_initial,
                                         args=(b, input_signal, func),
                                         bounds=bounds)
            params = lsq_b_out[0]

        param_out[vox] = np.squeeze(params)
        fit_out[vox] = func(b, *params)
        cod[vox] = ozu.coeff_of_determination(input_signal, fit_out[vox])

    return param_out, fit_out, cod