def ols(self):
        """
        Compute the design matrices for OLS fitting and the OLS solution.
        Cache them for reuse in each direction over all voxels.
        """
        ols_weights = np.empty((len(self.rot_idx), self.n_canonicals + 1,
                                self._flat_signal.shape[0]))

        iso_regressor, tensor_regressor, fit_to = self.regressors

        where_are_we = 0
        for row, idx in enumerate(self.rot_idx):
            # 'row' refers to where we are in ols_weights
            if self.verbose:
                if idx[0] == where_are_we:
                    s = "Starting MultiCanonicalTensorModel fit"
                    s += " for %sth set of basis functions" % (where_are_we)
                    print(s)
                    where_are_we += 1
            # The 'design matrix':
            d = np.vstack([[tensor_regressor[i] for i in idx],
                           iso_regressor]).T
            # This is $(X' X)^{-1} X'$:
            ols_mat = ozu.ols_matrix(d)
            # Multiply to find the OLS solution:
            ols_weights[row] = np.array(np.dot(ols_mat, fit_to)).squeeze()

        return ols_weights
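
Every example on this page leans on ols_matrix (ozu.ols_matrix or utils.ols_matrix). For reference, here is a minimal standalone sketch of the quantity the comments call $(X' X)^{-1} X'$; the helper name is hypothetical and this is not the actual ozu.ols_matrix implementation (which, as Example 5 shows, also accepts a norm_func argument).

import numpy as np

def ols_projection(design):
    # Hypothetical helper illustrating the quantity the comments call
    # $(X' X)^{-1} X'$; it is not the actual ozu.ols_matrix implementation.
    X = np.asarray(design, dtype=float)
    # The pseudo-inverse of X'X keeps this stable when columns are nearly collinear:
    return np.dot(np.linalg.pinv(np.dot(X.T, X)), X.T)

# Multiplying by a signal vector then gives the least-squares weights:
# beta_hat = np.dot(ols_projection(X), y)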
Example 2
def ls_fit_b(log_prop, unique_b):
    """
    Compute a first-order least-squares fit of the log property values
    against the unique b values.

    Parameters
    ----------
    log_prop: list
        List of the log of the desired property values
    unique_b: 1-dimensional array
        Array of all the unique b values found

    Returns
    -------
    ls_fit: 1-dimensional array
        An array with the results from the least squares fit
    """
    if 0 in unique_b:
        unique_b = unique_b[1:]
        
    log_prop_matrix = np.matrix(log_prop)
    b_matrix = np.matrix([unique_b, np.ones(len(unique_b))]).T
    b_inv = utils.ols_matrix(b_matrix)
    ls_fit = np.dot(b_inv, log_prop_matrix)
    
    return ls_fit
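
A usage sketch for ls_fit_b, with made-up numbers; it assumes ls_fit_b (as defined above, together with the utils module it uses) and numpy are importable, and that each entry of log_prop holds the log property values for one non-zero b value, with one column per voxel.

import numpy as np

# Made-up data: two voxels measured at b = 0, 1000, 2000, 3000.
unique_b = np.array([0., 1000., 2000., 3000.])
log_prop = [np.log([0.80, 0.75]),   # b = 1000
            np.log([0.60, 0.55]),   # b = 2000
            np.log([0.45, 0.40])]   # b = 3000

# ls_fit_b drops b=0 and solves [unique_b, 1] . [slope, intercept] = log_prop,
# so the result has one slope row and one intercept row, one column per voxel.
fit = ls_fit_b(log_prop, unique_b)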
Example 4
def test_ls_fit_b():
    log_prop_t = test_log_prop_vals()
    b_matrix_t = np.matrix([[1,2], [1,1]]).T
    b_inv = ols_matrix(b_matrix_t)
    ls_fit_FA_t = np.dot(b_inv, np.matrix(log_prop_t))
    
    npt.assert_equal(ls_fit_FA_t, mf.ls_fit_b(log_prop_t, unique_b_t))
    
    return ls_fit_FA_t
Example 5
def test_ols_matrix():
    """
    Test that this really does OLS regression.
    """
    # Parameters
    beta = np.random.rand(10)
    # Inputs
    x = np.random.rand(100, 10)
    # Outputs (noise-less!)
    y = np.dot(x, beta)
    # Estimate back:
    ols_matrix = ozu.ols_matrix(x)
    beta_hat = np.array(np.dot(ols_matrix, y)).squeeze()
    # This should have recovered the original:
    npt.assert_almost_equal(beta, beta_hat)

    # Make sure that you can normalize and it gives you the same shape matrix:
    npt.assert_almost_equal(ols_matrix.shape,
                            ozu.ols_matrix(x, norm_func=ozu.l2_norm).shape)
Example 7
def test_ls_fit_b():
    log_prop_t = test_log_prop_vals()
    b_matrix_t = np.matrix([[1,2], [1,1]]).T
    b_inv = ols_matrix(b_matrix_t)
    ls_fit_FA_t = np.dot(b_inv, np.matrix(log_prop_t))

    for idx in np.arange(len(ls_fit_FA_t)):
        npt.assert_equal(
            abs(ls_fit_FA_t[idx] - mf.ls_fit_b(log_prop_t, unique_b_t)[idx]) < 0.001,
            1)
    
    return ls_fit_FA_t
Example 8
    def ols(self):
        """
        Compute the OLS solution. 
        """
        # Preallocate:
        ols_weights = np.empty((self.rotations.shape[0], 2,
                               self._flat_signal.shape[0]))

        iso_regressor, tensor_regressor, fit_to = self.regressors
        
        for idx in xrange(ols_weights.shape[0]):
            # The 'design matrix':
            d = np.vstack([tensor_regressor[idx], iso_regressor]).T
            # This is $(X' X)^{-1} X'$:
            ols_mat = ozu.ols_matrix(d)
            # Multiply to find the OLS solution (fitting to all the voxels in
            # one fell swoop):
            ols_weights[idx] = np.dot(ols_mat, fit_to)
            # ols_weights[idx] = npla.lstsq(d, fit_to)[0]
        return ols_weights
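
To make the "one fell swoop" comment concrete, here is a shape sketch with made-up dimensions (64 measurement directions, 1000 voxels), using np.linalg.pinv in place of ozu.ols_matrix.

import numpy as np

# Made-up shapes mirroring the snippet above: 64 measurement directions, 1000 voxels.
n_dirs, n_vox = 64, 1000
d = np.random.rand(n_dirs, 2)            # the 'design matrix' [tensor, iso]
fit_to = np.random.rand(n_dirs, n_vox)   # signal from every voxel, per direction

# (X' X)^{-1} X', via the pseudo-inverse rather than ozu.ols_matrix:
ols_mat = np.linalg.pinv(d)              # shape (2, n_dirs)

# One matrix product fits every voxel at once:
ols_weights = np.dot(ols_mat, fit_to)    # shape (2, n_vox)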