Code example #1
File: sparse.py Project: austinpeel/jax_cosmo
def slogdet(sparse):
    """Calculate the log(determinant) of a sparse matrix.

    Based on equation (2.2) of https://arxiv.org/abs/1112.4379

    Parameters
    ----------
    sparse : array
        3D array of shape (ny, nx, ndiag) of block diagonal elements.

    Returns
    -------
    tuple
        Tuple (sign, logdet) such that sign * exp(logdet) is the
        determinant. If the determinant is zero, logdet = -inf.
    """
    sparse = check_sparse(sparse, square=True)
    N, _, P = sparse.shape
    sign = np.product(np.sign(sparse[-1, -1]))
    logdet = np.sum(np.log(np.abs(sparse[-1, -1])))
    # The individual blocks can be calculated in any order so there
    # should be a better way to express this using lax.map but I
    # can't get it to work without "concretization" errors.
    for i in range(N - 1):
        s, ld = _block_det(sparse, i, N, P)
        sign *= s
        logdet += ld
    return sign, logdet
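
As a quick sanity check on the snippet above, here is a minimal usage sketch, assuming the jax_cosmo.sparse helpers (check_sparse, _block_det, inv, sparse_dot_sparse) are in scope; the matrix values are made up:

import jax.numpy as np

# Two-by-two grid of blocks, each a diagonal of length 3:
# shape (ny, nx, ndiag) = (2, 2, 3).
sparse = np.array([[[1., 2., 3.], [0., 0., 0.]],
                   [[0., 0., 0.], [4., 5., 6.]]])

# Dense equivalent: each (i, j) entry expands into a diagonal block.
dense = np.block([[np.diag(sparse[i, j]) for j in range(2)]
                  for i in range(2)])

sign, logdet = slogdet(sparse)
# With zero off-diagonal blocks the determinant is 1*2*3*4*5*6 = 720,
# so (sign, logdet) should agree with np.linalg.slogdet(dense).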
Code example #2
File: pendulum.py Project: pierrelux/lagrangian-rl
def dxdt(x, u, t):
    del t
    w = np.product(params)
    # assume point mass and massless arm
    inertia = params.mass * params.length**2
    force = w * np.sin(x[0]) + u[0] - x[1] * params.drag
    return np.stack((x[1], force / inertia))
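
For illustration, a hedged sketch of how such a closure might be driven. The Params container below is hypothetical (the snippet never shows params); it holds only (mass, gravity, length) so that np.product over its fields yields the gravity-torque coefficient w = m*g*l, and drag is passed separately here, since the snippet's params also carries a drag field that would otherwise enter the product:

from collections import namedtuple
import jax.numpy as np

Params = namedtuple('Params', ['mass', 'gravity', 'length'])
params = Params(mass=1.0, gravity=9.8, length=0.5)
w = np.product(np.array(params))  # 1.0 * 9.8 * 0.5 = 4.9 (np.product aliases np.prod)

def euler_step(x, u, dt=0.01, drag=0.1):
    # One explicit Euler step of the pendulum dynamics above.
    inertia = params.mass * params.length**2
    force = w * np.sin(x[0]) + u[0] - x[1] * drag
    return x + dt * np.stack((x[1], force / inertia))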
Code example #3
File: sparse.py Project: austinpeel/jax_cosmo
def _block_det(sparse, k, N, P):
    # Peel off block row/column k and reduce to the Schur complement M.
    u = sparse[k:k + 1, k + 1:N, 0:P]
    S = sparse[k + 1:N, k + 1:N, 0:P]
    v = sparse[k + 1:N, k:k + 1, 0:P]
    Sinv_v = sparse_dot_sparse(inv(S), v)
    M = sparse[k, k] - sparse_dot_sparse(u, Sinv_v)
    # M is diagonal, so its determinant is the product of its entries.
    sign = np.product(np.sign(M))
    logdet = np.sum(np.log(np.abs(M)))
    return sign, logdet
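
For context, this helper applies the block-determinant (Schur complement) identity underlying equation (2.2) of https://arxiv.org/abs/1112.4379; in the code's names, with A = sparse[k, k]:

\det\begin{pmatrix} A & u \\ v & S \end{pmatrix} = \det(S)\,\det\bigl(A - u\,S^{-1}\,v\bigr)

Because every block is diagonal, the Schur complement M = A - u S^{-1} v is diagonal as well, so its sign and log-determinant reduce to elementwise products and sums, which is what the last three lines compute.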
Code example #4
File: _ard.py Project: lschmors/RFEst
    def update_C_prior(self, params):
        """
        Overwrite the Kronecker-product construction from 1D to nD.

        ARD cannot use it, due to its assumption that every
        pixel in the RF should be penalized by its own hyperparameter.
        """
        rho = params[1]
        theta = params[2:]
        C, C_inv = sparsity_kernel(theta, np.product(self.dims))
        C *= rho
        C_inv /= rho

        return C, C_inv
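
The np.product call here simply collapses the n-D RF dimensions into the total pixel count that sizes the prior covariance; a tiny illustration (the dims value is made up):

import jax.numpy as np

dims = np.array([8, 10])   # e.g. an 8x10 receptive field
n = np.product(dims)       # 80 pixels -> one n x n prior covariance C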
Code example #5
def group_utility(particle_weights,
                  particles,
                  groups,
                  group_sensitivities,
                  group_specificities,
                  utility_fun):
  """Compute the utility of a set of groups.

  This function computes the utility of a set of groups, given a distribution
  over the population status encoded as a weighted sum of Dirac measures on
  particles, the specificities and sensitivities of tests, and a utility
  function.

  Args:
   particle_weights: weights of particles
   particles: particles summarizing belief about infection status
   groups: set of groups to be tested
   group_sensitivities: sensitivities of the test for each group
   group_specificities: specificities of the test for each group
   utility_fun: a utility function that takes as input (particle_weights,
      particles) and outputs the utility of the distribution

  Returns:
   The expected utility (over the test results) of the posterior
  """
  num_groups = groups.shape[0]
  proba_y_is_one_given_x = (np.matmul(particles, np.transpose(groups))
                            * (group_sensitivities + group_specificities - 1)
                            + 1.0 - group_specificities)
  proba_y_is_one_given_x = np.expand_dims(proba_y_is_one_given_x, axis=2)
  test_res = np.array(list(itertools.product([0, 1], repeat=num_groups)))
  test_res = np.expand_dims(np.transpose(test_res), axis=0)
  proba_y_given_x = np.product(test_res * proba_y_is_one_given_x + (1-test_res)
                               * (1-proba_y_is_one_given_x), axis=1)
  proba_y_and_x = proba_y_given_x * np.expand_dims(particle_weights, 1)
  proba_y = np.sum(proba_y_and_x, axis=0)
  proba_x_given_y = proba_y_and_x / np.expand_dims(proba_y, 0)
  vutility_fun = jax.vmap(utility_fun, [1, None])
  utility_x_given_y = vutility_fun(proba_x_given_y, particles)
  return np.dot(proba_y, utility_x_given_y)
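
A minimal sketch of calling this function, with made-up particles and a negative-entropy utility (itertools, jax, and jax.numpy must be in scope for group_utility itself):

import itertools
import jax
import jax.numpy as np

# Two equally weighted particles over a population of two people.
particles = np.array([[0., 1.],
                      [1., 0.]])
particle_weights = np.array([0.5, 0.5])

# One group that pools both people, with imperfect test characteristics.
groups = np.array([[1., 1.]])
group_sensitivities = np.array([0.95])
group_specificities = np.array([0.98])

def neg_entropy(weights, particles):
    # Utility of a posterior = negative entropy of its particle weights.
    p = np.clip(weights, 1e-12, 1.0)
    return np.sum(p * np.log(p))

u = group_utility(particle_weights, particles, groups,
                  group_sensitivities, group_specificities, neg_entropy)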
Code example #6
File: discrete.py Project: hdocmsu/numpyro
def sample(self, key, sample_shape=()):
    return np.reshape(random.split(key, np.product(sample_shape).astype(np.int32)),
                      sample_shape + self.event_shape)
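
The pattern is simply "split one key into prod(sample_shape) keys, then reshape"; a standalone illustration, assuming the legacy representation where a raw PRNG key is a pair of uint32s (event_shape == (2,)):

import jax.numpy as np
from jax import random

key = random.PRNGKey(0)
sample_shape = (2, 3)
event_shape = (2,)

num = int(np.product(np.array(sample_shape)))          # 6 keys needed
keys = np.reshape(random.split(key, num), sample_shape + event_shape)
# keys.shape == (2, 3, 2): one subkey per entry of sample_shape.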
Code example #7
File: _glm.py Project: lschmors/RFEst
    def _get_filter_variance(self, w_type='opt'):
        """
        Compute the variance and standard error of the weights of each filter.
        """

        P = self.P
        S = self.S
        if 'train' in self.XS:
            XS = self.XS['train']
        X = self.X['train']

        # trA = {name: (XS[name].T * (np.linalg.inv(XS[name].T @ XS[name] + P[name]) @ XS[name].T)).sum(0) for name in self.P}
        edf = self.edf

        if self.distr == 'gaussian':

            y = self.y['train']
            y_pred = self.y_pred[w_type]['train']
            rsd = y - y_pred  # residuals
            rss = np.sum(rsd**2)  # residual sum of squares
            rss_var = {
                name: rss / (len(y) - edf[name])
                for name in self.filter_names
            }

            V = {}
            b_se = {}
            w_se = {}
            for name in self.filter_names:
                if name in S:
                    # check sample size
                    if len(XS[name]) < self.edf[name]:
                        print(
                            'Sample size is too small for getting reasonable confidence interval.'
                        )
                    # compute weight covariance
                    try:
                        V[name] = np.linalg.inv(XS[name].T @ XS[name] +
                                                P[name]) * rss_var[name]
                    except Exception:
                        # if inv failed, use pinv
                        V[name] = np.linalg.pinv(XS[name].T @ XS[name] +
                                                 P[name]) * rss_var[name]

                    # remove negative correlation?
                    # https://math.stackexchange.com/q/4018326
                    V[name] = np.abs(V[name])

                    b_se[name] = np.sqrt(np.diag(V[name]))
                    w_se[name] = S[name] @ b_se[name]

                else:
                    if len(X[name]) < np.product(np.array(self.dims[name])):
                        print(
                            'Sample size is too small for getting reasonable confidence interval.'
                        )
                    V[name] = np.linalg.inv(
                        X[name].T @ X[name]) * rss_var[name]
                    V[name] = np.abs(V[name])
                    w_se[name] = np.sqrt(np.diag(V[name]))

        else:

            b = {}
            w = {}
            u = {}
            U = {}
            V = {}
            w_se = {}
            b_se = {}
            for name in self.filter_names:
                if name in S:
                    # check sample size
                    if len(XS[name]) < self.edf[name]:
                        print(
                            'Sample size is too small for getting reasonable confidence interval.'
                        )

                    b[name] = self.b[w_type][name]
                    u[name] = self.fnl(XS[name] @ b[name],
                                       self.filter_nonlinearity[name])
                    U[name] = 1 / self.fnl(
                        u[name], self.output_nonlinearity).flatten()**2

                    try:
                        V[name] = np.linalg.inv(XS[name].T *
                                                U[name] @ XS[name] + P[name])
                    except Exception:
                        V[name] = np.linalg.pinv(XS[name].T *
                                                 U[name] @ XS[name] + P[name])

                    V[name] = np.abs(V[name])
                    b_se[name] = np.sqrt(np.diag(V[name]))
                    w_se[name] = S[name] @ b_se[name]
                else:

                    if len(X[name]) < np.product(np.array(self.dims[name])):
                        print(
                            'Sample size is too small for getting reasonable confidence interval.'
                        )

                    w[name] = self.w[w_type][name]
                    u[name] = self.fnl(X[name] @ w[name],
                                       self.filter_nonlinearity[name])
                    U[name] = 1 / self.fnl(
                        u[name], self.output_nonlinearity).flatten()**2

                    try:
                        V[name] = np.linalg.inv(X[name].T * U[name] @ X[name])
                    except Exception:
                        V[name] = np.linalg.pinv(X[name].T * U[name] @ X[name])
                    V[name] = np.abs(V[name])
                    w_se[name] = np.sqrt(np.diag(V[name]))

        self.V[w_type] = V
        self.b_se[w_type] = b_se
        self.w_se[w_type] = w_se
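
Stripped of the bookkeeping, the Gaussian branch above is the textbook covariance of least-squares weights, V = inv(X^T X + P) * rss / (n - edf), with standard errors on the diagonal. A self-contained sketch with synthetic data and no penalty matrix (all names local to this sketch):

import jax.numpy as np
from jax import random

key = random.PRNGKey(0)
k1, k2, k3 = random.split(key, 3)
X = random.normal(k1, (200, 5))                  # design matrix
w_true = random.normal(k2, (5,))
y = X @ w_true + 0.1 * random.normal(k3, (200,))

w_hat = np.linalg.lstsq(X, y, rcond=None)[0]
rss = np.sum((y - X @ w_hat) ** 2)
sigma2 = rss / (len(y) - X.shape[1])             # plays the role of rss_var
V = np.linalg.inv(X.T @ X) * sigma2              # weight covariance
w_se = np.sqrt(np.diag(V))                       # standard errors, as above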