Example #1
    def _P(self, v):
        """
        Let 𝙺 be the optimal covariance matrix under the null hypothesis.
        Given 𝐯, this method computes

            𝙿𝐯 = 𝙺⁻¹𝐯 - 𝙺⁻¹𝙼(𝙼ᵀ𝙺⁻¹𝙼)⁻¹𝙼ᵀ𝙺⁻¹𝐯.
        """
        from numpy_sugar.linalg import rsolve
        from scipy.linalg import cho_solve

        # First term of the formula above: 𝙺⁻¹𝐯.
        x = rsolve(self._lmm.covariance(), v)
        if self._lmm.X is not None:
            # Remove the covariate projection, the second term of the formula.
            Lh = self._lmm._terms["Lh"]
            t = self._lmm.X @ cho_solve(Lh, self._lmm.M.T @ x)
            x -= rsolve(self._lmm.covariance(), t)

        return x
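The same projection can be written directly with plain NumPy. Below is a minimal,
self-contained sketch (K, M and v are hypothetical stand-ins for the model's
covariance, covariate matrix and input vector, not the glimix-core API); note
that 𝙼ᵀ𝙿𝐯 = 0, i.e. 𝙿 projects out the covariates:

import numpy as np

rng = np.random.default_rng(0)
M = rng.normal(size=(5, 2))          # covariates (stand-in)
K = np.eye(5) + M @ M.T              # symmetric positive definite covariance
v = rng.normal(size=5)

Kiv = np.linalg.solve(K, v)          # 𝙺⁻¹𝐯
KiM = np.linalg.solve(K, M)          # 𝙺⁻¹𝙼
Pv = Kiv - KiM @ np.linalg.solve(M.T @ KiM, M.T @ Kiv)

np.testing.assert_allclose(M.T @ Pv, np.zeros(2), atol=1e-12)  # 𝙿 kills 𝙼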
Example #2
    def posteriori_mean(self):
        r"""Mean of the estimated posteriori.

        This is also the maximum a posteriori estimation of the latent variable.
        """
        from numpy import dot
        from numpy_sugar.linalg import rsolve

        Sigma = self.posteriori_covariance()
        eta = self._ep._posterior.eta
        return dot(Sigma, eta + rsolve(GLMM.covariance(self), self.mean()))
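In symbols, the return line computes 𝝁 = Σ(𝛈 + 𝙺⁻¹𝐦), where Σ is the posterior
covariance, 𝛈 the EP site parameter, 𝙺 the prior covariance, and 𝐦 the prior
mean. A minimal numeric sketch with stand-in values (illustrative only, not the
glimix-core API):

import numpy as np

rng = np.random.default_rng(1)
K = np.eye(4) + 0.1 * np.ones((4, 4))                # prior covariance (stand-in)
m = rng.normal(size=4)                               # prior mean (stand-in)
eta = rng.normal(size=4)                             # EP natural parameter (stand-in)
Sigma = np.linalg.inv(np.linalg.inv(K) + np.eye(4))  # posterior covariance (stand-in)

mu = Sigma @ (eta + np.linalg.solve(K, m))           # posterior mean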
Example #3
File: _lmm.py, Project: mindis/glimix-core
    def beta(self):
        """
        Fixed-effect sizes.

        Returns
        -------
        effect-sizes : numpy.ndarray
            Optimal fixed-effect sizes.

        Notes
        -----
        Setting the derivative of log(p(𝐲)) with respect to the effect sizes
        to zero leads to the solution 𝜷 of the equation ::

            (QᵀX)ᵀD⁻¹(QᵀX)𝜷 = (QᵀX)ᵀD⁻¹(Qᵀ𝐲).
        """
        from numpy_sugar.linalg import rsolve

        return rsolve(self._X["VT"], rsolve(self._X["tX"], self.mean()))
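The return line recovers 𝜷 from cached factors of the design matrix. For
intuition, here is an illustrative direct solve of the same equation with plain
NumPy (Q, d, X and y are hypothetical stand-ins for the covariance eigenvectors,
the diagonal of D, the covariates and the outcome):

import numpy as np

rng = np.random.default_rng(2)
n, p = 6, 2
X = rng.normal(size=(n, p))
y = rng.normal(size=n)
Q, _ = np.linalg.qr(rng.normal(size=(n, n)))  # orthogonal Q (stand-in)
d = rng.uniform(0.5, 2.0, size=n)             # diagonal of D (stand-in)

QtX = Q.T @ X
lhs = QtX.T @ (QtX / d[:, None])              # (QᵀX)ᵀD⁻¹(QᵀX)
rhs = QtX.T @ ((Q.T @ y) / d)                 # (QᵀX)ᵀD⁻¹(Qᵀ𝐲)
beta = np.linalg.solve(lhs, rhs)              # 𝜷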
Example #4
from numpy import zeros
from numpy.linalg import solve as npy_solve
from numpy.random import RandomState
from numpy.testing import assert_, assert_allclose
from numpy_sugar.linalg import rsolve


def test_rsolve():
    random = RandomState(0)
    A = random.randn(1, 1)
    b = random.randn(1)

    assert_allclose(rsolve(A, b), npy_solve(A, b))

    A = random.randn(2, 2)
    b = random.randn(2)

    assert_allclose(rsolve(A, b), npy_solve(A, b))

    A = random.randn(3, 3)
    b = random.randn(3)

    assert_allclose(rsolve(A, b), npy_solve(A, b))

    A[:] = 1e-10
    assert_allclose(rsolve(A, b), zeros(A.shape[1]))

    A = zeros((0, 0))
    b = zeros((0, ))
    assert_(rsolve(A, b).ndim == 1)
    assert_(rsolve(A, b).shape[0] == 0)

    A = zeros((0, 1))
    b = zeros((0, ))
    assert_(rsolve(A, b).ndim == 1)
    assert_(rsolve(A, b).shape[0] == 1)
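In short, the test pins down rsolve's contract: it matches numpy.linalg.solve on
well-conditioned square systems and still returns an answer on (near-)singular,
rectangular or empty inputs, where solve would raise. A rough stand-in with
plain NumPy (rsolve_sketch is a hypothetical name, not numpy_sugar's
implementation):

import numpy as np

def rsolve_sketch(A, b):
    # Least-squares solve: agrees with numpy.linalg.solve for invertible A,
    # and degrades gracefully instead of raising otherwise.
    return np.linalg.lstsq(A, b, rcond=None)[0]

rng = np.random.default_rng(3)
A = rng.normal(size=(3, 3))
b = rng.normal(size=3)
np.testing.assert_allclose(rsolve_sketch(A, b), np.linalg.solve(A, b))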
Example #5
File: _lmm.py, Project: mindis/glimix-core
    def _update_beta(self):
        from numpy_sugar.linalg import rsolve

        assert not self._fix["beta"]
        if self._optimal["beta"]:
            return

        # Accumulate 𝐲ᵀQD⁻¹QᵀX and XᵀQD⁻¹QᵀX over the eigenvalue blocks.
        yTQDiQTm = list(self._yTQDiQTX)
        mTQDiQTm = list(self._XTQDiQTX)
        nominator = yTQDiQTm[0]
        denominator = mTQDiQTm[0]

        if len(yTQDiQTm) > 1:
            nominator += yTQDiQTm[1]
            denominator += mTQDiQTm[1]

        # Solve (QᵀX)ᵀD⁻¹(QᵀX)𝜷 = (QᵀX)ᵀD⁻¹(Qᵀ𝐲) for the effect sizes.
        self._tbeta[:] = rsolve(denominator, nominator)
        self._optimal["beta"] = True
        self._optimal["scale"] = False
Example #6
def fit_bb_glm(a, d, X, offset=0, theta=None, maxiter=100, tol=1e-5):
    """Fits generalized linear model with Beta-Binomial likelihood.

    Uses iteratively reweighted least squares / Fisher scoring.

    Args:
        a: Vector of successes.
        d: Vector of trials.
        X: Design matrix.
        offset: Untrainable offset parameter.
        theta: Dispersion parameter. If None, it is estimated alternately
            with the regression coefficients.
        maxiter: Maximum number of iterations.
        tol: Stop when the mean absolute change in the estimated parameters
            falls below tol.

    Returns:
        Regression coefficients, estimated dispersion parameter and number of
        iterations.
    """
    from numpy_sugar.linalg import rsolve

    a = atleast_2d_column(a)
    d = atleast_2d_column(d)
    X = atleast_2d_column(X)

    y = a / d

    fit_dispersion = theta is None

    is_bernoulli = False
    if np.array_equal(y, y.astype(bool)) and fit_dispersion:
        is_bernoulli = True
        d = (d > 0).astype(float)
        theta = 0
        fit_dispersion = False

    if fit_dispersion:
        data = np.hstack([a, d - a])

    beta = rsolve(X.T @ X, X.T @ y)  # initialize with ordinary least squares
    converged = False
    for i in range(maxiter):
        eta = X @ beta + offset
        mu = logistic(eta)

        if fit_dispersion:
            m = np.hstack([mu, 1 - mu])
            # Use a separate iteration budget for the dispersion fit so it
            # does not shadow the outer maxiter argument.
            polya_maxiter = min(10**(i + 1), 1000)
            (s, niter) = fit_polya_precision(data=data, m=m, maxiter=polya_maxiter)
            theta = 1 / s

        # Working response for the logit link: z = eta + g'(mu)(y - mu).
        gprime = 1 / ((1 - mu) * mu)
        z = eta + gprime * (y - mu) - offset

        # IRLS weights for the beta-binomial variance; with theta = 0 this
        # reduces to the plain binomial weights d * mu * (1 - mu).
        W = d * mu * (1 - mu) * (theta + 1)
        W = W / (d * theta + 1)

        XW = (W * X).T
        beta_new = rsolve(XW @ X, XW @ z)

        if np.abs(beta - beta_new).mean() < tol:
            converged = True
            break

        beta = beta_new

    if not converged:
        print('Warning: Model did not converge. Try increasing maxiter.')

    if is_bernoulli:
        theta = np.inf
    return beta, theta, i
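A hypothetical usage sketch, assuming the module's helpers (atleast_2d_column,
logistic, fit_polya_precision) are importable alongside fit_bb_glm; the
simulated data and true coefficients are illustrative only:

import numpy as np

rng = np.random.default_rng(5)
n = 200
X = np.column_stack([np.ones(n), rng.normal(size=n)])    # design matrix
d = rng.integers(5, 30, size=n).astype(float)            # trials per unit
p = 1.0 / (1.0 + np.exp(-(X @ np.array([0.3, -0.8]))))   # true success rates
a = rng.binomial(d.astype(int), p).astype(float)         # observed successes

# With theta fixed at 0 this reduces to plain binomial IRLS.
beta, theta, niters = fit_bb_glm(a, d, X, theta=0.0)
print(beta.ravel(), theta, niters)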