Example 1
    def test_hess(self):
        # NOTE: had to override this to relax the tolerance
        for test_params in self.params:
            he = self.mod.hessian(test_params)
            hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
            assert_almost_equal(he, hefd, decimal=DEC8)

            # NOTE: note the accuracy and the changing epsilon below;
            # score -> hessian does not work well with a non-complex-step
            # difference, though it is somewhat better near the optimum
            assert_almost_equal(he, hefd, decimal=7)
            hefd = numdiff.approx_fprime(test_params,
                                         self.mod.score,
                                         centered=True)
            assert_almost_equal(he, hefd, decimal=4)
            hefd = numdiff.approx_fprime(test_params,
                                         self.mod.score,
                                         1e-9,
                                         centered=False)
            assert_almost_equal(he, hefd, decimal=2)

            hescs = numdiff.approx_fprime_cs(test_params, self.mod.score)
            assert_almost_equal(he, hescs, decimal=DEC8)

            hecs = numdiff.approx_hess_cs(test_params, self.mod.loglike)
            assert_almost_equal(he, hecs, decimal=5)
            # NOTE: these just don't work well
            # hecs = numdiff.approx_hess1(test_params, self.mod.loglike, 1e-3)
            # assert_almost_equal(he, hecs, decimal=1)
            # hecs = numdiff.approx_hess2(test_params, self.mod.loglike, 1e-4)
            # assert_almost_equal(he, hecs, decimal=0)
            hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-4)
            assert_almost_equal(he, hecs, decimal=0)
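
The notes in this test point at the central trade-off in numerical differentiation: the complex-step results (approx_fprime_cs, approx_hess_cs) match the analytic derivatives to many more decimals than the plain finite-difference variants, which also need a carefully chosen step size. A minimal self-contained sketch of why, using only NumPy; the test function and step sizes below are invented for illustration and are not part of the test suite.

import numpy as np

def complex_step_grad(f, x, h=1e-20):
    # imag(f(x + i*h)) / h involves no subtraction of nearly equal numbers,
    # so h can be tiny and the derivative is accurate to machine precision
    # (f must accept complex input).
    grad = np.zeros(x.size)
    for i in range(x.size):
        xc = x.astype(complex)
        xc[i] += 1j * h
        grad[i] = f(xc).imag / h
    return grad

def forward_diff_grad(f, x, h=1e-8):
    # One-sided difference: truncation error O(h) plus cancellation error
    # O(eps/h), so the best achievable accuracy is only about sqrt(eps).
    f0 = f(x)
    grad = np.zeros(x.size)
    for i in range(x.size):
        xh = x.astype(float)   # astype makes a copy
        xh[i] += h
        grad[i] = (f(xh) - f0) / h
    return grad

f = lambda x: np.exp(x[0]) * np.sin(x[1])
x = np.array([0.3, 0.7])
exact = np.array([np.exp(0.3) * np.sin(0.7), np.exp(0.3) * np.cos(0.7)])
print(np.abs(complex_step_grad(f, x) - exact))   # errors around 1e-16
print(np.abs(forward_diff_grad(f, x) - exact))   # errors around 1e-8
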
Example 2
def test_dtypes():
    def f(x):
        return 2 * x

    desired = np.array([[2, 0], [0, 2]])
    assert_allclose(numdiff.approx_fprime(np.array([1, 2]), f), desired)
    assert_allclose(numdiff.approx_fprime(np.array([1., 2.]), f), desired)
    assert_allclose(numdiff.approx_fprime(np.array([1. + 0j, 2. + 0j]), f),
                    desired)
Example 3
    def test_grad_fun1_fd(self):
        for test_params in self.params:
            gtrue = self.gradtrue(test_params)
            fun = self.fun()
            epsilon = 1e-6
            gfd = numdiff.approx_fprime(test_params,
                                        fun,
                                        epsilon=epsilon,
                                        args=self.args)
            gfd += numdiff.approx_fprime(test_params,
                                         fun,
                                         epsilon=-epsilon,
                                         args=self.args)
            gfd /= 2.
            assert_almost_equal(gtrue, gfd, decimal=DEC6)
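
Averaging the forward-difference gradient at +epsilon with the one at -epsilon, as this test does, is algebraically the central (two-sided) difference that approx_fprime(..., centered=True) computes directly. A small illustrative check; the cubic test function, step size, and tolerances are assumptions made for this sketch, and the same import is available from statsmodels.tools.numdiff.

import numpy as np
from sm2.tools.numdiff import approx_fprime

fun = lambda x: np.sum(x ** 3)          # gradient is 3 * x**2
x0 = np.array([1.0, 2.0])
eps = 1e-6

# forward step, backward step, then average ...
gfd = (approx_fprime(x0, fun, epsilon=eps)
       + approx_fprime(x0, fun, epsilon=-eps)) / 2.
# ... matches the centered difference computed in one call
gc = approx_fprime(x0, fun, centered=True)
np.testing.assert_allclose(gfd, gc, rtol=1e-6)
np.testing.assert_allclose(gc, 3 * x0 ** 2, rtol=1e-6)
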
Example 4
def test_deriv(link):
    # Check link function derivatives using numeric differentiation.
    np.random.seed(24235)
    for k in range(10):  # TODO: clarify what looping over k accomplishes here
        p = np.random.uniform(0, 1)
        d = link.deriv(p)
        da = nd.approx_fprime(np.r_[p], link)
        assert_allclose(d, da, rtol=1e-6, atol=1e-6)
Example 5
    def test_score(self):
        for test_params in self.params:
            sc = self.mod.score(test_params)
            scfd = numdiff.approx_fprime(test_params.ravel(), self.mod.loglike)
            assert_almost_equal(sc, scfd, decimal=1)

            sccs = numdiff.approx_fprime_cs(test_params.ravel(),
                                            self.mod.loglike)
            assert_almost_equal(sc, sccs, decimal=11)
Example 6
def check_score_hessian(results):
    # GH#4620
    # compare model score and hessian with numerical derivatives

    params = results.params
    # avoid checking the score at the MLE, where it is close to zero
    sc = results.model.score(params * 0.98, scale=1)
    # cs currently (0.9) does not work for all families
    # sc2 = approx_fprime_cs(params * 0.98, results.model.loglike)
    llfunc = lambda x: results.model.loglike(x, scale=1)
    sc2 = approx_fprime(params * 0.98, llfunc)
    assert_allclose(sc, sc2, rtol=0.05)

    hess = results.model.hessian(params, scale=1)
    hess2 = approx_hess(params, llfunc)
    assert_allclose(hess, hess2, rtol=0.05)
    scfunc = lambda x: results.model.score(x, scale=1)
    hess3 = approx_fprime(params, scfunc)
    assert_allclose(hess, hess3, rtol=0.05)
Example 7
    def deriv2(self, p):
        """
        Second derivative of the link function g''(p).

        Implemented through numerical differentiation.
        """
        from sm2.tools.numdiff import approx_fprime
        p = np.atleast_1d(p)
        # Note: special function for norm.ppf does not support complex
        return np.diag(approx_fprime(p, self.deriv, centered=True))
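
This fallback differentiates the analytic first derivative with a centered finite difference because norm.ppf (used by the probit link) does not accept complex arguments, which rules out the complex-step approach. Below is a self-contained sketch of the same pattern for a logit link, where g''(p) is known in closed form; the helper names are invented for the sketch and the import also works from statsmodels.tools.numdiff.

import numpy as np
from sm2.tools.numdiff import approx_fprime

def logit_deriv(p):
    # g(p) = log(p / (1 - p)), so g'(p) = 1 / (p * (1 - p))
    p = np.atleast_1d(p)
    return 1. / (p * (1 - p))

def logit_deriv2_numeric(p):
    # same pattern as deriv2 above: centered difference of the first derivative
    p = np.atleast_1d(p)
    return np.diag(approx_fprime(p, logit_deriv, centered=True))

p = np.array([0.2, 0.5, 0.8])
exact = (2 * p - 1) / (p * (1 - p)) ** 2    # closed-form g''(p)
np.testing.assert_allclose(logit_deriv2_numeric(p), exact, rtol=1e-6)
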
Example 8
    def test_hess(self):
        for test_params in self.params:
            he = self.mod.hessian(test_params)
            hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
            assert_almost_equal(he, hefd, decimal=DEC8)

            # NOTE: notice the accuracy below
            assert_almost_equal(he, hefd, decimal=7)
            hefd = numdiff.approx_fprime(test_params,
                                         self.mod.score,
                                         centered=True)
            assert_allclose(he, hefd, rtol=1e-9)
            hefd = numdiff.approx_fprime(test_params,
                                         self.mod.score,
                                         centered=False)
            assert_almost_equal(he, hefd, decimal=4)

            hescs = numdiff.approx_fprime_cs(test_params.ravel(),
                                             self.mod.score)
            assert_allclose(he, hescs, rtol=1e-13)

            hecs = numdiff.approx_hess_cs(test_params.ravel(),
                                          self.mod.loglike)
            assert_allclose(he, hecs, rtol=1e-9)

            # NOTE: Look at the lack of precision - default epsilon not always
            # best
            grad = self.mod.score(test_params)
            hecs, gradcs = numdiff.approx_hess1(test_params,
                                                self.mod.loglike,
                                                1e-6,
                                                return_grad=True)
            assert_almost_equal(he, hecs, decimal=1)
            assert_almost_equal(grad, gradcs, decimal=1)
            hecs, gradcs = numdiff.approx_hess2(test_params,
                                                self.mod.loglike,
                                                1e-4,
                                                return_grad=True)
            assert_almost_equal(he, hecs, decimal=3)
            assert_almost_equal(grad, gradcs, decimal=1)
            hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-5)
            assert_almost_equal(he, hecs, decimal=4)
Example 9
    def test_grad_fun1_fdc(self):
        for test_params in self.params:
            gtrue = self.gradtrue(test_params)
            fun = self.fun()

            gfd = numdiff.approx_fprime(test_params,
                                        fun,
                                        epsilon=1e-8,
                                        args=self.args,
                                        centered=True)
            assert_almost_equal(gtrue, gfd, decimal=DEC5)
Example 10
    def score(self, params):
        """
        Return the gradient of the loglikelihood at params.

        Parameters
        ----------
        params : array-like
            The parameter values at which to evaluate the score function.

        Notes
        -----
        Returns numerical gradient.
        """
        loglike = self.loglike
        return approx_fprime(params, loglike, epsilon=1e-8)
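
This generic numerical score is what makes the rest of the likelihood machinery usable when only loglike is implemented. A minimal stand-alone illustration with a one-parameter Poisson log-likelihood; the data, parameter value, and tolerance are invented for the sketch.

import numpy as np
from sm2.tools.numdiff import approx_fprime

y = np.array([0., 1., 3., 2., 5., 4., 3., 2., 1., 6.])

def loglike(params):
    # Poisson loglikelihood in terms of b = log(rate); the log(y!) term
    # does not depend on b and is dropped.
    b = params[0]
    return np.sum(y * b - np.exp(b))

def score(params):
    # numerical gradient, mirroring the fallback score() above
    return approx_fprime(params, loglike, epsilon=1e-8)

params = np.array([0.5])
analytic = y.sum() - len(y) * np.exp(0.5)       # d loglike / d b
np.testing.assert_allclose(np.squeeze(score(params)), analytic, rtol=1e-5)
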
Example 11
def test_deriv2(link):
    # Check link function second derivatives using numeric differentiation.
    np.random.seed(24235)

    # TODO: Resolve errors with the numeric derivatives
    if link is probit:
        pytest.skip()
    for k in range(10):  # TODO: clarify what looping over k accomplishes here
        p = np.random.uniform(0, 1)
        p = np.clip(p, 0.01, 0.99)
        if link is cauchy:
            p = np.clip(p, 0.03, 0.97)
        d = link.deriv2(p)
        da = nd.approx_fprime(np.r_[p], link.deriv)
        assert_allclose(d, da, rtol=1e-6, atol=1e-6)
Example 12
    def score_obs(self, params):
        """
        Generic Zero Inflated model score (gradient) vector of the
        log-likelihood

        Parameters
        ----------
        params : array-like
            The parameters of the model

        Returns
        -------
        score : ndarray, 1-D
            The score vector of the model, i.e. the first derivative of the
            loglikelihood function, evaluated at `params`
        """
        params_infl = params[:self.k_inflate]
        params_main = params[self.k_inflate:]

        y = self.endog
        w = self.model_infl.predict(params_infl)
        w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
        score_main = self.model_main.score_obs(params_main)
        llf_main = self.model_main.loglikeobs(params_main)
        llf = self.loglikeobs(params)
        zero_idx = np.nonzero(y == 0)[0]
        nonzero_idx = np.nonzero(y)[0]

        dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
        dldw = np.zeros_like(self.exog_infl, dtype=np.float64)

        dldp[zero_idx, :] = (score_main[zero_idx].T *
                             (1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
        dldp[nonzero_idx, :] = score_main[nonzero_idx]

        if self.inflation == 'logit':
            dldw[zero_idx, :] = (self.exog_infl[zero_idx].T * w[zero_idx] *
                                 (1 - w[zero_idx]) *
                                 (1 - np.exp(llf_main[zero_idx])) /
                                 np.exp(llf[zero_idx])).T
            dldw[nonzero_idx, :] = -(self.exog_infl[nonzero_idx].T *
                                     w[nonzero_idx]).T
        elif self.inflation == 'probit':
            # TODO: Maybe do this check _before_ all the computation above?
            return approx_fprime(params, self.loglikeobs)

        return np.hstack((dldw, dldp))
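
The probit branch sidesteps the analytic algebra entirely: because loglikeobs returns one value per observation, approx_fprime returns the matrix of observation-level scores (one row per observation, one column per parameter) in a single call, which is exactly how it is used here. A stand-alone sketch of that behaviour with a hand-written per-observation Poisson log-likelihood; the design matrix, data, and tolerances are invented for the sketch.

import numpy as np
from sm2.tools.numdiff import approx_fprime

exog = np.column_stack([np.ones(5), np.array([-1., 0., 1., 2., 3.])])
endog = np.array([1., 2., 2., 5., 9.])

def loglikeobs(params):
    # per-observation Poisson loglikelihood with a log link (constant dropped)
    xb = exog.dot(params)
    return endog * xb - np.exp(xb)

params = np.array([0.3, 0.6])
jac = approx_fprime(params, loglikeobs)               # shape (nobs, k_params)
analytic = (endog - np.exp(exog.dot(params)))[:, None] * exog
np.testing.assert_allclose(jac, analytic, rtol=1e-4, atol=1e-5)
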
Example 13
def test_pdf_equiv():
    # Check that two implementations of MNLogit.pdf agree
    nobs = 10**4
    J = 8
    k_exog = 10
    np.random.seed(8)
    exog = np.random.randn(nobs, k_exog)
    endog = np.random.randint(0, J, size=nobs)
    # wendog = pd.get_dummies(endog).values

    model = MNLogit(endog, exog)
    params = np.random.random((model.K * (model.J - 1)))
    params = params.reshape(model.K, -1, order='F')

    Xb = model.exog.dot(params)
    pdf1 = model.pdf(Xb)
    pdf2 = mnlogit_pdf(Xb)

    assert_allclose(pdf1, pdf2)

    # Check that differentiating cdf matches pdf
    pdf1 = np.zeros((J - 1, J - 1))
    for k in range(J - 1):
        func = lambda x: model.cdf(x.reshape(1, -1))[0][k]  # noqa:E731
        pdf1[k, :] = approx_fprime(Xb[0], func)

    pdf2 = model.pdf(Xb[:1])[0]
    # Differentiating model.cdf ignores the "base" column of wendog; i.e.
    # computes (J-1)x(J-1) partials.  model.pdf computes JxJ partials, but
    # the non-overlapping ones should be redundant because the partials
    # should sum to zero over both columns and rows.
    assert_allclose(pdf2.sum(0), 0, atol=1e-14)
    assert_allclose(pdf2.sum(1), 0, atol=1e-14)
    # We can therefore drop the inconveniently mismatched row and column
    pdf3 = pdf2[1:, :-1].T
    # TODO: clarify which axis means what
    assert_allclose(pdf3, pdf1, atol=5e-8)
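
The claim that the J x J array of partials sums to zero along both axes is a property of the softmax that MNLogit.cdf implements: the probabilities sum to one, so each column of partials sums to zero, and the softmax is invariant to adding a constant to every linear index, so each row sums to zero as well. A small stand-alone check on a raw softmax Jacobian; the values are invented for the sketch.

import numpy as np

def softmax(eta):
    e = np.exp(eta - eta.max())
    return e / e.sum()

def softmax_jac(eta):
    # d p_j / d eta_k = p_j * (delta_jk - p_k)
    p = softmax(eta)
    return np.diag(p) - np.outer(p, p)

jac = softmax_jac(np.array([0.2, -1.0, 0.5, 1.3]))
# probabilities sum to 1  ->  columns of partials sum to 0
np.testing.assert_allclose(jac.sum(axis=0), 0, atol=1e-14)
# softmax is shift-invariant in eta  ->  rows sum to 0 as well
np.testing.assert_allclose(jac.sum(axis=1), 0, atol=1e-14)
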
Example 14
def margeff_cov_params(model, params, exog, cov_params, at, derivative,
                       dummy_ind, count_ind, method, J):
    """
    Computes the variance-covariance of marginal effects by the delta method.

    Parameters
    ----------
    model : model instance
        The model that returned the fitted results. Its pdf method is used
        for computing the Jacobian of discrete variables in dummy_ind and
        count_ind
    params : array-like
        estimated model parameters
    exog : array-like
        exogenous variables at which to calculate the derivative
    cov_params : array-like
        The variance-covariance of the parameters
    at : str
       Options are:

        - 'overall', The average of the marginal effects at each
          observation.
        - 'mean', The marginal effects at the mean of each regressor.
        - 'median', The marginal effects at the median of each regressor.
        - 'zero', The marginal effects at zero for each regressor.
        - 'all', The marginal effects at each observation.

        Only 'overall' has any effect here.

    derivative : function or array-like
        If a function, it returns the marginal effects of the model with
        respect to the exogenous variables evaluated at exog. Expected to be
        called derivative(params, exog). This will be numerically
        differentiated. Otherwise, it can be the Jacobian of the marginal
        effects with respect to the parameters.
    dummy_ind : array-like
        Indices of the columns of exog that contain dummy variables
    count_ind : array-like
        Indices of the columns of exog that contain count variables

    Notes
    -----
    For continuous regressors, the variance-covariance is given by

    Asy. Var[MargEff] = [d margeff / d params] V [d margeff / d params]'

    where V is the parameter variance-covariance.

    The outer Jacobians are computed via numerical differentiation if
    derivative is a function.
    """
    if callable(derivative):
        from sm2.tools.numdiff import approx_fprime_cs
        params = params.ravel('F')  # for Multinomial
        try:
            jacobian_mat = approx_fprime_cs(params,
                                            derivative,
                                            args=(exog, method))
        except TypeError:  # stats.norm.cdf doesn't take complex values
            from sm2.tools.numdiff import approx_fprime
            jacobian_mat = approx_fprime(params,
                                         derivative,
                                         args=(exog, method))
        if at == 'overall':
            jacobian_mat = np.mean(jacobian_mat, axis=1)
        else:
            jacobian_mat = jacobian_mat.squeeze()  # exog was 2d row vector
        if dummy_ind is not None:
            jacobian_mat = _margeff_cov_params_dummy(model, jacobian_mat,
                                                     params, exog, dummy_ind,
                                                     method, J)
        if count_ind is not None:
            jacobian_mat = _margeff_cov_params_count(model, jacobian_mat,
                                                     params, exog, count_ind,
                                                     method, J)
    else:
        jacobian_mat = derivative

    # NOTE: this won't go through for at == 'all'
    return np.dot(np.dot(jacobian_mat, cov_params), jacobian_mat.T)
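
Restating the delta-method step from the docstring: if m(theta) is the vector of marginal effects and V the parameter covariance, then Asy. Var[m] = J V J' with J = d m / d theta, and J is obtained numerically when derivative is a callable. A stand-alone sketch with an invented two-parameter transformation standing in for the marginal effects; the function, numbers, and tolerances are assumptions made for the illustration.

import numpy as np
from sm2.tools.numdiff import approx_fprime

def margeff(params):
    # toy nonlinear transformation playing the role of the marginal effects
    return np.array([np.exp(params[0]), params[0] * params[1]])

params = np.array([0.5, 2.0])
cov_params = np.array([[0.04, 0.01],
                       [0.01, 0.09]])

jacobian_mat = approx_fprime(params, margeff)    # numerical d margeff / d params
cov_margeff = np.dot(np.dot(jacobian_mat, cov_params), jacobian_mat.T)

# check the numerical Jacobian against the analytic one
jac_exact = np.array([[np.exp(0.5), 0.0],
                      [2.0, 0.5]])
np.testing.assert_allclose(jacobian_mat, jac_exact, rtol=1e-6, atol=1e-8)
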