Example #1
    def __init__(self,
                 endog,
                 exog,
                 exog_infl=None,
                 offset=None,
                 exposure=None,
                 inflation='logit',
                 p=2,
                 missing='none',
                 **kwargs):
        super(ZeroInflatedNegativeBinomialP,
              self).__init__(endog,
                             exog,
                             offset=offset,
                             inflation=inflation,
                             exog_infl=exog_infl,
                             exposure=exposure,
                             missing=missing,
                             **kwargs)
        self.model_main = NegativeBinomialP(self.endog,
                                            self.exog,
                                            offset=offset,
                                            exposure=exposure,
                                            p=p)
        self.distribution = zinegbin
        self.k_exog += 1
        self.k_extra += 1
        self.exog_names.append("alpha")
        self.result_class = ZeroInflatedNegativeBinomialResults
        self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
        self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
        self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper
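
Since Example #1 only shows the constructor, a minimal usage sketch may help. It is a sketch under assumptions: simulated data, illustrative variable names, and the public statsmodels import path; when exog_infl is omitted, the inflation model uses only a constant.

import numpy as np
from statsmodels.discrete.count_model import ZeroInflatedNegativeBinomialP

rng = np.random.default_rng(42)
n = 1000
exog = np.column_stack([np.ones(n), rng.normal(size=n)])
# Simulate zero-inflated counts: structural zeros mixed with NB2 draws.
mu = np.exp(exog @ np.array([0.8, 0.4]))
size = 2.0                                    # NB2 dispersion alpha = 1 / size
counts = rng.negative_binomial(size, size / (size + mu))
endog = np.where(rng.random(n) < 0.3, 0, counts)  # ~30% structural zeros

mod = ZeroInflatedNegativeBinomialP(endog, exog, p=2)
res = mod.fit(method='bfgs', maxiter=500, disp=0)
print(res.summary())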
Example #2
    def setup_class(cls):
        # here we don't need to check convergence from default start_params
        start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,
                        -2.88, 1.14]
        mod = NegativeBinomialP(endog, exog)   # checks also that default p=2
        res = mod.fit(start_params=start_params, method='nm', maxiter=2000)
        marge = res.get_margeff()
        cls.res = res
        cls.margeff = marge

        cls.res1_slice = slice(None, None, None)
        cls.res1 = res_stata.results_negbin_margins_cont
        cls.rtol_fac = 5e1
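
The fixture above exercises res.get_margeff(); here is a hedged sketch of the same call pattern on simulated data (the names below are illustrative, not the endog/exog fixtures the test module defines):

import numpy as np
from statsmodels.discrete.discrete_model import NegativeBinomialP

rng = np.random.default_rng(0)
n = 500
exog = np.column_stack([np.ones(n), rng.normal(size=n)])
mu = np.exp(exog @ np.array([1.0, 0.5]))
size = 2.0
endog = rng.negative_binomial(size, size / (size + mu))

mod = NegativeBinomialP(endog, exog)       # default p=2, as the test relies on
res = mod.fit(method='nm', maxiter=2000, disp=0)
marge = res.get_margeff()                  # average marginal effects
print(marge.summary())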
Example #3
    def setup_class(cls):
        cls.klass = NegativeBinomialP
        # using newton has results much closer to Stata than bfgs
        res1 = NegativeBinomialP(endog, exog).fit(method="newton", maxiter=300)
        cls.res1 = res1
        cls.res2 = resp.results_nb_docvis
        cls.pred_kwds_mean = {}
        cls.pred_kwds_6 = {}
        cls.k_infl = 0
        cls.rtol = 1e-8
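
The comment singles out method="newton" as tracking the Stata benchmark more closely than bfgs. A sketch of how such a comparison might look on simulated data (illustrative names, not the docvis fixtures):

import numpy as np
from statsmodels.discrete.discrete_model import NegativeBinomialP

rng = np.random.default_rng(1)
n = 500
exog = np.column_stack([np.ones(n), rng.normal(size=n)])
mu = np.exp(exog @ np.array([0.5, 0.3]))
size = 2.0
endog = rng.negative_binomial(size, size / (size + mu))

res_newton = NegativeBinomialP(endog, exog).fit(method="newton", maxiter=300,
                                                disp=0)
res_bfgs = NegativeBinomialP(endog, exog).fit(method="bfgs", maxiter=300,
                                              disp=0)
# Differences at this scale are what a tight rtol like 1e-8 is sensitive to.
print(np.max(np.abs(res_newton.params - res_bfgs.params)))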
Example #4
class ZeroInflatedNegativeBinomialP(GenericZeroInflated):
    __doc__ = """
    Zero Inflated Generalized Negative Binomial model for count data

    %(params)s
    %(extra_params)s

    Attributes
    ----------
    endog : array
        A reference to the endogenous response variable
    exog : array
        A reference to the exogenous design.
    exog_infl : array
        A reference to the zero-inflated exogenous design.
    p : scalar
        P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and
        p=2 for ZINB-2. Default is p=2
    """ % {'params' : base._model_params_doc,
           'extra_params' : _doc_zi_params +
           """p : float
        dispersion power parameter for the NegativeBinomialP model.  p=1 for
        ZINB-1 and p=2 for ZINB-2. Default is p=2
    """ + base._missing_param_doc}

    def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
                 inflation='logit', p=2, missing='none', **kwargs):
        super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,
                                                  offset=offset,
                                                  inflation=inflation,
                                                  exog_infl=exog_infl,
                                                  exposure=exposure,
                                                  missing=missing, **kwargs)
        self.model_main = NegativeBinomialP(self.endog, self.exog,
            offset=offset, exposure=exposure, p=p)
        self.distribution = zinegbin
        self.k_exog += 1
        self.k_extra += 1
        self.exog_names.append("alpha")
        self.result_class = ZeroInflatedNegativeBinomialResults
        self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
        self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
        self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper

    def _get_init_kwds(self):
        kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()
        kwds['p'] = self.model_main.parameterization
        return kwds

    def _predict_prob(self, params, exog, exog_infl, exposure, offset):
        params_infl = params[:self.k_inflate]
        params_main = params[self.k_inflate:]

        p = self.model_main.parameterization
        counts = np.arange(0, np.max(self.endog)+1)

        if len(exog_infl.shape) < 2:
            transform = True
            w = np.atleast_2d(
                self.model_infl.predict(params_infl, exog_infl))[:, None]
        else:
            transform = False
            w = self.model_infl.predict(params_infl, exog_infl)[:, None]

        w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
        mu = self.model_main.predict(params_main, exog,
            exposure=exposure, offset=offset)[:, None]
        result = self.distribution.pmf(counts, mu, params_main[-1], p, w)
        return result[0] if transform else result

    def _get_start_params(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=ConvergenceWarning)
            start_params = self.model_main.fit(disp=0, method='nm').params
        start_params = np.append(np.zeros(self.k_inflate), start_params)
        return start_params
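
_predict_prob evaluates the zero-inflated pmf on a grid of counts as a mixture: a point mass at zero with weight w plus the main negative binomial distribution with weight 1 - w. A minimal sketch of that mixture for the p=2 case, using scipy rather than the zinegbin helper:

import numpy as np
from scipy import stats

def zinb2_pmf(y, mu, alpha, w):
    # NB2 parameterization: Var(Y) = mu + alpha * mu**2
    size = 1.0 / alpha
    prob = size / (size + mu)
    pmf = (1 - w) * stats.nbinom.pmf(y, size, prob)
    return np.where(y == 0, w + pmf, pmf)

y = np.arange(6)
print(zinb2_pmf(y, mu=2.0, alpha=0.5, w=0.2))  # mass over small counts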
Example #5
class ZeroInflatedNegativeBinomialP(GenericZeroInflated):
    __doc__ = """
    Zero Inflated Generalized Negative Binomial Model

    %(params)s
    %(extra_params)s

    Attributes
    ----------
    endog : ndarray
        A reference to the endogenous response variable
    exog : ndarray
        A reference to the exogenous design.
    exog_infl : ndarray
        A reference to the zero-inflated exogenous design.
    p : scalar
        P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and
        p=2 for ZINB-2. Default is p=2
    """ % {'params' : base._model_params_doc,
           'extra_params' : _doc_zi_params +
           """p : float
        dispersion power parameter for the NegativeBinomialP model.  p=1 for
        ZINB-1 and p=2 for ZINB-2. Default is p=2
    """ + base._missing_param_doc}

    def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
                 inflation='logit', p=2, missing='none', **kwargs):
        super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,
                                                  offset=offset,
                                                  inflation=inflation,
                                                  exog_infl=exog_infl,
                                                  exposure=exposure,
                                                  missing=missing, **kwargs)
        self.model_main = NegativeBinomialP(self.endog, self.exog,
            offset=offset, exposure=exposure, p=p)
        self.distribution = zinegbin
        self.k_exog += 1
        self.k_extra += 1
        self.exog_names.append("alpha")
        self.result_class = ZeroInflatedNegativeBinomialResults
        self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
        self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
        self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper

    def _get_init_kwds(self):
        kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()
        kwds['p'] = self.model_main.parameterization
        return kwds

    def _predict_prob(self, params, exog, exog_infl, exposure, offset,
                      y_values=None):
        params_infl = params[:self.k_inflate]
        params_main = params[self.k_inflate:]

        p = self.model_main.parameterization
        if y_values is None:
            y_values = np.arange(0, np.max(self.endog)+1)

        if len(exog_infl.shape) < 2:
            transform = True
            w = np.atleast_2d(
                self.model_infl.predict(params_infl, exog_infl))[:, None]
        else:
            transform = False
            w = self.model_infl.predict(params_infl, exog_infl)[:, None]

        w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
        mu = self.model_main.predict(params_main, exog,
            exposure=exposure, offset=offset)[:, None]
        result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
        return result[0] if transform else result

    def _predict_var(self, params, mu, prob_infl):
        """predict values for conditional variance V(endog | exog)

        Parameters
        ----------
        params : array_like
            The model parameters. This is only used to extract extra params
            like dispersion parameter.
        mu : array_like
            Array of mean predictions for main model.
        prob_infl : array_like
            Array of predicted probabilities of zero-inflation `w`.

        Returns
        -------
        Predicted conditional variance.
        """
        alpha = params[-1]
        w = prob_infl
        p = self.model_main.parameterization
        var_ = (1 - w) * mu * (1 + alpha * mu**(p - 1) + w * mu)
        return var_

    def _get_start_params(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=ConvergenceWarning)
            start_params = self.model_main.fit(disp=0, method='nm').params
        start_params = np.append(np.zeros(self.k_inflate), start_params)
        return start_params

    @Appender(ZeroInflatedPoisson.get_distribution.__doc__)
    def get_distribution(self, params, exog=None, exog_infl=None,
                         exposure=None, offset=None):

        p = self.model_main.parameterization
        mu = self.predict(params, exog=exog, exog_infl=exog_infl,
                          exposure=exposure, offset=offset, which="mean-main")
        w = self.predict(params, exog=exog, exog_infl=exog_infl,
                         exposure=exposure, offset=offset, which="prob-main")

        distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
        return distr
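
get_distribution freezes one scipy-style distribution per observation; a hedged usage sketch, assuming res is a fitted ZeroInflatedNegativeBinomialP results instance like those above:

# `res` is assumed to be a fitted results instance.
distr = res.model.get_distribution(res.params)
print(distr.mean()[:5])   # E[y | x] per observation, inflation included
print(distr.pmf(0)[:5])   # P(y = 0 | x), structural and sampling zeros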