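# Module-level imports this method relies on (the method itself sits inside
# a class in statsmodels' emplike AFT module, hence the indentation below):
import numpy as np
from scipy import optimize
from scipy.stats import chi2
from statsmodels.regression.linear_model import OLS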
    def test_beta(self,
                  b0_vals,
                  param_nums,
                  ftol=10**-5,
                  maxiter=30,
                  print_weights=True):
        """
        Returns the profile empirical log-likelihood ratio and p-value for
        the hypothesis that the regression parameters `param_nums` equal
        `b0_vals`.

        Parameters
        ----------
        b0_vals : list
            The hypothesized values of the parameters to be tested

        param_nums : list
            The indices of the parameters to be tested

        ftol : float, optional
            The function tolerance for the EM optimization.
            Default is 10**-5

        maxiter : int, optional
            How many iterations to use in the EM algorithm.  Default is 30

        print_weights : bool, optional
            If true, returns the weights that maximize the profile
            log likelihood.  Default is True

        Returns
        -------

        test_results : tuple
            The log-likelihood ratio and p-value of the test.

        Notes
        -----

        The function will warn if the EM algorithm reaches maxiter.  However,
        when optimizing over nuisance parameters, it is possible to reach the
        maximum number of inner iterations for a specific value of the
        nuisance parameters while the results of the function are still
        valid.  This usually occurs when the optimization over the nuisance
        parameters selects parameter values that yield a log-likelihood
        ratio close to infinity.

        Examples
        --------

        >>> import statsmodels.api as sm
        >>> import numpy as np

        # Test that the parameter is 0 in a one-regressor, no-intercept model
        >>> data = sm.datasets.heart.load()
        >>> y = np.log10(data.endog)
        >>> x = data.exog
        >>> cens = data.censors
        >>> model = sm.emplike.emplikeAFT(y, x, cens)
        >>> res = model.test_beta([0], [0])
        >>> res
        (1.4657739632606308, 0.22601365256959183)

        # Test that the slope is 0 in a model with an intercept
        >>> data = sm.datasets.heart.load()
        >>> y = np.log10(data.endog)
        >>> x = data.exog
        >>> cens = data.censors
        >>> model = sm.emplike.emplikeAFT(y, sm.add_constant(x, prepend=True),
        ...                               cens)
        >>> res = model.test_beta([0], [1])
        >>> res
        (4.623487775078047, 0.031537049752572731)

        """
        censors = self.model.censors
        endog = self.model.endog
        exog = self.model.exog
        uncensored = (censors == 1).flatten()
        censored = (censors == 0).flatten()
        uncens_endog = endog[uncensored]
        uncens_exog = exog[uncensored, :]
        reg_model = OLS(uncens_endog, uncens_exog).fit()
        llr, pval, new_weights = reg_model.el_test(
            b0_vals, param_nums, return_weights=True)  # Needs to be changed
        km = self.model._make_km(endog, censors).flatten()  # when merged
        uncens_nobs = self.model.uncens_nobs
        F = np.asarray(new_weights).reshape(uncens_nobs)
        # Step 0: F holds the initial EL weights from the uncensored-only
        # fit above; km holds the Kaplan-Meier weights the EM step uses to
        # redistribute the censored observations' probability mass.
        params = self.params()
        survidx = np.where(censors == 0)
        survidx = survidx[0] - np.arange(len(survidx[0]))
        numcensbelow = np.int_(np.cumsum(1 - censors))
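        # Illustration with hypothetical data: for censors = [1, 0, 1, 1, 0],
        # np.where(censors == 0)[0] is [1, 4] and subtracting np.arange(2)
        # gives survidx = [1, 3]: each censored observation's position among
        # the uncensored ones.  numcensbelow = cumsum(1 - censors) counts the
        # censored observations at or before each index: [0, 1, 1, 1, 2].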
        if len(param_nums) == len(params):
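            # Every regression parameter is pinned by b0_vals, so there are
            # no nuisance parameters to profile out; run the EM test once
            # at the hypothesized values.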
            llr = self._EM_test([],
                                F=F,
                                params=params,
                                param_nums=param_nums,
                                b0_vals=b0_vals,
                                survidx=survidx,
                                uncens_nobs=uncens_nobs,
                                numcensbelow=numcensbelow,
                                km=km,
                                uncensored=uncensored,
                                censored=censored,
                                ftol=ftol,
                                maxiter=maxiter)
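            # By Wilks' theorem the log-likelihood ratio is asymptotically
            # chi-squared with one degree of freedom per tested parameter,
            # so the p-value is the chi-squared survival function at llr.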
            return llr, chi2.sf(llr, self.model.nvar)
        else:
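            # Only a subset of the parameters is pinned; profile out the
            # remaining (nuisance) parameters by minimizing the EM
            # log-likelihood ratio over them with a Nelder-Mead simplex
            # (optimize.fmin), starting from the unrestricted estimates.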
            x0 = np.delete(params, param_nums)
            try:
                res = optimize.fmin(
                    self._EM_test,
                    x0,
                    (params, param_nums, b0_vals, F, survidx, uncens_nobs,
                     numcensbelow, km, uncensored, censored, maxiter, ftol),
                    full_output=1,
                    disp=0)

                llr = res[1]
                return llr, chi2.sf(llr, len(param_nums))
            except np.linalg.LinAlgError:
                # A singular matrix during profiling means the restricted
                # fit is degenerate; treat the llr as infinite (p-value 0)
                return np.inf, 0