Example #1
 def bse(self):
     params = self.params
     if not fast_kalman or self.model.method == "css":
         if len(params) == 1:  # can't take an inverse
             return np.sqrt(-1.0 / approx_hess_cs(params, self.model.loglike, epsilon=1e-5))
         return np.sqrt(np.diag(-inv(approx_hess_cs(params, self.model.loglike, epsilon=1e-5))))
     else:
         if len(params) == 1:
             return np.sqrt(-1.0 / approx_hess(params, self.model.loglike, epsilon=1e-3)[0])
         return np.sqrt(np.diag(-inv(approx_hess(params, self.model.loglike, epsilon=1e-3)[0])))
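Both branches above implement the same formula: the standard errors are the square roots of the diagonal of the negative inverse Hessian of the log-likelihood, with the Hessian obtained numerically. A minimal, self-contained sketch of that calculation on a toy model (the central-difference helper and the normal log-likelihood below are illustrative assumptions, not the statsmodels implementation):

import numpy as np

def numerical_hessian(f, x, h=1e-4):
    # central-difference Hessian of a scalar function f at the point x
    x = np.asarray(x, dtype=float)
    k = x.size
    H = np.empty((k, k))
    for i in range(k):
        for j in range(k):
            ei = np.zeros(k)
            ej = np.zeros(k)
            ei[i] = h
            ej[j] = h
            H[i, j] = (f(x + ei + ej) - f(x + ei - ej)
                       - f(x - ei + ej) + f(x - ei - ej)) / (4.0 * h * h)
    return H

rng = np.random.RandomState(0)
y = rng.normal(loc=2.0, scale=1.5, size=500)

def loglike(params):
    mu, sigma = params
    return -0.5 * np.sum(np.log(2 * np.pi * sigma**2) + (y - mu)**2 / sigma**2)

mle = np.array([y.mean(), y.std()])        # closed-form MLE of the normal model
H = numerical_hessian(loglike, mle)
bse = np.sqrt(np.diag(-np.linalg.inv(H)))  # same formula as the bse() methods above
print(bse[0])
print(y.std() / np.sqrt(len(y)))           # textbook standard error of the mean, for comparison

For the mean of a normal sample the first entry reproduces sigma/sqrt(n), which is a convenient correctness check on the numerical Hessian.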
Example #2
 def bse(self):
     params = self.params
     if not fast_kalman or self.model.method == "css":
         if len(params) == 1: # can't take an inverse
             return np.sqrt(-1./approx_hess_cs(params,
                 self.model.loglike, epsilon=1e-5))
         return np.sqrt(np.diag(-inv(approx_hess_cs(params,
             self.model.loglike, epsilon=1e-5))))
     else:
         if len(params) == 1:
             return np.sqrt(-1./approx_hess(params,
                 self.model.loglike, epsilon=1e-3)[0])
         return np.sqrt(np.diag(-inv(approx_hess(params,
             self.model.loglike, epsilon=1e-3)[0])))
Example #3
 def cov_params(self): # add scale argument?
     func = self.model.loglike
     x0 = self.params
     if not fast_kalman or self.model.method == "css":
         return -inv(approx_hess_cs(x0, func))
     else:
         return -inv(approx_hess(x0, func, epsilon=1e-3)[0])
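approx_hess_cs is a complex-step Hessian: complex-step differentiation has no subtractive cancellation, so it tolerates very small steps, but it requires the objective to accept complex arguments (presumably why the fast Kalman filter path above falls back to a real finite difference). A short sketch of the underlying first-derivative trick on an assumed test function, not a statsmodels log-likelihood:

import numpy as np

def f(x):
    # built only from complex-safe operations (exp, sin, *), so the complex step applies
    return np.exp(x) * np.sin(x)

x0 = 0.7
h = 1e-20
d_cs = np.imag(f(x0 + 1j * h)) / h            # complex-step derivative: Im(f(x + i*h)) / h
d_fd = (f(x0 + 1e-6) - f(x0 - 1e-6)) / 2e-6   # central difference, for comparison
d_exact = np.exp(x0) * (np.sin(x0) + np.cos(x0))
print(abs(d_cs - d_exact))                    # essentially machine precision
print(abs(d_fd - d_exact))                    # several orders of magnitude larger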
Example #4
 def cov_params(self):  # add scale argument?
     func = self.model.loglike
     x0 = self.params
     if not fast_kalman or self.model.method == "css":
         return -inv(approx_hess_cs(x0, func))
     else:
         return -inv(approx_hess(x0, func, epsilon=1e-3)[0])
Example #5
 def bse(self): # allow user to specify?
     if self.model.method == "cmle": # uses different scale/sigma definition
         resid = self.resid
         ssr = np.dot(resid,resid)
         ols_scale = ssr/(self.nobs - self.k_ar - self.k_trend)
         return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
     else:
         hess = approx_hess(self.params, self.model.loglike)
         return np.sqrt(np.diag(-np.linalg.inv(hess[0])))
Example #6
 def bse(self):  # allow user to specify?
     if self.model.method == "cmle":  # uses different scale/sigma definition
         resid = self.resid
         ssr = np.dot(resid, resid)
         ols_scale = ssr / (self.nobs - self.k_ar - self.k_trend)
         return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
     else:
         hess = approx_hess(self.params, self.model.loglike)
         return np.sqrt(np.diag(-np.linalg.inv(hess[0])))
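In the cmle branch the autoregression has effectively been fitted by least squares on its own lags, so the standard errors reduce to the usual OLS ones with scale ssr / (nobs - k_ar - k_trend). A hedged, self-contained sketch of that calculation for an AR(1) with a constant (the simulation, lag construction and names are illustrative, not the statsmodels code path):

import numpy as np

rng = np.random.RandomState(1)
nobs, phi = 300, 0.6
y = np.zeros(nobs)
for t in range(1, nobs):                 # simulate an AR(1) with unit-variance noise
    y[t] = phi * y[t - 1] + rng.normal()

k_ar, k_trend = 1, 1
X = np.column_stack([np.ones(nobs - 1), y[:-1]])   # constant plus one lag
endog = y[1:]

beta = np.linalg.lstsq(X, endog, rcond=None)[0]
resid = endog - X.dot(beta)
ssr = resid.dot(resid)
ols_scale = ssr / (len(endog) - k_ar - k_trend)    # same degrees-of-freedom correction as above
cov = ols_scale * np.linalg.inv(X.T.dot(X))
print(beta)                   # estimates of the constant and the AR coefficient
print(np.sqrt(np.diag(cov)))  # their OLS standard errors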
Example #7
    def hessian(self, params):
        """
        Compute the Hessian at params.

        Notes
        -----
        This is a numerical approximation.
        """
        loglike = self.loglike
        if self.transparams:
            params = self._invtransparams(params)
#        return approx_hess_cs(params, loglike, epsilon=1e-5)
        return approx_hess(params, loglike, epsilon=1e-5)
Example #8
    def hessian(self, params):
        """
        Compute the Hessian at params.

        Notes
        -----
        This is a numerical approximation.
        """
        loglike = self.loglike
        if self.transparams:
            params = self._invtransparams(params)
        #        return approx_hess_cs(params, loglike, epsilon=1e-5)
        return approx_hess(params, loglike, epsilon=1e-5)
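One reason for swapping the commented-out complex-step call for a real finite difference is that complex-step differentiation only works when every operation in the objective is complex-analytic; a function such as abs() discards the imaginary part and silently yields a zero derivative. A toy illustration (the test functions are assumptions, not the ARMA log-likelihood):

import numpy as np

def smooth(x):
    return np.exp(x) * np.cos(x)

def kinked(x):
    return np.abs(x - 1.0) ** 3                      # abs() is not complex-analytic

x0, h = 0.3, 1e-20
print(np.imag(smooth(x0 + 1j * h)) / h)              # matches exp(x0)*(cos(x0) - sin(x0))
print(np.exp(x0) * (np.cos(x0) - np.sin(x0)))

print(np.imag(kinked(x0 + 1j * h)) / h)              # 0.0 -- the complex step is silently wrong here
print((kinked(x0 + 1e-6) - kinked(x0 - 1e-6)) / 2e-6)  # central difference: about -3*(1 - x0)**2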
Example #9
    def hessian(self, params):
        """
        Compute the Hessian at params.

        Notes
        -----
        This is a numerical approximation.
        """
        loglike = self.loglike
        #if self.transparams:
        #    params = self._invtransparams(params)
        if not fast_kalman or self.method == "css":
            return approx_hess_cs(params, loglike, epsilon=1e-5)
        else:
            return approx_hess(params, self.loglike, epsilon=1e-3)[0]
Example #10
    def hessian(self, params):
        """
        Compute the Hessian at params.

        Notes
        -----
        This is a numerical approximation.
        """
        loglike = self.loglike
        #if self.transparams:
        #    params = self._invtransparams(params)
        if not fast_kalman or self.method == "css":
            return approx_hess_cs(params, loglike, epsilon=1e-5)
        else:
            return approx_hess(params, self.loglike, epsilon=1e-3)[0]
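Note the two step sizes: 1e-5 for the complex-step branch and a much coarser 1e-3 for the real finite differences. A real second difference balances truncation error against rounding error, so shrinking the step too far makes the estimate worse, which is presumably why the coarser epsilon is used in that branch. A hedged one-dimensional illustration with an assumed test function:

import numpy as np

def f(x):
    return np.exp(np.sin(x))

def second_diff(f, x, h):
    # plain central second difference
    return (f(x + h) - 2.0 * f(x) + f(x - h)) / h**2

x0 = 0.9
exact = np.exp(np.sin(x0)) * (np.cos(x0)**2 - np.sin(x0))   # analytic f'' for f = exp(sin(x))
for h in (1e-1, 1e-3, 1e-5, 1e-7):
    print(h, abs(second_diff(f, x0, h) - exact))
# the error typically bottoms out around h ~ 1e-3 to 1e-4 and grows again for smaller h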
Example #11
yn = y + 0.2 * np.random.normal(size=len(x))
popt, pcov = curve_fit(func, x, yn)
print 'curve_fit results:'
print 'values:', popt
print 'errors:', np.sqrt(pcov.diagonal())

"""And here is how to compute the fit parameter values and errors
using one of the other optimizers (exemplified with fmin) and 
a method to compute the Hesse matrix"""
def chi2(pars):
    chi = yn - func(x, *pars)
    return (chi ** 2).sum()
popt = fmin(chi2, p0, disp=False)
from numpy.dual import inv
from scikits.statsmodels.sandbox.regression.numdiff import approx_hess3 as approx_hess
phess = approx_hess(popt, chi2)
def approx_covar(hess, red_chi2):
    return red_chi2 * inv(hess / 2.)
red_chi2 = chi2(popt) / (len(x) - len(p0))
pcov = approx_covar(phess, red_chi2)
print 'fmin and approx_hess results:'
print 'values:', popt
print 'errors:', np.sqrt(pcov.diagonal())
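For a least-squares objective the Hessian of chi2 near the optimum is approximately 2 * J'J, with J the Jacobian of the residuals; that Gauss-Newton approximation is essentially what curve_fit's pcov is built from. A hedged cross-check reusing the script's own yn, x, func, popt and red_chi2 (the forward-difference Jacobian helper is an illustrative addition, not library code):

import numpy as np

def residual_jacobian(p, h=1e-6):
    # forward-difference Jacobian of the residual vector yn - func(x, *p)
    r0 = yn - func(x, *p)
    J = np.empty((len(r0), len(p)))
    for j in range(len(p)):
        ph = np.array(p, dtype=float)
        ph[j] += h
        J[:, j] = ((yn - func(x, *ph)) - r0) / h
    return J

J = residual_jacobian(popt)
pcov_gn = red_chi2 * np.linalg.inv(J.T.dot(J))   # Gauss-Newton: H/2 is roughly J'J near the optimum
print(np.sqrt(pcov_gn.diagonal()))               # should be close to the errors printed above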

"""Just to check, here is what Minuit has to say"""
from minuit import Minuit
def chi2(a, b, c):
    chi = yn - func(x, a, b, c)
    return (chi ** 2).sum()

m = Minuit(chi2, a=2.5, b=1.3, c=0.5)
Example #12
data_exog = sm.add_constant(rvs)
xbeta = 0.9 + 0.1 * rvs.sum(1)
data_endog = xbeta + 0.1 * np.random.standard_t(5, size=nobs)
#print data_endog

modp = MyT(data_endog, data_exog)
modp.start_value = np.ones(data_exog.shape[1] + 2)
modp.start_value[-2] = 10
modp.start_params = modp.start_value
resp = modp.fit(start_params=modp.start_value)
print resp.params
print resp.bse

from scikits.statsmodels.sandbox.regression.numdiff import approx_fprime1, approx_hess

hb = -approx_hess(modp.start_value, modp.loglike, epsilon=-1e-4)[0]
tmp = modp.loglike(modp.start_value)
print tmp.shape
'''
>>> tmp = modp.loglike(modp.start_value)
8
>>> tmp.shape
(100,)
>>> tmp.sum(0)
-24220.877108016182
>>> tmp = modp.nloglikeobs(modp.start_value)
8
>>> tmp.shape
(100, 100)
>>> np.dot(modp.exog, beta).shape
Traceback (most recent call last):
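A side note on this example: hb is the negative numerical Hessian evaluated at the starting values, which is useful for diagnosing the optimizer but is not what standard errors are based on; the observed information is normally evaluated at the fitted parameters. A hedged two-line sketch reusing the example's own names and the old sandbox API:

hb_hat = -approx_hess(resp.params, modp.loglike, epsilon=-1e-4)[0]   # Hessian at the estimates
print(np.sqrt(np.diag(np.linalg.inv(hb_hat))))                       # compare with resp.bse printed above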
Example #13
print '\nResults with TLinearModel'
print '-------------------------'
resp = modp.fit(start_params=modp.start_params,
                disp=1,
                method='nm',
                maxfun=10000,
                maxiter=5000)  #'newton')
#resp = modp.fit(start_params = modp.start_params, disp=1, method='newton')

print 'using Nelder-Mead'
print resp.params
print resp.bse
resp2 = modp.fit(start_params=resp.params, method='Newton')
print 'using Newton'
print resp2.params
print resp2.bse

from scikits.statsmodels.sandbox.regression.numdiff import approx_fprime1, approx_hess

hb = -approx_hess(modp.start_params, modp.loglike, epsilon=-1e-4)[0]
tmp = modp.loglike(modp.start_params)
print tmp.shape
print 'eigenvalues of numerical Hessian'
print np.linalg.eigh(np.linalg.inv(hb))[0]

#store_params is only available in original test script
##pp=np.array(store_params)
##print pp.min(0)
##print pp.max(0)
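Printing the eigenvalues of the inverted negative Hessian is a quick sanity check: at a genuine maximum of the log-likelihood the negative Hessian should be positive definite, so every eigenvalue of its inverse should be positive. A small hedged helper capturing that check (this function is illustrative, not part of statsmodels):

import numpy as np

def check_neg_hessian(neg_hess, tol=0.0):
    # report whether a numerical negative Hessian is positive definite
    neg_hess = 0.5 * (neg_hess + neg_hess.T)      # symmetrize; numerical Hessians are rarely exactly symmetric
    eigvals = np.linalg.eigvalsh(neg_hess)
    return bool(np.all(eigvals > tol)), eigvals

# with the example's own array the call would be: ok, ev = check_neg_hessian(hb)
A = np.array([[2.0, 0.3], [0.3, 1.0]])            # stand-in for a negative Hessian at a maximum
print(check_neg_hessian(A))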
Example #14
    return approx_fprime1(params, self.nloglike)
  File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\sandbox\regression\numdiff.py", line 81, in approx_fprime1
    nobs = np.size(f0) #len(f0)
TypeError: object of type 'numpy.float64' has no len()

'''

res_bfgs = mod_norm2.fit(start_params=start_params,
                         method="bfgs",
                         fprime=None,
                         maxiter=500,
                         retall=0)

from scikits.statsmodels.sandbox.regression.numdiff import approx_fprime1, approx_hess
hb = -approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)[0]
hf = -approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)[0]
hh = (hf + hb) / 2.
print np.linalg.eigh(hh)

grad = -approx_fprime1(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
print grad
gradb = -approx_fprime1(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
gradf = -approx_fprime1(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
print (gradb + gradf) / 2.

print res_norm3.model.score(res_norm3.params)
print res_norm3.model.score(start_params)
mod_norm2.loglike(start_params / 2.)
print np.linalg.inv(-1 * mod_norm2.hessian(res_norm3.params))
print np.sqrt(np.diag(res_bfgs.cov_params()))
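Averaging the results obtained with +epsilon and -epsilon, as done above for both the Hessian and the gradient, cancels the leading O(h) truncation term of a one-sided difference; the averaged gradient is exactly a central difference. A hedged one-dimensional illustration (test function and step are assumptions):

import numpy as np

def f(x):
    return np.exp(2.0 * x)

x0, h = 0.5, 1e-4
exact = 2.0 * np.exp(2.0 * x0)
fwd = (f(x0 + h) - f(x0)) / h       # forward difference, error O(h)
bwd = (f(x0) - f(x0 - h)) / h       # backward difference, error O(h)
avg = 0.5 * (fwd + bwd)             # identical to the central difference, error O(h**2)
for name, val in (("forward", fwd), ("backward", bwd), ("averaged", avg)):
    print(name, abs(val - exact))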
    score = lambda params: -self.score(params)
  File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 480, in score
    return approx_fprime1(params, self.nloglike)
  File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\sandbox\regression\numdiff.py", line 81, in approx_fprime1
    nobs = np.size(f0) #len(f0)
TypeError: object of type 'numpy.float64' has no len()

'''

res_bfgs = mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None,
                         maxiter=500, retall=0)

from scikits.statsmodels.sandbox.regression.numdiff import approx_fprime1, approx_hess
hb = -approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)[0]
hf = -approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)[0]
hh = (hf + hb) / 2.
print np.linalg.eigh(hh)

grad = -approx_fprime1(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
print grad
gradb = -approx_fprime1(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
gradf = -approx_fprime1(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
print (gradb+gradf)/2.

print res_norm3.model.score(res_norm3.params)
print res_norm3.model.score(start_params)
mod_norm2.loglike(start_params/2.)
print np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params))
print np.sqrt(np.diag(res_bfgs.cov_params()))

print '\nResults with TLinearModel'
print '-------------------------'
resp = modp.fit(start_params=modp.start_params, disp=1, method='nm',
                maxfun=10000, maxiter=5000)  #'newton')
#resp = modp.fit(start_params = modp.start_params, disp=1, method='newton')

print 'using Nelder-Mead'
print resp.params
print resp.bse
resp2 = modp.fit(start_params = resp.params, method='Newton')
print 'using Newton'
print resp2.params
print resp2.bse

from scikits.statsmodels.sandbox.regression.numdiff import approx_fprime1, approx_hess

hb=-approx_hess(modp.start_params, modp.loglike, epsilon=-1e-4)[0]
tmp = modp.loglike(modp.start_params)
print tmp.shape
print 'eigenvalues of numerical Hessian'
print np.linalg.eigh(np.linalg.inv(hb))[0]

#store_params is only available in original test script
##pp=np.array(store_params)
##print pp.min(0)
##print pp.max(0)


Example #17
 def hessian(self, AB_mask):
     """
     Returns numerical hessian.
     """
     loglike = self.loglike
     return approx_hess(AB_mask, loglike)[0]
Example #18
 def hessian(self, params):
     """
     Returns numerical hessian for now.
     """
     loglike = self.loglike
     return approx_hess(params, loglike)[0]
Example #19
 def hessian(self, params):
     """
     Returns numerical hessian for now.
     """
     loglike = self.loglike
     return approx_hess(params, loglike)[0]
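The trailing [0] in these snippets reflects the old sandbox numdiff API, in which approx_hess returned more than just the Hessian. In current statsmodels releases the equivalent helper lives in statsmodels.tools.numdiff and returns the Hessian array directly; a hedged sketch, assuming that module is available:

import numpy as np
from statsmodels.tools.numdiff import approx_hess

def loglike(params):
    # toy concave objective standing in for a model log-likelihood
    return -0.5 * np.sum((params - np.array([1.0, -2.0])) ** 2)

H = approx_hess(np.zeros(2), loglike)   # returns the Hessian array itself; no [0] needed
print(H)                                # approximately minus the identity matrix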
Example #20
 def hessian(self, AB_mask):
     """
     Returns numerical hessian.
     """
     loglike = self.loglike
     return approx_hess(AB_mask, loglike)[0]
data_exog = sm.add_constant(rvs)
xbeta = 0.9 + 0.1*rvs.sum(1)
data_endog = xbeta + 0.1*np.random.standard_t(5, size=nobs)
#print data_endog

modp = MyT(data_endog, data_exog)
modp.start_value = np.ones(data_exog.shape[1]+2)
modp.start_value[-2] = 10
modp.start_params = modp.start_value
resp = modp.fit(start_params = modp.start_value)
print resp.params
print resp.bse

from scikits.statsmodels.sandbox.regression.numdiff import approx_fprime1, approx_hess

hb=-approx_hess(modp.start_value, modp.loglike, epsilon=-1e-4)[0]
tmp = modp.loglike(modp.start_value)
print tmp.shape


'''
>>> tmp = modp.loglike(modp.start_value)
8
>>> tmp.shape
(100,)
>>> tmp.sum(0)
-24220.877108016182
>>> tmp = modp.nloglikeobs(modp.start_value)
8
>>> tmp.shape
(100, 100)