def test_grad_fun1_fd(self):
    for test_params in self.params:
        #gtrue = self.x.sum(0)
        gtrue = self.gradtrue(test_params)
        fun = self.fun()
        epsilon = 1e-6
        # averaging the forward (+epsilon) and backward (-epsilon)
        # differences cancels the O(epsilon) error terms, leaving a
        # centered-difference estimate of the gradient
        gfd = numdiff.approx_fprime1(test_params, fun, epsilon=epsilon,
                                     args=self.args)
        gfd += numdiff.approx_fprime1(test_params, fun, epsilon=-epsilon,
                                      args=self.args)
        gfd /= 2.
        assert_almost_equal(gtrue, gfd, decimal=DEC6)
def test_grad_fun1_fdc(self):
    for test_params in self.params:
        #gtrue = self.x.sum(0)
        gtrue = self.gradtrue(test_params)
        fun = self.fun()
        # the default epsilon of 1e-6 is not precise enough for the
        # centered difference; use 1e-8 instead
        gfd = numdiff.approx_fprime1(test_params, fun, epsilon=1e-8,
                                     args=self.args, centered=True)
        assert_almost_equal(gtrue, gfd, decimal=DEC5)
def test_score(self):
    #assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
    for test_params in self.params:
        sc = self.mod.score(test_params)
        # a forward difference of the loglikelihood is only accurate to
        # about one decimal
        scfd = numdiff.approx_fprime1(test_params.ravel(),
                                      self.mod.loglike)
        assert_almost_equal(sc, scfd, decimal=1)
        # the complex-step derivative agrees to near machine precision
        sccs = numdiff.approx_fprime_cs(test_params.ravel(),
                                        self.mod.loglike)
        assert_almost_equal(sc, sccs, decimal=13)
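# A minimal standalone sketch (module level, not part of the test class
# above) of the accuracy ordering these tests rely on: a forward
# difference is O(epsilon), averaging the +epsilon and -epsilon forward
# differences yields a centered O(epsilon**2) estimate, and the complex
# step is accurate to near machine precision because it involves no
# subtractive cancellation. The example function and step sizes here are
# illustrative assumptions, not part of the test suite.
def _stepsize_demo(x=1.5):
    import numpy as np
    truth = np.cos(x)                              # exact derivative of sin
    eps = 1e-6
    fwd = (np.sin(x + eps) - np.sin(x)) / eps      # forward difference
    bwd = (np.sin(x - eps) - np.sin(x)) / -eps     # forward diff with -eps
    ctr = (fwd + bwd) / 2.                         # same trick as test_grad_fun1_fd
    cs = np.sin(x + 1j * 1.0e-20).imag / 1.0e-20   # complex step
    for name, val in [('fwd', fwd), ('ctr', ctr), ('cs', cs)]:
        print(name, abs(val - truth))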
y = np.dot(x, beta) + 0.1 * np.random.randn(nobs)
xkols = np.dot(np.linalg.pinv(x), y)

print(approx_fprime((1, 2, 3), fun, epsilon, x))
gradtrue = x.sum(0)
print(x.sum(0))
gradcs = approx_fprime_cs((1, 2, 3), fun, (x,), h=1.0e-20)
print(gradcs, maxabs(gradcs, gradtrue))
print(approx_hess_cs((1, 2, 3), fun, (x,), h=1.0e-20))  # this is correctly zero
print(approx_hess_cs((1, 2, 3), fun2, (y, x), h=1.0e-20) - 2 * np.dot(x.T, x))
print(numdiff.approx_hess(xk, fun2, 1e-3, (y, x))[0] - 2 * np.dot(x.T, x))

gt = (-x * 2 * (y - np.dot(x, [1, 2, 3]))[:, None])
g = approx_fprime_cs((1, 2, 3), fun1, (y, x), h=1.0e-20)  #.T #this shouldn't be transposed
gd = numdiff.approx_fprime1((1, 2, 3), fun1, epsilon, (y, x))
print(maxabs(g, gt))
print(maxabs(gd, gt))

import statsmodels.api as sm

data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog)
#mod = sm.Probit(data.endog, data.exog)
mod = sm.Logit(data.endog, data.exog)
#res = mod.fit(method="newton")
test_params = [1, 0.25, 1.4, -7]
loglike = mod.loglike
score = mod.score
hess = mod.hessian
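# Hedged follow-on check, reusing mod, loglike, score and test_params from
# above: the analytic score should agree with the complex-step gradient of
# the log-likelihood almost exactly, while a plain forward difference is
# much less accurate. The step sizes here are assumptions.
print(maxabs(score(test_params),
             approx_fprime_cs(test_params, loglike, h=1.0e-20)))
print(maxabs(score(test_params),
             numdiff.approx_fprime1(test_params, loglike, 1e-6)))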