def lag_c_loglik(rho, n, e0, e1, W):
    # concentrated log-lik for lag model, no constants, brute force
    er = e0 - rho * e1
    sig2 = spdot(er.T, er) / n
    nlsig2 = (n / 2.0) * np.log(sig2)
    a = -rho * W
    spfill_diagonal(a, 1.0)
    jacob = np.log(np.linalg.det(a))
    # this is the negative of the concentrated log lik for minimization
    clik = nlsig2 - jacob
    return clik
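For reference, lag_c_loglik above (and the np.dot variant that follows) returns the negative of the concentrated log-likelihood of the spatial lag model, with additive constants dropped. Written out in the notation of the code,

$$
-\ln L_c(\rho) \;=\; \frac{n}{2}\,\ln\!\left(\frac{(e_0-\rho e_1)'(e_0-\rho e_1)}{n}\right)\;-\;\ln\bigl|I-\rho W\bigr| ,
$$

so minimizing this function over $\rho \in (-1, 1)$ yields the maximum-likelihood estimate of the spatial autoregressive coefficient.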
def lag_c_loglik(rho, n, e0, e1, W):
    # concentrated log-lik for lag model, no constants, brute force
    er = e0 - rho * e1
    sig2 = np.dot(er.T, er) / n
    nlsig2 = (n / 2.0) * np.log(sig2)
    a = -rho * W
    spfill_diagonal(a, 1.0)
    jacob = np.log(np.linalg.det(a))
    # this is the negative of the concentrated log lik for minimization
    clik = nlsig2 - jacob
    return clik
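A minimal, self-contained sketch (not part of the module) of how this concentrated log-likelihood is driven by SciPy's bounded scalar minimizer, using plain NumPy and synthetic data; the helper name _lag_c_loglik_dense and the toy weights are illustrative only.

import numpy as np
from scipy.optimize import minimize_scalar

def _lag_c_loglik_dense(rho, n, e0, e1, W):
    # negative concentrated log-likelihood, dense weights, 1-D residual vectors
    er = e0 - rho * e1
    sig2 = er @ er / n                    # scalar, since e0 and e1 are 1-D here
    nlsig2 = (n / 2.0) * np.log(sig2)
    a = -rho * W
    np.fill_diagonal(a, 1.0)              # a = I - rho * W
    jacob = np.log(np.linalg.det(a))      # log |I - rho * W|
    return nlsig2 - jacob

rng = np.random.default_rng(0)
n = 50
W = rng.random((n, n))
np.fill_diagonal(W, 0.0)
W = W / W.sum(axis=1, keepdims=True)      # row-standardize the toy weights
e0 = rng.normal(size=n)                   # stand-ins for the filtered residuals
e1 = rng.normal(size=n)
res = minimize_scalar(_lag_c_loglik_dense, bounds=(-1.0, 1.0),
                      args=(n, e0, e1, W), method="bounded", tol=1e-7)
print("estimated rho:", res.x)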
b = b0 - rho * b1
betas = np.vstack((b, rho))  # rho added as last coefficient
u = e0 - rho * e1
predy = y - u
xb = spdot(x, b)

predy_e = inverse_prod(
    w.sparse, xb, rho, inv_method="power_exp", threshold=epsilon)
e_pred = y - predy_e
sig2 = spdot(e_pred.T, e_pred) / n

# information matrix
# if w should be kept sparse, how can we do the following:
a = -rho * W
spfill_diagonal(a, 1.0)
ai = spinv(a)
wai = spdot(W, ai)
tr1 = wai.diagonal().sum()  # same for sparse and dense

wai2 = spdot(wai, wai)
tr2 = wai2.diagonal().sum()

waiTwai = spdot(wai.T, wai)
tr3 = waiTwai.diagonal().sum()
### to here

wpredy = weights.lag_spatial(w, predy_e)
wpyTwpy = spdot(wpredy.T, wpredy)
xTwpy = spdot(x.T, wpredy)
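One possible answer to the "keep w sparse" question in the comment above is to factor A = I - rho*W with a sparse LU decomposition and solve against an identity right-hand side, rather than calling an explicit (sparse) inverse. The sketch below is an illustration under that assumption, not the module's implementation; note that W A^-1 still comes out dense, so this only avoids forming the inverse directly.

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import splu

def lag_traces_sparse(Wsp, rho):
    # tr(W A^-1), tr((W A^-1)^2) and tr((W A^-1)' (W A^-1)) with A = I - rho*W,
    # computed from a sparse LU factorization of A instead of an explicit inverse
    n = Wsp.shape[0]
    a = sp.identity(n, format="csc") - rho * Wsp.tocsc()
    lu = splu(a)
    ai = lu.solve(np.eye(n))       # columns of A^-1 (dense n x n)
    wai = Wsp @ ai                 # W A^-1, also dense
    tr1 = np.trace(wai)
    tr2 = np.trace(wai @ wai)
    tr3 = np.trace(wai.T @ wai)
    return tr1, tr2, tr3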
def __init__(self, y, x, w, method='full', epsilon=0.0000001):
    # set up main regression variables and spatial filters
    self.y = y
    self.x = x
    self.n, self.k = self.x.shape
    self.method = method
    self.epsilon = epsilon
    #W = w.full()[0]
    #Wsp = w.sparse
    ylag = ps.lag_spatial(w, y)
    # b0, b1, e0 and e1
    xtx = spdot(self.x.T, self.x)
    xtxi = la.inv(xtx)
    xty = spdot(self.x.T, self.y)
    xtyl = spdot(self.x.T, ylag)
    b0 = np.dot(xtxi, xty)
    b1 = np.dot(xtxi, xtyl)
    e0 = self.y - spdot(x, b0)
    e1 = ylag - spdot(x, b1)
    methodML = method.upper()
    # call minimizer using concentrated log-likelihood to get rho
    if methodML in ['FULL', 'LU', 'ORD']:
        if methodML == 'FULL':
            W = w.full()[0]      # moved here
            res = minimize_scalar(lag_c_loglik, 0.0, bounds=(-1.0, 1.0),
                                  args=(self.n, e0, e1, W), method='bounded',
                                  tol=epsilon)
        elif methodML == 'LU':
            I = sp.identity(w.n)
            Wsp = w.sparse       # moved here
            W = Wsp
            res = minimize_scalar(lag_c_loglik_sp, 0.0, bounds=(-1.0, 1.0),
                                  args=(self.n, e0, e1, I, Wsp),
                                  method='bounded', tol=epsilon)
        elif methodML == 'ORD':
            # check on symmetry structure
            if w.asymmetry(intrinsic=False) == []:
                ww = symmetrize(w)
                WW = np.array(ww.todense())
                evals = la.eigvalsh(WW)
                W = WW
            else:
                W = w.full()[0]  # moved here
                evals = la.eigvals(W)
            res = minimize_scalar(lag_c_loglik_ord, 0.0, bounds=(-1.0, 1.0),
                                  args=(self.n, e0, e1, evals),
                                  method='bounded', tol=epsilon)
    else:
        # program will crash, need to catch
        print("{0} is an unsupported method".format(methodML))
        self = None
        return

    self.rho = res.x[0][0]

    # compute full log-likelihood, including constants
    ln2pi = np.log(2.0 * np.pi)
    llik = -res.fun - self.n / 2.0 * ln2pi - self.n / 2.0
    self.logll = llik[0][0]

    # b, residuals and predicted values
    b = b0 - self.rho * b1
    self.betas = np.vstack((b, self.rho))  # rho added as last coefficient
    self.u = e0 - self.rho * e1
    self.predy = self.y - self.u
    xb = spdot(x, b)

    self.predy_e = inverse_prod(
        w.sparse, xb, self.rho, inv_method="power_exp", threshold=epsilon)
    self.e_pred = self.y - self.predy_e

    # residual variance
    self._cache = {}
    self.sig2 = self.sig2n  # no allowance for division by n-k

    # information matrix
    # if w should be kept sparse, how can we do the following:
    a = -self.rho * W
    spfill_diagonal(a, 1.0)
    ai = spinv(a)
    wai = spdot(W, ai)
    tr1 = wai.diagonal().sum()  # same for sparse and dense

    wai2 = np.dot(wai, wai)
    tr2 = wai2.diagonal().sum()

    waiTwai = np.dot(wai.T, wai)
    tr3 = waiTwai.diagonal().sum()
    ### to here

    wpredy = ps.lag_spatial(w, self.predy_e)
    wpyTwpy = np.dot(wpredy.T, wpredy)
    xTwpy = spdot(x.T, wpredy)

    # order of variables is beta, rho, sigma2
    v1 = np.vstack(
        (xtx / self.sig2, xTwpy.T / self.sig2, np.zeros((1, self.k))))
    v2 = np.vstack(
        (xTwpy / self.sig2, tr2 + tr3 + wpyTwpy / self.sig2, tr1 / self.sig2))
    v3 = np.vstack(
        (np.zeros((self.k, 1)), tr1 / self.sig2, self.n / (2.0 * self.sig2 ** 2)))

    v = np.hstack((v1, v2, v3))

    self.vm1 = la.inv(v)  # vm1 includes variance for sigma2
    self.vm = self.vm1[:-1, :-1]  # vm is for coefficients only
def __init__(self, y, x, w, method="full", epsilon=0.0000001):
    # set up main regression variables and spatial filters
    self.y = y
    self.x = x
    self.n, self.k = self.x.shape
    self.method = method
    self.epsilon = epsilon
    # W = w.full()[0]
    # Wsp = w.sparse
    ylag = ps.lag_spatial(w, y)
    # b0, b1, e0 and e1
    xtx = spdot(self.x.T, self.x)
    xtxi = la.inv(xtx)
    xty = spdot(self.x.T, self.y)
    xtyl = spdot(self.x.T, ylag)
    b0 = np.dot(xtxi, xty)
    b1 = np.dot(xtxi, xtyl)
    e0 = self.y - spdot(x, b0)
    e1 = ylag - spdot(x, b1)
    methodML = method.upper()
    # call minimizer using concentrated log-likelihood to get rho
    if methodML in ["FULL", "LU", "ORD"]:
        if methodML == "FULL":
            W = w.full()[0]  # moved here
            res = minimize_scalar(
                lag_c_loglik, 0.0, bounds=(-1.0, 1.0), args=(self.n, e0, e1, W), method="bounded", tol=epsilon
            )
        elif methodML == "LU":
            I = sp.identity(w.n)
            Wsp = w.sparse  # moved here
            W = Wsp
            res = minimize_scalar(
                lag_c_loglik_sp,
                0.0,
                bounds=(-1.0, 1.0),
                args=(self.n, e0, e1, I, Wsp),
                method="bounded",
                tol=epsilon,
            )
        elif methodML == "ORD":
            # check on symmetry structure
            if w.asymmetry(intrinsic=False) == []:
                ww = symmetrize(w)
                WW = np.array(ww.todense())
                evals = la.eigvalsh(WW)
                W = WW
            else:
                W = w.full()[0]  # moved here
                evals = la.eigvals(W)
            res = minimize_scalar(
                lag_c_loglik_ord,
                0.0,
                bounds=(-1.0, 1.0),
                args=(self.n, e0, e1, evals),
                method="bounded",
                tol=epsilon,
            )
    else:
        # program will crash, need to catch
        print("{0} is an unsupported method".format(methodML))
        self = None
        return

    self.rho = res.x[0][0]

    # compute full log-likelihood, including constants
    ln2pi = np.log(2.0 * np.pi)
    llik = -res.fun - self.n / 2.0 * ln2pi - self.n / 2.0
    self.logll = llik[0][0]

    # b, residuals and predicted values
    b = b0 - self.rho * b1
    self.betas = np.vstack((b, self.rho))  # rho added as last coefficient
    self.u = e0 - self.rho * e1
    self.predy = self.y - self.u
    xb = spdot(x, b)

    self.predy_e = inverse_prod(w.sparse, xb, self.rho, inv_method="power_exp", threshold=epsilon)
    self.e_pred = self.y - self.predy_e

    # residual variance
    self._cache = {}
    self.sig2 = self.sig2n  # no allowance for division by n-k

    # information matrix
    # if w should be kept sparse, how can we do the following:
    a = -self.rho * W
    spfill_diagonal(a, 1.0)
    ai = spinv(a)
    wai = spdot(W, ai)
    tr1 = wai.diagonal().sum()  # same for sparse and dense

    wai2 = np.dot(wai, wai)
    tr2 = wai2.diagonal().sum()

    waiTwai = np.dot(wai.T, wai)
    tr3 = waiTwai.diagonal().sum()
    ### to here

    wpredy = ps.lag_spatial(w, self.predy_e)
    wpyTwpy = np.dot(wpredy.T, wpredy)
    xTwpy = spdot(x.T, wpredy)

    # order of variables is beta, rho, sigma2
    v1 = np.vstack((xtx / self.sig2, xTwpy.T / self.sig2, np.zeros((1, self.k))))
    v2 = np.vstack((xTwpy / self.sig2, tr2 + tr3 + wpyTwpy / self.sig2, tr1 / self.sig2))
    v3 = np.vstack((np.zeros((self.k, 1)), tr1 / self.sig2, self.n / (2.0 * self.sig2 ** 2)))

    v = np.hstack((v1, v2, v3))

    self.vm1 = la.inv(v)  # vm1 includes variance for sigma2
    self.vm = self.vm1[:-1, :-1]  # vm is for coefficients only
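For context, a typical way to exercise this estimator is through the public spreg interface (ML_Lag), which wraps the __init__ shown above. The sketch below follows the usual spreg/libpysal pattern with the bundled Columbus example; the dataset, column names, and variable labels are illustrative only.

import numpy as np
import libpysal
from libpysal.examples import load_example
from spreg import ML_Lag

columbus = load_example("Columbus")
db = libpysal.io.open(columbus.get_path("columbus.dbf"), "r")
y = np.array(db.by_col("CRIME")).reshape(-1, 1)
x = np.array([db.by_col("INC"), db.by_col("HOVAL")]).T
w = libpysal.weights.Queen.from_shapefile(columbus.get_path("columbus.shp"))
w.transform = "r"  # row-standardize, as the (-1, 1) bounds on rho assume

model = ML_Lag(y, x, w, method="full", name_y="CRIME", name_x=["INC", "HOVAL"])
print(model.betas)  # beta coefficients with rho appended as the last entry
print(model.logll)  # full log-likelihood, including constants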