Example #1
import numpy as np
from scipy.sparse.linalg import splu as SuperLU


def jacob(lam, n_eq, I, WS):
    """Log-Jacobian for SUR Error model
    
    Parameters
    ----------
    lam      : array
               n_eq by 1 array of spatial autoregressive parameters
    n_eq     : int
               number of equations
    I        : sparse matrix
               sparse identity matrix
    WS       : sparse matrix
               sparse spatial weights matrix
    
    Returns
    -------
    logjac   : float
               the log Jacobian
    
    """
    logjac = 0.0
    for r in range(n_eq):
        lami = lam[r]
        lamWS = WS.multiply(lami)
        B = (I - lamWS).tocsc()
        # sparse LU: log|det(B)| is the sum of log|U_ii|, since L has a unit
        # diagonal and the permutation factors have |det| = 1
        LU = SuperLU(B)
        jj = np.sum(np.log(np.abs(LU.U.diagonal())))
        logjac += jj
    return logjac
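
A minimal usage sketch, assuming the jacob function above is in scope. The
weights matrix is an illustrative row-standardized ring (each unit's
neighbors are i-1 and i+1), and a per-equation term can be cross-checked
against a dense log-determinant:

import numpy as np
import scipy.sparse as sp

n = 20
idx = np.arange(n)
# ring contiguity, row-standardized: neighbors i-1 and i+1 get weight 0.5
rows = np.concatenate([idx, idx])
cols = np.concatenate([(idx - 1) % n, (idx + 1) % n])
W = sp.csr_matrix((np.full(2 * n, 0.5), (rows, cols)), shape=(n, n))
I = sp.identity(n)

lam = np.array([0.3, -0.2])  # one spatial parameter per equation, n_eq = 2
print(jacob(lam, 2, I, W))

# cross-check the first equation's term against a dense log-determinant
sign, logdet = np.linalg.slogdet((I - 0.3 * W).toarray())
print(logdet)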
Example #2
import numpy as np
from scipy.sparse.linalg import splu as SuperLU
from spreg.utils import spdot  # dense/sparse-aware dot product


def lag_c_loglik_sp(rho, n, e0, e1, I, Wsp):
    # concentrated log-likelihood for the lag model, sparse algebra
    if isinstance(rho, np.ndarray):
        if rho.shape == (1, 1):
            rho = rho[0][0]  # unwrap a (1, 1) array (some optimizers pass arrays)
    er = e0 - rho * e1
    sig2 = spdot(er.T, er) / n
    nlsig2 = (n / 2.0) * np.log(sig2)
    a = I - rho * Wsp
    LU = SuperLU(a.tocsc())
    # log|det(I - rho W)| from the diagonal of U in the sparse LU factorization
    jacob = np.sum(np.log(np.abs(LU.U.diagonal())))
    # negative of the concentrated log-likelihood, for minimization
    clike = nlsig2 - jacob
    return clike
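
A hedged sketch of how this function might be driven, assuming (as in the
standard concentrated-likelihood setup) that e0 and e1 are the least-squares
residuals of y and of Wy on x. The ring weights and data below are synthetic
and purely illustrative:

import numpy as np
import scipy.sparse as sp
from scipy.optimize import minimize_scalar

rng = np.random.default_rng(0)
n = 20
idx = np.arange(n)
rows = np.concatenate([idx, idx])
cols = np.concatenate([(idx - 1) % n, (idx + 1) % n])
W = sp.csr_matrix((np.full(2 * n, 0.5), (rows, cols)), shape=(n, n))
I = sp.identity(n)

x = np.hstack([np.ones((n, 1)), rng.normal(size=(n, 1))])
y = rng.normal(size=(n, 1))
ylag = W @ y

# e0: OLS residuals of y on x; e1: OLS residuals of Wy on x
b0 = np.linalg.lstsq(x, y, rcond=None)[0]
b1 = np.linalg.lstsq(x, ylag, rcond=None)[0]
e0, e1 = y - x @ b0, ylag - x @ b1

res = minimize_scalar(lag_c_loglik_sp, bounds=(-0.99, 0.99),
                      args=(n, e0, e1, I, W), method='bounded')
print(res.x)  # rho minimizing the negative concentrated log-likelihood

Since e0 and e1 are dense arrays here, spdot should reduce to a plain np.dot.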
Example #3
import numpy as np
from scipy.sparse.linalg import splu as SuperLU


def err_c_loglik_sp(lam, n, y, ylag, x, xlag, I, Wsp):
    # concentrated log-likelihood for the error model, no constants, LU
    if isinstance(lam, np.ndarray):
        if lam.shape == (1, 1):
            lam = lam[0][0]  # unwrap a (1, 1) array (some optimizers pass arrays)
    # spatially filtered variables
    ys = y - lam * ylag
    xs = x - lam * xlag
    # residual sum of squares of ys on xs: ee = ys'ys - ys'xs (xs'xs)^-1 xs'ys
    ysys = np.dot(ys.T, ys)
    xsxs = np.dot(xs.T, xs)
    xsxsi = np.linalg.inv(xsxs)
    xsys = np.dot(xs.T, ys)
    x1 = np.dot(xsxsi, xsys)
    x2 = np.dot(xsys.T, x1)
    ee = ysys - x2
    sig2 = ee[0][0] / n
    nlsig2 = (n / 2.0) * np.log(sig2)
    a = I - lam * Wsp
    LU = SuperLU(a.tocsc())
    # log|det(I - lam W)| from the diagonal of U in the sparse LU factorization
    jacob = np.sum(np.log(np.abs(LU.U.diagonal())))
    # this is the negative of the concentrated log-likelihood, for minimization
    clik = nlsig2 - jacob
    return clik
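
The same driver pattern applies to the error model, with ylag and xlag the
spatial lags of y and x; a sketch on synthetic, illustrative data:

import numpy as np
import scipy.sparse as sp
from scipy.optimize import minimize_scalar

rng = np.random.default_rng(1)
n = 20
idx = np.arange(n)
rows = np.concatenate([idx, idx])
cols = np.concatenate([(idx - 1) % n, (idx + 1) % n])
W = sp.csr_matrix((np.full(2 * n, 0.5), (rows, cols)), shape=(n, n))
I = sp.identity(n)

x = np.hstack([np.ones((n, 1)), rng.normal(size=(n, 1))])
y = rng.normal(size=(n, 1))
ylag, xlag = W @ y, W @ x

res = minimize_scalar(err_c_loglik_sp, bounds=(-0.99, 0.99),
                      args=(n, y, ylag, x, xlag, I, W), method='bounded')
print(res.x)  # lambda minimizing the negative concentrated log-likelihood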