# Example #1
def pooled(df,
           yvar,
           xvar,
           groupvar,
           model='probit',
           cov_type='sandwich',
           theta0=0,
           deriv=2):
    """Estimate a pooled binary-response panel model by maximum likelihood.

    Parameters
    ----------
    df : panel data set, passed to ``panel_setup``
    yvar : name of the dependent variable
    xvar : list of regressor names (determines k, the parameter count)
    groupvar : name of the panel group identifier
    model : link, e.g. 'probit' (forwarded to ``Q_pooled``)
    cov_type : covariance estimator passed to ``M.estimation``
    theta0 : starting values; a scalar means "use default zeros"
    deriv : order of analytical derivatives supplied by ``Q_pooled``

    Returns
    -------
    res : estimation results from ``M.estimation``, augmented with
        yvar, xvar, Nobs, k, n, T and the average partial effects (APE).
    """
    print('Pooled', model)
    Nobs, k, n, T, y, x = panel_setup(df, yvar, xvar, groupvar)
    Qfun = lambda beta, out: Q_pooled(y, x, T, beta, model, out)
    # Default starting values: one coefficient per regressor.
    # BUG FIX: previously this built a (k + 1, 1) vector (the pooled model
    # has no sigma_a, so theta has k elements) and then ignored it, because
    # M.estimation was always called with a hard-coded np.zeros((k, 1)).
    # Caller-supplied starting values were silently discarded.
    if np.isscalar(theta0):
        theta0 = np.zeros((k, 1))
    res = M.estimation(Qfun,
                       theta0=theta0,
                       deriv=deriv,
                       cov_type=cov_type,
                       parnames=xvar)
    # Average partial effects: mean of the density times the coefficients.
    xb, Gx, gx = Qfun(res.theta_hat, out='predict')
    APE = np.mean(gx) * res.theta_hat
    res.update(
        dict(
            zip(['yvar', 'xvar', 'Nobs', 'k', 'n', 'T', 'APE'],
                [yvar, xvar, Nobs, k, n, T, APE])))
    print_output(res)
    return res
# Example #2
def rand_effect(df,
                yvar,
                xvar,
                groupvar,
                model='probit',
                cov_type='Ainv',
                theta0=0,
                deriv=1):
    """Estimate a random-effects binary-response panel model.

    The parameter vector is (beta, sigma_a): k slope coefficients plus the
    standard deviation of the random effect. Average partial effects are
    computed from the coefficients rescaled by sqrt(1 + sigma_a^2).

    Returns the results object from ``M.estimation`` augmented with
    yvar, xvar, Nobs, k, n, T and APE.
    """
    print('Random effects', model)
    Nobs, k, n, T, y, x = panel_setup(df, yvar, xvar, groupvar)

    def Qfun(beta, out):
        return Q_RE(y, x, T, beta, model, out)

    # Default start: zero slopes, sigma_a = 1 (scalar theta0 means "use default").
    if np.isscalar(theta0):
        theta0 = np.zeros((k + 1, 1))
        theta0[-1] = 1

    res = M.estimation(Qfun,
                       theta0,
                       deriv=deriv,
                       cov_type=cov_type,
                       parnames=xvar + ['sigma_a'])
    res.sigma_a = res.theta_hat[-1]

    # Rescale by sqrt(1 + sigma_a^2) to get population-averaged effects.
    scale = np.sqrt(1 + res.sigma_a**2)
    xb, Gx, gx = Qfun(res.theta_hat / scale, out='predict')
    APE = np.mean(gx) * res.theta_hat / scale

    res.update({
        'yvar': yvar,
        'xvar': xvar,
        'Nobs': Nobs,
        'k': k,
        'n': n,
        'T': T,
        'APE': APE,
    })
    print_output(res,
                 ['parnames', 'theta_hat', 'se', 't-values', 'jac', 'APE'])
    return res
def clogit(y, x, cov_type='Ainv', theta0=None, deriv=0, quiet=False):
    """Estimate a conditional logit model by maximum likelihood.

    Parameters
    ----------
    y, x : choice outcomes and alternative-specific regressors,
        forwarded to ``Q_clogit``
    cov_type : covariance estimator passed to ``M.estimation``
    theta0 : starting values; None means a (K, 1) zero vector
    deriv : order of analytical derivatives supplied by ``Q_clogit``
    quiet : when True, suppress all printed output

    Returns
    -------
    res : estimation results from ``M.estimation``, augmented with
        yvar, xvars, N, K, n.
    """
    # Dimensions and variable labels are inferred from x.
    N, J, K, palt, xalt, xvars = labels(x)
    Qfun = lambda theta, out: Q_clogit(theta, y, x, out)

    if theta0 is None:
        theta0 = np.zeros((K, 1))

    res = M.estimation(Qfun, theta0, deriv, cov_type, parnames=xvars)
    res.update(
        dict(zip(['yvar', 'xvars', 'N', 'K', 'n'], ['y', xvars, N, K, N])))

    if not quiet:
        print('Conditional logit')
        # Q is the negative log-likelihood, hence the sign flips.
        print('Initial log-likelihood', -Qfun(theta0, 'Q'))
        print('Initial gradient\n', -Qfun(theta0, 'dQ'))
        print_output(res)

    return res
# Example #4
def tobit(y, x, cov_type='Ainv', theta0=None, deriv=1, quiet=False):
    """Estimate a tobit model (censoring at zero) by maximum likelihood.

    Parameters
    ----------
    y, x : outcome vector and regressor matrix, forwarded to ``Q_tobit``
    cov_type : covariance estimator passed to ``M.estimation``
    theta0 : starting values (beta, sigma); None means OLS-based defaults
    deriv : order of analytical derivatives supplied by ``Q_tobit``
    quiet : when True, suppress all printed output

    Returns
    -------
    res : estimation results from ``M.estimation``, augmented with
        yvar, xvars, N, K, n.
    """
    Qfun = lambda theta, out: Q_tobit(theta, y, x, out)
    N, K, xvars = labels(x)
    if theta0 is None:
        # Starting values: OLS slopes and the residual root-mean-square
        # as the initial sigma (the last element of theta).
        theta0 = np.zeros((K + 1, 1))
        b = la.inv(x.T @ x) @ x.T @ y
        theta0[0:-1, :] = b
        theta0[-1, :] = np.sqrt(np.mean((y - x @ b)**2))

    res = M.estimation(Qfun, theta0, deriv, cov_type, parnames=xvars)
    res.update(
        dict(zip(['yvar', 'xvars', 'N', 'K', 'n'], ['y', xvars, N, K, N])))
    if not quiet:
        print('Tobit model')
        # Censoring here means observations piled up exactly at zero.
        print('Fractions of observations that are censored: ',
              np.mean(y == 0))
        # Q is the negative log-likelihood, hence the sign flips.
        print('Initial log-likelihood', -Qfun(theta0, 'Q'))
        print('Initial gradient\n', -Qfun(theta0, 'dQ'))
        print_output(res)
    return res