def compute_weighting_matrix(p, optimal_weight=False):
    """
    Function to compute the weighting matrix for the GMM estimator.

    Args:
        p (OG-USA Specifications object): model parameters
        optimal_weight (boolean): whether to use an optimal
            weighting matrix or not

    Returns:
        W (Numpy array): Weighting matrix

    """
    # determine weighting matrix
    if optimal_weight:
        # This uses the inverse of the VCV matrix for the data moments,
        # so more precisely estimated moments get more weight.
        # Reference: Gourieroux, Monfort, and Renault (1993,
        # Journal of Applied Econometrics)
        # read in SCF
        n = 1000  # number of bootstrap iterations
        scf = wealth.get_wealth_data(scf_yrs_list=[2019],
                                     web=True,
                                     directory=None)
        VCV_data_moments = wealth.VCV_moments(scf, n, p.lambdas, p.J)
        W = np.linalg.inv(VCV_data_moments)
    else:
        # Assumes there are 2 more moments than parameters
        W = np.identity(p.J + 2)

    return W
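For context, here is a minimal sketch of how a weighting matrix like the one returned above typically enters the GMM criterion the estimator minimizes. The function name and the moment vectors are illustrative placeholders, not part of the OG-USA API.

import numpy as np

def gmm_distance(model_moments, data_moments, W):
    # Quadratic-form GMM criterion: e' W e, where e stacks the
    # differences between model and data moments.
    e = np.asarray(model_moments) - np.asarray(data_moments)
    return e @ W @ e

# Illustrative use with an identity weighting matrix over J + 2 = 9 moments
data_moments = np.linspace(0.01, 0.4, 9)
model_moments = data_moments + 0.005
W = np.identity(9)
print(gmm_distance(model_moments, data_moments, W))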
Example #2
def test_get_wealth_data():
    '''
    Test of reading wealth data.

    Requires SCF data, which is too large to check into the repo, so this
    test is flagged to not run on TravisCI.
    '''
    df = wealth.get_wealth_data()

    assert isinstance(df, pd.DataFrame)
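A sketch of one common way to flag such a test so it is skipped on CI runners that lack the SCF data; the environment variable, skip condition, and test name here are assumptions, not necessarily what the OG-USA test suite uses.

import os
import pytest

@pytest.mark.skipif(os.environ.get("CI") == "true",
                    reason="SCF data is too large to ship to CI")
def test_get_wealth_data_local():
    # Same assertion as above, but only run where the SCF data is available.
    df = wealth.get_wealth_data()
    assert isinstance(df, pd.DataFrame)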
Example #3
def wealth_moments_table(base_ss, base_params, table_format=None, path=None):
    '''
    Creates table with moments of the wealth distribution from the model
    and SCF data.

    Args:
        base_ss (dictionary): SS output from baseline run
        base_params (OG-USA Specifications class): baseline parameters
            object
        table_format (string): format to return table in: 'csv', 'tex',
            'excel', 'json', if None, a DataFrame is returned
        path (string): path to save table to

    Returns:
        table (various): table in DataFrame or string format or `None`
            if saved to disk

    '''
    table_dict = {
        'Moment': [
            'Share 0-25%', 'Share 25-50%', 'Share 50-70%', 'Share 70-80%',
            'Share 80-90%', 'Share 90-99%', 'Share 99-100%',
            'Gini Coefficient', 'var(ln(Wealth))'
        ],
        'Data': [],
        'Model': []
    }
    base_ineq = Inequality(base_ss['bssmat_splus1'], base_params.omega_SS,
                           base_params.lambdas, base_params.S, base_params.J)
    base_values = [
        1 - base_ineq.top_share(0.75),
        base_ineq.top_share(0.75) - base_ineq.top_share(0.5),
        base_ineq.top_share(0.5) - base_ineq.top_share(0.3),
        base_ineq.top_share(0.3) - base_ineq.top_share(0.2),
        base_ineq.top_share(0.2) - base_ineq.top_share(0.1),
        base_ineq.top_share(0.1) - base_ineq.top_share(0.01),
        base_ineq.top_share(0.01),
        base_ineq.gini(),
        base_ineq.var_of_logs()
    ]
    table_dict['Model'].extend(base_values)
    # get moments from Survey of Consumer Finances data
    scf = wealth.get_wealth_data()
    table_dict['Data'] = wealth.compute_wealth_moments(scf,
                                                       base_params.lambdas)
    # Make DataFrame from dict so we can use pandas functions
    table_df = pd.DataFrame.from_dict(table_dict)
    table = save_return_table(table_df, table_format, path, precision=3)

    return table
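To make the percentile-share arithmetic above concrete, here is a small self-contained illustration of how a distributional share such as 'Share 70-80%' follows from top-share calculations; top_share_from_wealth is a simplified stand-in for Inequality.top_share, which additionally weights observations by population and ability-group shares.

import numpy as np

def top_share_from_wealth(wealth_vec, frac):
    # Share of total wealth held by the richest `frac` of observations.
    w = np.sort(np.asarray(wealth_vec))
    n_top = max(int(round(frac * w.size)), 1)
    return w[-n_top:].sum() / w.sum()

rng = np.random.default_rng(0)
w = rng.lognormal(mean=0.0, sigma=1.5, size=10_000)
share_0_25 = 1 - top_share_from_wealth(w, 0.75)           # 'Share 0-25%'
share_70_80 = (top_share_from_wealth(w, 0.3)
               - top_share_from_wealth(w, 0.2))            # 'Share 70-80%'
print(share_0_25, share_70_80)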
Example #4
def test_compute_wealth_moments():
    '''
    Test of computation of wealth moments.

    Requires SCF data, which is too large to check into the repo, so this
    test is flagged to not run on TravisCI.
    '''
    expected_moments = np.array([
        -4.36938131e-03, 1.87063661e-02, 5.89720538e-02, 6.10665862e-02,
        1.17776715e-01, 3.87790368e-01, 3.60041151e-01, 8.45051216e-01,
        4.97530422e+00])
    df = wealth.get_wealth_data()
    test_moments = wealth.compute_wealth_moments(
        df, np.array([0.25, 0.25, 0.2, 0.1, 0.1, 0.09, 0.01]))

    assert(np.allclose(expected_moments, test_moments, rtol=0.001))
Example #5
def test_compute_wealth_moments():
    '''
    Test of computation of wealth moments.

    Requires SCF data, which is too large to check into the repo, so this
    test is flagged to not run on TravisCI.
    '''
    expected_moments = np.array([
        -4.42248572e-03, 1.87200063e-02, 5.78230550e-02, 5.94466440e-02,
        1.15413004e-01, 3.88100712e-01, 3.64919063e-01, 8.47639595e-01,
        5.04231901e+00
    ])
    df = wealth.get_wealth_data()
    test_moments = wealth.compute_wealth_moments(
        df, np.array([0.25, 0.25, 0.2, 0.1, 0.1, 0.09, 0.01]))

    assert (np.allclose(expected_moments, test_moments, rtol=0.001))
Example #6
def runner(output_base,
           input_dir,
           baseline=False,
           analytical_mtrs=True,
           reform={},
           user_params={},
           guid='',
           run_micro=True):

    from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ssinit_dir = os.path.join(output_base, "SSinit")
    tpiinit_dir = os.path.join(output_base, "TPIinit")
    dirs = [saved_moments_dir, ssinit_dir, tpiinit_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline,
                                     analytical_mtrs=analytical_mtrs,
                                     reform=reform,
                                     guid=guid)
    print("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating fricsh and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params['frisch'], run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print "updating g_y_annual and associated"
        g_y = (1 + user_params['g_y_annual'])**(
            float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)

    globals().update(run_params)

    from ogusa import SS, TPI
    # Generate Wealth data moments
    wealth.get_wealth_data(lambdas, J, flag_graphs, output_dir=input_dir)

    # Generate labor data moments
    labor.labor_data_moments(flag_graphs, output_dir=input_dir)

    get_baseline = True
    calibrate_model = False
    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = [
        'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta',
        'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter',
        'mindist_SS', 'mindist_TPI', 'analytical_mtrs', 'b_ellipse',
        'k_ellipse', 'upsilon', 'chi_b_guess', 'chi_n_guess', 'etr_params',
        'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq',
        'calibrate_model', 'retire', 'mean_income_data', 'g_n_vector',
        'h_wealth', 'p_wealth', 'm_wealth', 'get_baseline', 'omega', 'g_n_ss',
        'omega_SS', 'surv_rate', 'e', 'rho'
    ]
    '''
    ------------------------------------------------------------------------
        Run SS with minimization to fit chi_b and chi_n
    ------------------------------------------------------------------------
    '''

    # This is the simulation before getting the replacement rate values
    sim_params = {}
    glbs = globals()
    lcls = locals()
    for key in param_names:
        if key in glbs:
            sim_params[key] = glbs[key]
        else:
            sim_params[key] = lcls[key]

    sim_params['output_dir'] = input_dir
    sim_params['run_params'] = run_params

    income_tax_params, wealth_tax_params, ellipse_params, ss_parameters, iterative_params = SS.create_steady_state_parameters(
        **sim_params)

    ss_outputs = SS.run_steady_state(income_tax_params,
                                     ss_parameters,
                                     iterative_params,
                                     get_baseline,
                                     calibrate_model,
                                     output_dir=input_dir)
    '''
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    '''

    ss_outputs['get_baseline'] = get_baseline
    sim_params['input_dir'] = input_dir
    income_tax_params, wealth_tax_params, ellipse_params, parameters, N_tilde, omega_stationary, K0, b_sinit, \
    b_splus1init, L0, Y0, w0, r0, BQ0, T_H_0, tax0, c0, initial_b, initial_n = TPI.create_tpi_params(**sim_params)
    ss_outputs['income_tax_params'] = income_tax_params
    ss_outputs['wealth_tax_params'] = wealth_tax_params
    ss_outputs['ellipse_params'] = ellipse_params
    ss_outputs['parameters'] = parameters
    ss_outputs['N_tilde'] = N_tilde
    ss_outputs['omega_stationary'] = omega_stationary
    ss_outputs['K0'] = K0
    ss_outputs['b_sinit'] = b_sinit
    ss_outputs['b_splus1init'] = b_splus1init
    ss_outputs['L0'] = L0
    ss_outputs['Y0'] = Y0
    ss_outputs['r0'] = r0
    ss_outputs['BQ0'] = BQ0
    ss_outputs['T_H_0'] = T_H_0
    ss_outputs['tax0'] = tax0
    ss_outputs['c0'] = c0
    ss_outputs['initial_b'] = initial_b
    ss_outputs['initial_n'] = initial_n
    ss_outputs['tau_bq'] = tau_bq
    ss_outputs['g_n_vector'] = g_n_vector
    ss_outputs['output_dir'] = input_dir

    with open("ss_outputs.pkl", 'wb') as fp:
        pickle.dump(ss_outputs, fp)

    w_path, r_path, T_H_path, BQ_path, Y_path = TPI.run_time_path_iteration(
        **ss_outputs)

    print "getting to here...."
    TPI.TP_solutions(w_path, r_path, T_H_path, BQ_path, **ss_outputs)
    print "took {0} seconds to get that part done.".format(time.time() - tick)
Example #7
def chi_estimate(
    income_tax_params,
    ss_params,
    iterative_params,
    chi_guesses,
    baseline_dir="./OUTPUT",
):
    """
    --------------------------------------------------------------------
    This function calls others to obtain the data moments and then
    runs the simulated method of moments estimation by calling the
    minimization routine.

    INPUTS:
    income_tax_parameters = length 4 tuple, (analytical_mtrs, etr_params, mtrx_params, mtry_params)
    ss_parameters         = length 27 tuple, (J, S, T, BW, beta, sigma, alpha, Z, delta, ltilde, nu, g_y,\
                            g_n_ss, tau_payroll, tau_bq, rho, omega_SS, lambdas, imm_rates, e, retire,\
                            mean_income_data, h_wealth, p_wealth, m_wealth, b_ellipse, upsilon)
    iterative_params      = [2,] vector, max iterations and tolerance
                             for SS solution
    chi_guesses           = [J+S,] vector, initial guesses of chi_b and chi_n stacked together
    baseline_dir          = string, path where baseline results located


    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
    wealth.compute_wealth_moments()
    labor.labor_data_moments()
    minstat()

    OBJECTS CREATED WITHIN FUNCTION:
    wealth_moments     = [J+2,] array, wealth moments from data
    labor_moments      = [S,] array, labor moments from data
    data_moments       = [J+2+S,] array, wealth and labor moments stacked
    bnds               = [S+J,] array, bounds for parameter estimates
    chi_guesses_flat   =  [J+S,] vector, initial guesses of chi_b and chi_n stacked
    min_arg            = length 6 tuple, variables needed for minimizer
    est_output         = dictionary, output from minimizer
    chi_params         = [J+S,] vector, parameters estimates for chi_b and chi_n stacked
    objective_func_min = scalar, minimum of statistical objective function


    OUTPUT:
    ./baseline_dir/Calibration/chi_estimation.pkl


    RETURNS: chi_params
    --------------------------------------------------------------------
    """

    # unpack tuples of parameters
    (
        J,
        S,
        T,
        BW,
        beta,
        sigma,
        alpha,
        Z,
        delta,
        ltilde,
        nu,
        g_y,
        g_n_ss,
        tau_payroll,
        tau_bq,
        rho,
        omega_SS,
        lambdas,
        imm_rates,
        e,
        retire,
        mean_income_data,
        h_wealth,
        p_wealth,
        m_wealth,
        b_ellipse,
        upsilon,
    ) = ss_params
    chi_b_guess, chi_n_guess = chi_guesses

    flag_graphs = False

    # specify bootstrap iterations
    n = 10000

    # Generate Wealth data moments
    scf, data = wealth.get_wealth_data()
    wealth_moments = wealth.compute_wealth_moments(scf, lambdas, J)

    # Generate labor data moments
    cps = labor.get_labor_data()
    labor_moments = labor.compute_labor_moments(cps, S)

    # combine moments
    data_moments = list(wealth_moments.flatten()) + list(
        labor_moments.flatten()
    )

    # determine weighting matrix
    optimal_weight = False
    if optimal_weight:
        VCV_wealth_moments = wealth.VCV_moments(scf, n, lambdas, J)
        VCV_labor_moments = labor.VCV_moments(cps, n, lambdas, S)
        VCV_data_moments = np.zeros((J + 2 + S, J + 2 + S))
        VCV_data_moments[: J + 2, : J + 2] = VCV_wealth_moments
        VCV_data_moments[J + 2 :, J + 2 :] = VCV_labor_moments
        W = np.linalg.inv(VCV_data_moments)
        # np.savetxt('VCV_data_moments.csv',VCV_data_moments)
    else:
        W = np.identity(J + 2 + S)

    # call minimizer
    bnds = np.tile(
        np.array([1e-12, None]), (S + J, 1)
    )  # Need (1e-12, None) S+J times
    chi_guesses_flat = list(chi_b_guess.flatten()) + list(
        chi_n_guess.flatten()
    )

    min_args = (
        data_moments,
        W,
        income_tax_params,
        ss_params,
        iterative_params,
        chi_guesses_flat,
        baseline_dir,
    )
    # est_output = opt.minimize(minstat, chi_guesses_flat, args=(min_args), method="L-BFGS-B", bounds=bnds,
    #                 tol=1e-15, options={'maxfun': 1, 'maxiter': 1, 'maxls': 1})
    # est_output = opt.minimize(minstat, chi_guesses_flat, args=(min_args), method="L-BFGS-B", bounds=bnds,
    #                 tol=1e-15)
    # chi_params = est_output.x
    # objective_func_min = est_output.fun
    #
    # # pickle output
    # utils.mkdirs(os.path.join(baseline_dir, "Calibration"))
    # est_dir = os.path.join(baseline_dir, "Calibration/chi_estimation.pkl")
    # pickle.dump(est_output, open(est_dir, "wb"))
    #
    # # save data and model moments and min stat to csv
    # # to then put in table of paper
    chi_params = chi_guesses_flat
    chi_b = chi_params[:J]
    chi_n = chi_params[J:]
    chi_params_list = (chi_b, chi_n)

    ss_output = SS.run_SS(
        income_tax_params,
        ss_params,
        iterative_params,
        chi_params_list,
        True,
        baseline_dir,
    )
    model_moments = calc_moments(ss_output, omega_SS, lambdas, S, J)

    # # make dataframe for results
    # columns = ['data_moment', 'model_moment', 'minstat']
    # moment_fit = pd.DataFrame(index=range(0,J+2+S), columns=columns)
    # moment_fit = moment_fit.fillna(0) # with 0s rather than NaNs
    # moment_fit['data_moment'] = data_moments
    # moment_fit['model_moment'] = model_moments
    # moment_fit['minstat'] = objective_func_min
    # est_dir = os.path.join(baseline_dir, "Calibration/moment_results.pkl")s
    # moment_fit.to_csv(est_dir)

    # calculate std errors
    h = 0.0001  # pct change in parameter
    model_moments_low = np.zeros((len(chi_params), len(model_moments)))
    model_moments_high = np.zeros((len(chi_params), len(model_moments)))
    for i in range(len(chi_params)):
        # Perturb only the i-th parameter down by h percent.  Copy the
        # parameter vector so the perturbation does not alias chi_params
        # or accumulate across iterations.
        chi_params_low = list(chi_params)
        chi_params_low[i] = chi_params[i] * (1 - h)
        chi_b = chi_params_low[:J]
        chi_n = chi_params_low[J:]
        chi_params_list = (chi_b, chi_n)
        ss_output = SS.run_SS(
            income_tax_params,
            ss_params,
            iterative_params,
            chi_params_list,
            True,
            baseline_dir,
        )
        model_moments_low[i, :] = calc_moments(
            ss_output, omega_SS, lambdas, S, J
        )

        # Perturb only the i-th parameter up by h percent.
        chi_params_high = list(chi_params)
        chi_params_high[i] = chi_params[i] * (1 + h)
        chi_b = chi_params_high[:J]
        chi_n = chi_params_high[J:]
        chi_params_list = (chi_b, chi_n)
        ss_output = SS.run_SS(
            income_tax_params,
            ss_params,
            iterative_params,
            chi_params_list,
            True,
            baseline_dir,
        )
        model_moments_high[i, :] = calc_moments(
            ss_output, omega_SS, lambdas, S, J
        )

    deriv_moments = (
        np.asarray(model_moments_high) - np.asarray(model_moments_low)
    ).T / (2.0 * h * np.asarray(chi_params))
    VCV_params = np.linalg.inv(
        np.dot(np.dot(deriv_moments.T, W), deriv_moments)
    )
    std_errors_chi = (np.diag(VCV_params)) ** (1 / 2.0)
    sd_dir = os.path.join(baseline_dir, "Calibration/chi_std_errors.pkl")
    with open(sd_dir, "wb") as f:
        pickle.dump(std_errors_chi, f)

    np.savetxt("chi_std_errors.csv", std_errors_chi)

    return chi_params
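For reference, the standard-error block at the end of chi_estimate implements a central-difference Jacobian of the model moments and the simple GMM variance formula; in the code's notation, deriv_moments is G, VCV_params is the estimated variance-covariance matrix, and std_errors_chi holds the square roots of its diagonal. The simple form (G'WG)^{-1} is exact when W is the optimal weighting matrix (the inverse of the moment VCV); with an arbitrary W a sandwich formula would be needed.

G_{kj} = \frac{m_k\bigl(\chi_j(1+h)\bigr) - m_k\bigl(\chi_j(1-h)\bigr)}{2\,h\,\chi_j},
\qquad
\widehat{V}(\hat{\chi}) = \left(G' W G\right)^{-1},
\qquad
\mathrm{s.e.}(\hat{\chi}_j) = \sqrt{\widehat{V}_{jj}}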
Example #8
'''
------------------------------------------------------------------------
Setup
------------------------------------------------------------------------
get_baseline = Flag to run baseline or tax experiments (bool)
calibrate_model = Flag to run calibration of chi values or not (bool)
------------------------------------------------------------------------
'''
get_baseline = True
calibrate_model = False
# taxes = 'baseline'  # or 'policy' / 'none': flag for whether we run without
# taxes, with baseline taxes, or with user-input taxes


#globals().update(ogusa.parameters.get_parameters_from_file())
globals().update(ogusa.parameters.get_parameters(baseline=True, guid='abc123'))

# Generate Wealth data moments
output_dir = "./OUTPUT"
wealth.get_wealth_data(lambdas, J, flag_graphs, output_dir)

# Generate labor data moments
labor.labor_data_moments(flag_graphs)


# List of parameter names that will not be changing (unless we decide to
# change them for a tax experiment)
param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age',
             'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta', 'E',
             'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI',
             'b_ellipse', 'k_ellipse', 'upsilon',
             'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params',
             'mtry_params','tau_payroll', 'tau_bq', 'calibrate_model',
             'retire', 'mean_income_data', 'g_n_vector',
             'h_wealth', 'p_wealth', 'm_wealth', 'get_baseline',
Example #9
import ogusa.TPI
from ogusa import parameters, wealth, labor, demographics, income, SS, TPI
'''
------------------------------------------------------------------------
Setup
------------------------------------------------------------------------
get_baseline = Flag to run baseline or tax experiments (bool)
calibrate_model = Flag to run calibration of chi values or not (bool)
------------------------------------------------------------------------
'''

globals().update(ogusa.parameters.get_parameters())

# Generate Wealth data moments
output_dir = "./OUTPUT"
wealth.get_wealth_data(lambdas, J, flag_graphs, output_dir)

# Generate labor data moments
labor.labor_data_moments(flag_graphs)

get_baseline = True
calibrate_model = False

# List of parameter names that will not be changing (unless we decide to
# change them for a tax experiment)
param_names = [
    'S', 'J', 'T', 'lambdas', 'starting_age', 'ending_age', 'beta', 'sigma',
    'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter', 'mindist_SS',
    'mindist_TPI', 'b_ellipse', 'k_ellipse', 'upsilon', 'a_tax_income',
    'chi_b_guess', 'chi_n_guess', 'b_tax_income', 'c_tax_income',
    'd_tax_income', 'tau_payroll', 'tau_bq', 'calibrate_model', 'retire',
Example #10
def test_sstpi():
    import tempfile
    import pickle
    import numpy as np
    import os

    import ogusa

    ogusa.parameters.DATASET = "REAL"

    from ogusa.utils import comp_array
    from ogusa.utils import comp_scalar
    from ogusa.utils import dict_compare
    from ogusa.utils import pickle_file_compare

    import ogusa.SS
    import ogusa.TPI
    from ogusa import parameters, wealth, labor, demographics, income, SS, TPI

    globals().update(ogusa.parameters.get_parameters())

    # Generate Wealth data moments
    output_dir = TEST_OUTPUT
    input_dir = "./OUTPUT"
    wealth.get_wealth_data(lambdas, J, flag_graphs, output_dir)

    # Generate labor data moments
    labor.labor_data_moments(flag_graphs, output_dir=output_dir)

    get_baseline = True
    calibrate_model = False

    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)
    param_names = [
        "S",
        "J",
        "T",
        "lambdas",
        "starting_age",
        "ending_age",
        "beta",
        "sigma",
        "alpha",
        "nu",
        "Z",
        "delta",
        "E",
        "ltilde",
        "g_y",
        "maxiter",
        "mindist_SS",
        "mindist_TPI",
        "b_ellipse",
        "k_ellipse",
        "upsilon",
        "a_tax_income",
        "chi_b_guess",
        "chi_n_guess",
        "b_tax_income",
        "c_tax_income",
        "d_tax_income",
        "tau_payroll",
        "tau_bq",
        "calibrate_model",
        "retire",
        "mean_income_data",
        "g_n_vector",
        "h_wealth",
        "p_wealth",
        "m_wealth",
        "get_baseline",
        "omega",
        "g_n_ss",
        "omega_SS",
        "surv_rate",
        "e",
        "rho",
    ]

    """
    ------------------------------------------------------------------------
        Run SS with minimization to fit chi_b and chi_n
    ------------------------------------------------------------------------
    """

    # This is the simulation before getting the replacement rate values
    sim_params = {}
    for key in param_names:
        try:
            sim_params[key] = locals()[key]
        except KeyError:
            sim_params[key] = globals()[key]

    sim_params["output_dir"] = output_dir
    sim_params["input_dir"] = input_dir
    income_tax_params, wealth_tax_params, ellipse_params, ss_parameters, iterative_params = SS.create_steady_state_parameters(
        **sim_params
    )

    ss_outputs = SS.run_steady_state(
        ss_parameters, iterative_params, get_baseline, calibrate_model, output_dir=output_dir
    )

    """
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    """

    ss_outputs["get_baseline"] = get_baseline
    income_tax_params, wealth_tax_params, ellipse_params, parameters, N_tilde, omega_stationary, K0, b_sinit, b_splus1init, L0, Y0, w0, r0, BQ0, T_H_0, tax0, c0, initial_b, initial_n = TPI.create_tpi_params(
        **sim_params
    )
    ss_outputs["output_dir"] = output_dir
    ss_outputs["income_tax_params"] = income_tax_params
    ss_outputs["wealth_tax_params"] = wealth_tax_params
    ss_outputs["ellipse_params"] = ellipse_params
    ss_outputs["parameters"] = parameters
    ss_outputs["N_tilde"] = N_tilde
    ss_outputs["omega_stationary"] = omega_stationary
    ss_outputs["K0"] = K0
    ss_outputs["b_sinit"] = b_sinit
    ss_outputs["b_splus1init"] = b_splus1init
    ss_outputs["L0"] = L0
    ss_outputs["Y0"] = Y0
    ss_outputs["r0"] = r0
    ss_outputs["BQ0"] = BQ0
    ss_outputs["T_H_0"] = T_H_0
    ss_outputs["tax0"] = tax0
    ss_outputs["c0"] = c0
    ss_outputs["initial_b"] = initial_b
    ss_outputs["initial_n"] = initial_n
    ss_outputs["tau_bq"] = tau_bq
    ss_outputs["g_n_vector"] = g_n_vector
    TPI.run_time_path_iteration(**ss_outputs)

    # Platform specific exceptions:
    if sys.platform == "darwin":
        exceptions = {"tax_path": 0.08, "c_path": 0.02, "b_mat": 0.0017, "solutions": 0.005}
    else:
        exceptions = {}

    # compare results to test data
    for old, new in zip(oldfiles, newfiles):
        print "trying a pair"
        print old, new
        assert pickle_file_compare(old, new, exceptions=exceptions, relative=True)
        print "next pair"
Example #11
def runner(output_base, baseline_dir, baseline=False, analytical_mtrs=True, age_specific=False, reform={}, user_params={}, guid='', run_micro=True):

    #from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import parameters, wealth, labor, demog, income, utils
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ssinit_dir = os.path.join(output_base, "SSinit")
    tpiinit_dir = os.path.join(output_base, "TPIinit")
    dirs = [saved_moments_dir, ssinit_dir, tpiinit_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline, analytical_mtrs=analytical_mtrs, age_specific=age_specific, 
                                     start_year=user_params['start_year'], reform=reform, guid=guid)
    print ("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating fricsh and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(user_params['frisch'],
                                                               run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print "updating g_y_annual and associated"
        g_y = (1 + user_params['g_y_annual'])**(float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)


    from ogusa import SS, TPI
    # Generate Wealth data moments
    wealth.get_wealth_data(run_params['lambdas'], run_params['J'], run_params['flag_graphs'], output_dir=output_base)

    # Generate labor data moments
    labor.labor_data_moments(run_params['flag_graphs'], output_dir=output_base)

    
    calibrate_model = False
    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age',
                'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta', 'E',
                'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI',
                'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon',
                'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params',
                'mtry_params','tau_payroll', 'tau_bq',
                'retire', 'mean_income_data', 'g_n_vector',
                'h_wealth', 'p_wealth', 'm_wealth',
                'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho']


    '''
    ------------------------------------------------------------------------
        Run SS 
    ------------------------------------------------------------------------
    '''

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params

    income_tax_params, ss_parameters, iterative_params, chi_params = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_SS(income_tax_params, ss_parameters, iterative_params, chi_params, baseline, 
                                     baseline_dir=baseline_dir)

    '''
    ------------------------------------------------------------------------
        Pickle SS results 
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/ss_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/ss_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))


    '''
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    '''

    sim_params['input_dir'] = output_base
    sim_params['baseline_dir'] = baseline_dir
    

    income_tax_params, tpi_params, iterative_params, initial_values, SS_values = TPI.create_tpi_params(**sim_params)

    # ss_outputs['income_tax_params'] = income_tax_params
    # ss_outputs['wealth_tax_params'] = wealth_tax_params
    # ss_outputs['ellipse_params'] = ellipse_params
    # ss_outputs['parameters'] = parameters
    # ss_outputs['N_tilde'] = N_tilde
    # ss_outputs['omega_stationary'] = omega_stationary
    # ss_outputs['K0'] = K0
    # ss_outputs['b_sinit'] = b_sinit
    # ss_outputs['b_splus1init'] = b_splus1init
    # ss_outputs['L0'] = L0
    # ss_outputs['Y0'] = Y0
    # ss_outputs['r0'] = r0
    # ss_outputs['BQ0'] = BQ0
    # ss_outputs['T_H_0'] = T_H_0
    # ss_outputs['factor_ss'] = factor
    # ss_outputs['tax0'] = tax0
    # ss_outputs['c0'] = c0
    # ss_outputs['initial_b'] = initial_b
    # ss_outputs['initial_n'] = initial_n
    # ss_outputs['tau_bq'] = tau_bq
    # ss_outputs['g_n_vector'] = g_n_vector
    # ss_outputs['output_dir'] = output_base


    # with open("ss_outputs.pkl", 'wb') as fp:
    #     pickle.dump(ss_outputs, fp)

    w_path, r_path, T_H_path, BQ_path, Y_path = TPI.run_TPI(income_tax_params, 
        tpi_params, iterative_params, initial_values, SS_values, output_dir=output_base)


    print "getting to here...."
    TPI.TP_solutions(w_path, r_path, T_H_path, BQ_path, **ss_outputs)
    print "took {0} seconds to get that part done.".format(time.time() - tick)
Example #12
def test_sstpi():
    import tempfile
    import pickle
    import numpy as np
    import os

    import ogusa
    ogusa.parameters.DATASET = 'REAL'

    from ogusa.utils import comp_array
    from ogusa.utils import comp_scalar
    from ogusa.utils import dict_compare
    from ogusa.utils import pickle_file_compare

    import ogusa.SS
    import ogusa.TPI
    from ogusa import parameters, wealth, labor, demographics, income, SS, TPI

    globals().update(ogusa.parameters.get_parameters())

    # Generate Wealth data moments
    output_dir = TEST_OUTPUT
    input_dir = "./OUTPUT"
    wealth.get_wealth_data(lambdas, J, flag_graphs, output_dir)

    # Generate labor data moments
    labor.labor_data_moments(flag_graphs, output_dir=output_dir)

    get_baseline = True
    calibrate_model = False

    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)
    param_names = [
        'S', 'J', 'T', 'lambdas', 'starting_age', 'ending_age', 'beta',
        'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter',
        'mindist_SS', 'mindist_TPI', 'b_ellipse', 'k_ellipse', 'upsilon',
        'a_tax_income', 'chi_b_guess', 'chi_n_guess', 'b_tax_income',
        'c_tax_income', 'd_tax_income', 'tau_payroll', 'tau_bq',
        'calibrate_model', 'retire', 'mean_income_data', 'g_n_vector',
        'h_wealth', 'p_wealth', 'm_wealth', 'get_baseline', 'omega', 'g_n_ss',
        'omega_SS', 'surv_rate', 'e', 'rho'
    ]
    '''
    ------------------------------------------------------------------------
        Run SS with minimization to fit chi_b and chi_n
    ------------------------------------------------------------------------
    '''

    # This is the simulation before getting the replacement rate values
    sim_params = {}
    for key in param_names:
        try:
            sim_params[key] = locals()[key]
        except KeyError:
            sim_params[key] = globals()[key]

    sim_params['output_dir'] = output_dir
    sim_params['input_dir'] = input_dir
    income_tax_params, wealth_tax_params, ellipse_params, ss_parameters, \
        iterative_params = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_steady_state(ss_parameters,
                                     iterative_params,
                                     get_baseline,
                                     calibrate_model,
                                     output_dir=output_dir)
    '''
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    '''

    ss_outputs['get_baseline'] = get_baseline
    income_tax_params, wealth_tax_params, ellipse_params, parameters, N_tilde, omega_stationary, K0, b_sinit, \
        b_splus1init, L0, Y0, w0, r0, BQ0, T_H_0, tax0, c0, initial_b, initial_n = TPI.create_tpi_params(
            **sim_params)
    ss_outputs['output_dir'] = output_dir
    ss_outputs['income_tax_params'] = income_tax_params
    ss_outputs['wealth_tax_params'] = wealth_tax_params
    ss_outputs['ellipse_params'] = ellipse_params
    ss_outputs['parameters'] = parameters
    ss_outputs['N_tilde'] = N_tilde
    ss_outputs['omega_stationary'] = omega_stationary
    ss_outputs['K0'] = K0
    ss_outputs['b_sinit'] = b_sinit
    ss_outputs['b_splus1init'] = b_splus1init
    ss_outputs['L0'] = L0
    ss_outputs['Y0'] = Y0
    ss_outputs['r0'] = r0
    ss_outputs['BQ0'] = BQ0
    ss_outputs['T_H_0'] = T_H_0
    ss_outputs['tax0'] = tax0
    ss_outputs['c0'] = c0
    ss_outputs['initial_b'] = initial_b
    ss_outputs['initial_n'] = initial_n
    ss_outputs['tau_bq'] = tau_bq
    ss_outputs['g_n_vector'] = g_n_vector
    TPI.run_time_path_iteration(**ss_outputs)

    # Platform specific exceptions:
    if sys.platform == "darwin":
        exceptions = {
            'tax_path': 0.08,
            'c_path': 0.02,
            'b_mat': 0.0017,
            'solutions': 0.005
        }
    else:
        exceptions = {}

    # compare results to test data
    for old, new in zip(oldfiles, newfiles):
        print("trying a pair")
        print(old, new)
        assert pickle_file_compare(old,
                                   new,
                                   exceptions=exceptions,
                                   relative=True)
        print("next pair")
Example #13
def beta_estimate(beta_initial_guesses,
                  og_spec={},
                  two_step=False,
                  client=None):
    """
    This function estimates the beta_j parameters using a simulated
    method of moments estimator that targets moments of the wealth
    distribution.

    Args:
        beta_initial_guesses (array-like): array of initial guesses for the
            beta_j parameters
        og_spec (dict): any updates to default model parameters
        two_step (boolean): whether to use a two-step estimator
        client (Dask Client object): dask client for multiprocessing

    Returns:
        beta_hat (array-like): estimates of the beta_j
        beta_se (array-like): standard errors on the beta_j estimates

    """

    # initialize parameters object
    tax_func_path = os.path.join(
        "..",
        "..",
        "dynamic",
        "ogusa",
        "data",
        "tax_functions",
        "TxFuncEst_baseline_PUF.pkl",
    )
    p = Specifications(baseline=True)
    p.update_specifications(og_spec)
    p.get_tax_function_parameters(client, False, tax_func_path)

    # Compute wealth moments from the data
    scf = wealth.get_wealth_data(scf_yrs_list=[2019], web=True, directory=None)
    data_moments = wealth.compute_wealth_moments(scf, p.lambdas)

    # Get weighting matrix
    W = compute_weighting_matrix(p, optimal_weight=False)

    # call minimizer
    # set bounds on beta estimates (need to be between 0 and 1)
    bnds = np.tile(np.array([1e-12, 1]), (p.J, 1))  # Need (1e-12, 1) J times
    # pack arguments in a tuple
    min_args = (data_moments, W, p, client)
    # NOTE: may want to try some global optimization routine like
    # simulated annealing (aka basin hopping) or differential
    # evolution
    est_output = opt.minimize(
        minstat,
        beta_initial_guesses,
        args=(min_args),
        method="L-BFGS-B",
        bounds=bnds,
        tol=1e-15,
        options={
            "maxfun": 1,
            "maxiter": 1,
            "maxls": 1
        },
    )
    beta_hat = est_output["x"]

    # calculate std errors
    K = len(data_moments)
    beta_se, VCV_params = compute_se(beta_hat, W, K, p, h=0.01, client=client)

    if two_step:
        W = VCV_params
        min_args = (data_moments, W, p, client)
        est_output = opt.minimize(
            minstat,
            beta_initial_guesses,
            args=(min_args),
            method="L-BFGS-B",
            bounds=bnds,
            tol=1e-15,
            options={
                "maxfun": 1,
                "maxiter": 1,
                "maxls": 1
            },
        )
        beta_hat = est_output["x"]
        beta_se, VCV_params = compute_se(beta_hat,
                                         W,
                                         K,
                                         p,
                                         h=0.01,
                                         client=client)

    return beta_hat, beta_se
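A hypothetical call to beta_estimate; the initial guesses and specification update below are illustrative values, not a recommended calibration.

import numpy as np

# One discount-factor guess per ability type (J = 7 in the default grouping)
beta_guesses = np.ones(7) * 0.96
updates = {'frisch': 0.41}  # assumed example of an og_spec update
beta_hat, beta_se = beta_estimate(beta_guesses,
                                  og_spec=updates,
                                  two_step=False,
                                  client=None)
print(beta_hat, beta_se)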
Example #14
def runner(output_base, input_dir, baseline=False, analytical_mtrs=True, age_specific=False, reform={}, user_params={}, guid='', run_micro=True):

    from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ssinit_dir = os.path.join(output_base, "SSinit")
    tpiinit_dir = os.path.join(output_base, "TPIinit")
    dirs = [saved_moments_dir, ssinit_dir, tpiinit_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline, analytical_mtrs=analytical_mtrs, age_specific=age_specific, reform=reform, guid=guid)
    print ("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating fricsh and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(user_params['frisch'],
                                                               run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print "updating g_y_annual and associated"
        g_y = (1 + user_params['g_y_annual'])**(float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)

    globals().update(run_params)

    from ogusa import SS, TPI
    # Generate Wealth data moments
    wealth.get_wealth_data(lambdas, J, flag_graphs, output_dir=input_dir)

    # Generate labor data moments
    labor.labor_data_moments(flag_graphs, output_dir=input_dir)

    
    get_baseline = True
    calibrate_model = False
    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age',
                'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta', 'E',
                'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI',
                'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon',
                'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params',
                'mtry_params','tau_payroll', 'tau_bq', 'calibrate_model',
                'retire', 'mean_income_data', 'g_n_vector',
                'h_wealth', 'p_wealth', 'm_wealth', 'get_baseline',
                'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho']


    '''
    ------------------------------------------------------------------------
        Run SS with minimization to fit chi_b and chi_n
    ------------------------------------------------------------------------
    '''

    # This is the simulation before getting the replacement rate values
    sim_params = {}
    glbs = globals()
    lcls = locals()
    for key in param_names:
        if key in glbs:
            sim_params[key] = glbs[key]
        else:
            sim_params[key] = lcls[key]

    sim_params['output_dir'] = input_dir
    sim_params['run_params'] = run_params

    income_tax_params, wealth_tax_params, ellipse_params, ss_parameters, iterative_params = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_steady_state(income_tax_params, ss_parameters, iterative_params, get_baseline, calibrate_model, output_dir=input_dir)


    '''
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    '''

    ss_outputs['get_baseline'] = get_baseline
    sim_params['input_dir'] = input_dir
    income_tax_params, wealth_tax_params, ellipse_params, parameters, N_tilde, omega_stationary, K0, b_sinit, \
    b_splus1init, L0, Y0, w0, r0, BQ0, T_H_0, tax0, c0, initial_b, initial_n = TPI.create_tpi_params(**sim_params)
    ss_outputs['income_tax_params'] = income_tax_params
    ss_outputs['wealth_tax_params'] = wealth_tax_params
    ss_outputs['ellipse_params'] = ellipse_params
    ss_outputs['parameters'] = parameters
    ss_outputs['N_tilde'] = N_tilde
    ss_outputs['omega_stationary'] = omega_stationary
    ss_outputs['K0'] = K0
    ss_outputs['b_sinit'] = b_sinit
    ss_outputs['b_splus1init'] = b_splus1init
    ss_outputs['L0'] = L0
    ss_outputs['Y0'] = Y0
    ss_outputs['r0'] = r0
    ss_outputs['BQ0'] = BQ0
    ss_outputs['T_H_0'] = T_H_0
    ss_outputs['tax0'] = tax0
    ss_outputs['c0'] = c0
    ss_outputs['initial_b'] = initial_b
    ss_outputs['initial_n'] = initial_n
    ss_outputs['tau_bq'] = tau_bq
    ss_outputs['g_n_vector'] = g_n_vector
    ss_outputs['output_dir'] = input_dir


    with open("ss_outputs.pkl", 'wb') as fp:
        pickle.dump(ss_outputs, fp)

    w_path, r_path, T_H_path, BQ_path, Y_path = TPI.run_time_path_iteration(**ss_outputs)


    print "getting to here...."
    TPI.TP_solutions(w_path, r_path, T_H_path, BQ_path, **ss_outputs)
    print "took {0} seconds to get that part done.".format(time.time() - tick)
Example #15
def runner_SS(output_base, baseline_dir, baseline=False, analytical_mtrs=True, age_specific=False, reform={}, user_params={}, guid='', run_micro=True):

    from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ssinit_dir = os.path.join(output_base, "SSinit")
    tpiinit_dir = os.path.join(output_base, "TPIinit")
    dirs = [saved_moments_dir, ssinit_dir, tpiinit_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline, analytical_mtrs=analytical_mtrs, age_specific=age_specific, 
                                     start_year=user_params['start_year'], reform=reform, guid=guid)
    print ("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating fricsh and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(user_params['frisch'],
                                                               run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print "updating g_y_annual and associated"
        g_y = (1 + user_params['g_y_annual'])**(float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)

    globals().update(run_params)

    from ogusa import SS, TPI
    # Generate Wealth data moments
    wealth.get_wealth_data(lambdas, J, flag_graphs, output_dir=output_base)

    # Generate labor data moments
    labor.labor_data_moments(flag_graphs, output_dir=output_base)

    
    get_baseline = True
    calibrate_model = True
    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age',
                'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta', 'E',
                'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI',
                'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon',
                'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params',
                'mtry_params','tau_payroll', 'tau_bq', 'calibrate_model',
                'retire', 'mean_income_data', 'g_n_vector',
                'h_wealth', 'p_wealth', 'm_wealth', 'get_baseline',
                'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho']


    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''

    sim_params = {}
    glbs = globals()
    lcls = locals()
    for key in param_names:
        if key in glbs:
            sim_params[key] = glbs[key]
        else:
            sim_params[key] = lcls[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params

    income_tax_params, wealth_tax_params, ellipse_params, ss_parameters, iterative_params = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_steady_state(income_tax_params, ss_parameters, iterative_params, baseline, 
                                     calibrate_model, output_dir=output_base, baseline_dir=baseline_dir)
Example #16
def runner(output_base,
           baseline_dir,
           baseline=False,
           analytical_mtrs=True,
           age_specific=False,
           reform={},
           user_params={},
           guid='',
           run_micro=True):

    #from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import parameters, wealth, labor, demog, income, utils
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ssinit_dir = os.path.join(output_base, "SSinit")
    tpiinit_dir = os.path.join(output_base, "TPIinit")
    dirs = [saved_moments_dir, ssinit_dir, tpiinit_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline,
                                     analytical_mtrs=analytical_mtrs,
                                     age_specific=age_specific,
                                     start_year=user_params['start_year'],
                                     reform=reform,
                                     guid=guid)
    print("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating fricsh and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params['frisch'], run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print "updating g_y_annual and associated"
        g_y = (1 + user_params['g_y_annual'])**(
            float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)

    from ogusa import SS, TPI
    # Generate Wealth data moments
    wealth.get_wealth_data(run_params['lambdas'],
                           run_params['J'],
                           run_params['flag_graphs'],
                           output_dir=output_base)

    # Generate labor data moments
    labor.labor_data_moments(run_params['flag_graphs'], output_dir=output_base)

    calibrate_model = False
    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = [
        'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta',
        'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter',
        'mindist_SS', 'mindist_TPI', 'analytical_mtrs', 'b_ellipse',
        'k_ellipse', 'upsilon', 'chi_b_guess', 'chi_n_guess', 'etr_params',
        'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq', 'retire',
        'mean_income_data', 'g_n_vector', 'h_wealth', 'p_wealth', 'm_wealth',
        'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho'
    ]
    '''
    ------------------------------------------------------------------------
        Run SS 
    ------------------------------------------------------------------------
    '''

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params

    income_tax_params, ss_parameters, iterative_params, chi_params = SS.create_steady_state_parameters(
        **sim_params)

    ss_outputs = SS.run_SS(income_tax_params,
                           ss_parameters,
                           iterative_params,
                           chi_params,
                           baseline,
                           baseline_dir=baseline_dir)
    '''
    ------------------------------------------------------------------------
        Pickle SS results 
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/ss_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/ss_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    '''
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    '''

    sim_params['input_dir'] = output_base
    sim_params['baseline_dir'] = baseline_dir

    income_tax_params, tpi_params, iterative_params, initial_values, SS_values = TPI.create_tpi_params(
        **sim_params)

    # ss_outputs['income_tax_params'] = income_tax_params
    # ss_outputs['wealth_tax_params'] = wealth_tax_params
    # ss_outputs['ellipse_params'] = ellipse_params
    # ss_outputs['parameters'] = parameters
    # ss_outputs['N_tilde'] = N_tilde
    # ss_outputs['omega_stationary'] = omega_stationary
    # ss_outputs['K0'] = K0
    # ss_outputs['b_sinit'] = b_sinit
    # ss_outputs['b_splus1init'] = b_splus1init
    # ss_outputs['L0'] = L0
    # ss_outputs['Y0'] = Y0
    # ss_outputs['r0'] = r0
    # ss_outputs['BQ0'] = BQ0
    # ss_outputs['T_H_0'] = T_H_0
    # ss_outputs['factor_ss'] = factor
    # ss_outputs['tax0'] = tax0
    # ss_outputs['c0'] = c0
    # ss_outputs['initial_b'] = initial_b
    # ss_outputs['initial_n'] = initial_n
    # ss_outputs['tau_bq'] = tau_bq
    # ss_outputs['g_n_vector'] = g_n_vector
    # ss_outputs['output_dir'] = output_base

    # with open("ss_outputs.pkl", 'wb') as fp:
    #     pickle.dump(ss_outputs, fp)

    w_path, r_path, T_H_path, BQ_path, Y_path = TPI.run_TPI(
        income_tax_params,
        tpi_params,
        iterative_params,
        initial_values,
        SS_values,
        output_dir=output_base)

    print "getting to here...."
    TPI.TP_solutions(w_path, r_path, T_H_path, BQ_path, **ss_outputs)
    print "took {0} seconds to get that part done.".format(time.time() - tick)