Example #1
def test_inner_loop():
    # Test SS.inner_loop function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/inner_loop_inputs.pkl'))
    (outer_loop_vars_in, params, baseline, baseline_spending) = input_tuple
    ss_params, income_tax_params, chi_params, small_open_params = params
    (bssmat, nssmat, r, Y, T_H, factor) = outer_loop_vars_in
    p = Specifications()
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n_ss, tau_payroll,
     tau_bq, p.rho, p.omega_SS, p.budget_balance, alpha_T,
     p.debt_ratio_ss, tau_b, delta_tau, lambdas, imm_rates, p.e,
     retire, p.mean_income_data, h_wealth, p_wealth, m_wealth,
     p.b_ellipse, p.upsilon) = ss_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.alpha_T = np.ones(p.T + p.S) * alpha_T
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.lambdas = lambdas.reshape(p.J, 1)
    p.imm_rates = imm_rates.reshape(1, p.S)
    p.tax_func_type = 'DEP'
    p.baseline = baseline
    p.baseline_spending = baseline_spending
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
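    # The reshape/transpose below assumes each parameter array has shape
    # (S, #params) and converts it to (1, S, #params), putting a single
    # steady-state time period in the leading dimension.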
    p.etr_params = np.transpose(etr_params.reshape(
        p.S, 1, etr_params.shape[-1]), (1, 0, 2))
    p.mtrx_params = np.transpose(mtrx_params.reshape(
        p.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
    p.mtry_params = np.transpose(mtry_params.reshape(
        p.S, 1, mtry_params.shape[-1]), (1, 0, 2))
    p.chi_b, p.chi_n = chi_params
    p.small_open, firm_r, hh_r = small_open_params
    p.firm_r = np.ones(p.T + p.S) * firm_r
    p.hh_r = np.ones(p.T + p.S) * hh_r
    p.num_workers = 1
    BQ = np.ones(p.J) * 0.00019646295986015257
    outer_loop_vars = (bssmat, nssmat, r, BQ, Y, T_H, factor)
    (euler_errors, new_bmat, new_nmat, new_r, new_r_gov, new_r_hh,
     new_w, new_T_H, new_Y, new_factor, new_BQ,
     average_income_model) = SS.inner_loop(outer_loop_vars, p, None)
    test_tuple = (euler_errors, new_bmat, new_nmat, new_r, new_w,
                  new_T_H, new_Y, new_factor, new_BQ,
                  average_income_model)

    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/inner_loop_outputs.pkl'))

    for i, v in enumerate(expected_tuple):
        assert(np.allclose(test_tuple[i], v, atol=1e-05))
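Every example on this page reads saved test fixtures with utils.safe_read_pickle, which takes a path to a pickle file and returns the stored object. For reference, a minimal stand-in sketch is shown below, assuming the helper is essentially a thin wrapper around pickle.load; the real function in ogusa.utils may add extra compatibility handling.

# Minimal stand-in sketch for utils.safe_read_pickle (an assumption for
# illustration, not the library's actual implementation).
import pickle


def safe_read_pickle(file_path):
    """Return the object stored in the pickle file at file_path."""
    with open(file_path, "rb") as f:
        return pickle.load(f)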
Example #2
def parameter_read(request):
    ref_idx = request.param
    if ref_idx == "baseline":
        return (safe_read_pickle(REG_BASELINE + "/model_params.pkl"),
                safe_read_pickle(BASELINE + "/model_params.pkl"))
    else:
        return (safe_read_pickle(REG_REFORM.format(ref_idx=request.param)
                                 + "/model_params.pkl"),
                safe_read_pickle(REFORM.format(ref_idx=request.param)
                                 + "/model_params.pkl"))
Example #3
def tpi_output(request):
    ref_idx = request.param
    if ref_idx == "baseline":
        return (safe_read_pickle(REG_BASELINE + "/TPI/TPI_vars.pkl"),
                safe_read_pickle(BASELINE + "/TPI/TPI_vars.pkl"))
    else:
        return (safe_read_pickle(REG_REFORM.format(ref_idx=request.param)
                                 + "/TPI/TPI_vars.pkl"),
                safe_read_pickle(REFORM.format(ref_idx=request.param)
                                 + "/TPI/TPI_vars.pkl"))
Example #4
def test_twist_doughnut(file_inputs, file_outputs):
    '''
    Test TPI.twist_doughnut function.  Provide inputs to function and
    ensure that output returned matches what it has been before.
    '''
    input_tuple = utils.safe_read_pickle(file_inputs)
    test_list = TPI.twist_doughnut(*input_tuple)
    expected_list = utils.safe_read_pickle(file_outputs)

    assert(np.allclose(np.array(test_list), np.array(expected_list)))
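This variant of test_twist_doughnut receives the input and output pickle paths as arguments, so it is presumably wired up with pytest.mark.parametrize. A minimal sketch of that wiring follows; the paths mirror the twist_doughnut pickles referenced elsewhere on this page, and the stub body is only a placeholder.

# Sketch of the parametrization presumably used to feed test_twist_doughnut.
import os
import pytest

CUR_PATH = os.path.abspath(os.path.dirname(__file__))


@pytest.mark.parametrize(
    'file_inputs,file_outputs',
    [(os.path.join(CUR_PATH, 'test_io_data', 'twist_doughnut_inputs.pkl'),
      os.path.join(CUR_PATH, 'test_io_data', 'twist_doughnut_outputs.pkl'))])
def test_paths_point_to_pickles(file_inputs, file_outputs):
    # Placeholder body: the real test above loads these pickles with
    # utils.safe_read_pickle and compares the resulting lists.
    assert file_inputs.endswith('.pkl') and file_outputs.endswith('.pkl')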
Example #5
def test_inner_loop():
    # Test SS.inner_loop function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/inner_loop_inputs.pkl'))
    (outer_loop_vars_in, params, baseline, baseline_spending) = input_tuple
    ss_params, income_tax_params, chi_params, small_open_params = params
    (bssmat, nssmat, r, Y, T_H, factor) = outer_loop_vars_in
    p = Specifications()
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon, Z,
     p.delta, p.ltilde, p.nu, p.g_y, p.g_n_ss, tau_payroll, tau_bq, p.rho,
     p.omega_SS, p.budget_balance, alpha_T, p.debt_ratio_ss, tau_b, delta_tau,
     lambdas, imm_rates, p.e, retire, p.mean_income_data, h_wealth, p_wealth,
     m_wealth, p.b_ellipse, p.upsilon) = ss_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.alpha_T = np.ones(p.T + p.S) * alpha_T
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.lambdas = lambdas.reshape(p.J, 1)
    p.imm_rates = imm_rates.reshape(1, p.S)
    p.tax_func_type = 'DEP'
    p.baseline = baseline
    p.baseline_spending = baseline_spending
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(
        etr_params.reshape(p.S, 1, etr_params.shape[-1]), (1, 0, 2))
    p.mtrx_params = np.transpose(
        mtrx_params.reshape(p.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
    p.mtry_params = np.transpose(
        mtry_params.reshape(p.S, 1, mtry_params.shape[-1]), (1, 0, 2))
    p.chi_b, p.chi_n = chi_params
    p.small_open, firm_r, hh_r = small_open_params
    p.firm_r = np.ones(p.T + p.S) * firm_r
    p.hh_r = np.ones(p.T + p.S) * hh_r
    p.num_workers = 1
    BQ = np.ones(p.J) * 0.00019646295986015257
    outer_loop_vars = (bssmat, nssmat, r, BQ, Y, T_H, factor)
    (euler_errors, new_bmat, new_nmat, new_r, new_r_gov, new_r_hh, new_w,
     new_T_H, new_Y, new_factor, new_BQ,
     average_income_model) = SS.inner_loop(outer_loop_vars, p, None)
    test_tuple = (euler_errors, new_bmat, new_nmat, new_r, new_w, new_T_H,
                  new_Y, new_factor, new_BQ, average_income_model)

    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/inner_loop_outputs.pkl'))

    for i, v in enumerate(expected_tuple):
        assert (np.allclose(test_tuple[i], v, atol=1e-05))
Example #6
def tpi_output(request):
    ref_idx = request.param
    if ref_idx == "baseline":
        return (safe_read_pickle(REG_BASELINE + "/TPI/TPI_vars.pkl"),
                safe_read_pickle(BASELINE + "/TPI/TPI_vars.pkl"))
    else:
        return (safe_read_pickle(
            REG_REFORM.format(ref_idx=request.param) + "/TPI/TPI_vars.pkl"),
                safe_read_pickle(
                    REFORM.format(ref_idx=request.param) +
                    "/TPI/TPI_vars.pkl"))
Example #7
def parameter_read(request):
    ref_idx = request.param
    if ref_idx == "baseline":
        return (safe_read_pickle(REG_BASELINE + "/model_params.pkl"),
                safe_read_pickle(BASELINE + "/model_params.pkl"))
    else:
        return (safe_read_pickle(
            REG_REFORM.format(ref_idx=request.param) + "/model_params.pkl"),
                safe_read_pickle(
                    REFORM.format(ref_idx=request.param) +
                    "/model_params.pkl"))
Example #8
def test_inner_loop(dask_client):
    # Test TPI.inner_loop function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', 'tpi_inner_loop_inputs.pkl'))
    guesses, outer_loop_vars, params, j = input_tuple
    income_tax_params, tpi_params, initial_values, ind = params
    initial_values = initial_values[:-1]
    tpi_params = tpi_params
    p = Specifications(client=dask_client, num_workers=NUM_WORKERS)
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params
    p.eta = p.omega.reshape(p.T + p.S, p.S, 1) * p.lambdas.reshape(1, p.J)
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.tax_func_type = 'DEP'
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
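    # The transpose/slice below assumes each parameter array has shape
    # (S, T, #params); it puts time in the leading axis and keeps the
    # first T periods, giving shape (T, S, #params).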
    p.etr_params = np.transpose(etr_params, (1, 0, 2))[:p.T, :, :]
    p.mtrx_params = np.transpose(mtrx_params, (1, 0, 2))[:p.T, :, :]
    p.mtry_params = np.transpose(mtry_params, (1, 0, 2))[:p.T, :, :]
    p.lambdas = lambdas.reshape(p.J, 1)
    p.num_workers = 1
    (K0, b_sinit, b_splus1init, factor, initial_b, initial_n,
     p.omega_S_preTP, initial_debt) = initial_values
    initial_values_in = (K0, b_sinit, b_splus1init, factor, initial_b,
                         initial_n)
    (r, K, BQ, TR) = outer_loop_vars
    wss = firm.get_w_from_r(r[-1], p, 'SS')
    w = np.ones(p.T + p.S) * wss
    w[:p.T] = firm.get_w_from_r(r[:p.T], p, 'TPI')
    outer_loop_vars_in = (r, w, r, BQ, TR, theta)

    guesses = (guesses[0], guesses[1])
    test_tuple = TPI.inner_loop(guesses, outer_loop_vars_in,
                                initial_values_in, j, ind, p)

    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data',
                     'tpi_inner_loop_outputs.pkl'))

    for i, v in enumerate(expected_tuple):
        assert(np.allclose(test_tuple[i], v))
Example #9
def test_inner_loop():
    # Test TPI.inner_loop function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/tpi_inner_loop_inputs.pkl'))
    guesses, outer_loop_vars, params, j = input_tuple
    income_tax_params, tpi_params, initial_values, ind = params
    initial_values = initial_values
    tpi_params = tpi_params
    p = Specifications()
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.tax_func_type = 'DEP'
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(etr_params, (1, 0, 2))[:p.T, :, :]
    p.mtrx_params = np.transpose(mtrx_params, (1, 0, 2))[:p.T, :, :]
    p.mtry_params = np.transpose(mtry_params, (1, 0, 2))[:p.T, :, :]
    p.lambdas = lambdas.reshape(p.J, 1)
    p.num_workers = 1
    (K0, b_sinit, b_splus1init, factor, initial_b, initial_n,
     p.omega_S_preTP, initial_debt, D0) = initial_values
    initial_values_in = (K0, b_sinit, b_splus1init, factor, initial_b,
                         initial_n, D0)
    (r, K, BQ, T_H) = outer_loop_vars
    wss = firm.get_w_from_r(r[-1], p, 'SS')
    w = np.ones(p.T + p.S) * wss
    w[:p.T] = firm.get_w_from_r(r[:p.T], p, 'TPI')
    outer_loop_vars_in = (r, w, r, BQ, T_H, theta)

    guesses = (guesses[0], guesses[1])
    test_tuple = TPI.inner_loop(guesses, outer_loop_vars_in,
                                initial_values_in, j, ind, p)

    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/tpi_inner_loop_outputs.pkl'))

    for i, v in enumerate(expected_tuple):
        assert(np.allclose(test_tuple[i], v))
Example #10
def test_tax_func_loop():
    '''
    Test txfunc.tax_func_loop() function.  The test is that given
    inputs from previous run, the outputs are unchanged.

    Note that the data for this test is too large for GitHub, so it
    won't be available there.

    '''
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data',
                     'tax_func_loop_inputs_large.pkl'))
    (t, micro_data, beg_yr, s_min, s_max, age_specific, analytical_mtrs,
     desc_data, graph_data, graph_est, output_dir, numparams,
     tpers) = input_tuple
    tax_func_type = 'DEP'
    # Rename and create vars to suit new micro_data var names
    micro_data['total_labinc'] = (micro_data['Wage income'] +
                                  micro_data['SE income'])
    micro_data['etr'] = (micro_data['Total tax liability'] /
                         micro_data["Adjusted total income"])
    micro_data['total_capinc'] = (micro_data['Adjusted total income'] -
                                  micro_data['total_labinc'])
    # use weighted avg for MTR labor - abs value because
    # SE income may be negative
    micro_data['mtr_labinc'] = (
        micro_data['MTR wage income'] *
        (micro_data['Wage income'] /
         (micro_data['Wage income'].abs() + micro_data['SE income'].abs())) +
        micro_data['MTR SE income'] *
        (micro_data['SE income'].abs() /
         (micro_data['Wage income'].abs() + micro_data['SE income'].abs())))
    micro_data.rename(columns={
        'Adjusted total income': 'market_income',
        'MTR capital income': 'mtr_capinc',
        'Total tax liability': 'total_tax_liab',
        'Year': 'year',
        'Age': 'age',
        'expanded_income': 'market_income',
        'Weights': 'weight'
    },
                      inplace=True)
    micro_data['payroll_tax_liab'] = 0
    test_tuple = txfunc.tax_func_loop(t, micro_data, beg_yr, s_min, s_max,
                                      age_specific, tax_func_type,
                                      analytical_mtrs, desc_data, graph_data,
                                      graph_est, output_dir, numparams)
    age_specific = False
    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', 'tax_func_loop_outputs.pkl'))
    for i, v in enumerate(expected_tuple):
        assert (np.allclose(test_tuple[i], v))
Example #11
def test_twist_doughnut(file_inputs, file_outputs):
    '''
    Test TPI.twist_doughnut function.  Provide inputs to function and
    ensure that output returned matches what it has been before.
    '''
    input_tuple = utils.safe_read_pickle(file_inputs)
    (guesses, r, w, bq, tr, theta, factor, j, s, t, tau_c, etr_params,
     mtrx_params, mtry_params, initial_b, p) = input_tuple
    input_tuple = (guesses, r, w, bq, tr, theta, factor, j, s, t, tau_c,
                   etr_params, mtrx_params, mtry_params, initial_b, p)
    test_list = TPI.twist_doughnut(*input_tuple)
    expected_list = utils.safe_read_pickle(file_outputs)
    assert (np.allclose(np.array(test_list), np.array(expected_list)))
Example #12
def test_txfunc_est():
    # Test txfunc.txfunc_est() function.  The test is that given
    # inputs from previous run, the outputs are unchanged.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/txfunc_est_inputs.pkl'))
    (df, s, t, rate_type, output_dir, graph) = input_tuple
    tax_func_type = 'DEP'
    numparams = 12
    test_tuple = txfunc.txfunc_est(df, s, t, rate_type, tax_func_type,
                                   numparams, output_dir, graph)
    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/txfunc_est_outputs.pkl'))
    for i, v in enumerate(expected_tuple):
        assert (np.allclose(test_tuple[i], v))
Example #13
def txfunc_output(request):
    ref_idx = request.param
    if ref_idx == "baseline":
        reg_path = REG_BASELINE + "/TxFuncEst_{idx}.pkl".format(idx=ref_idx)
        path = BASELINE + "/TxFuncEst_{idx}.pkl".format(idx=ref_idx)

        return (safe_read_pickle(reg_path), safe_read_pickle(path))

    else:
        reg_path = (REG_REFORM.format(ref_idx=ref_idx) +
                    "/TxFuncEst_policy{idx}.pkl".format(idx=ref_idx))
        path = (REFORM.format(ref_idx=ref_idx) +
                "/TxFuncEst_policy{idx}.pkl".format(idx=ref_idx))

        return (safe_read_pickle(reg_path), safe_read_pickle(path))
Example #14
def test_txfunc_est():
    # Test txfunc.txfunc_est() function.  The test is that given
    # inputs from previous run, the outputs are unchanged.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/txfunc_est_inputs.pkl'))
    (df, s, t, rate_type, output_dir, graph) = input_tuple
    tax_func_type = 'DEP'
    numparams = 12
    test_tuple = txfunc.txfunc_est(df, s, t, rate_type, tax_func_type,
                                   numparams, output_dir, graph)
    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/txfunc_est_outputs.pkl'))
    for i, v in enumerate(expected_tuple):
        assert(np.allclose(test_tuple[i], v))
Example #15
def test_get_initial_SS_values(baseline, param_updates, filename, dask_client):
    p = Specifications(baseline=baseline,
                       test=False,
                       client=dask_client,
                       num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.baseline_dir = os.path.join(CUR_PATH, 'test_io_data', 'OUTPUT')
    p.output_base = os.path.join(CUR_PATH, 'test_io_data', 'OUTPUT')
    test_tuple = TPI.get_initial_SS_values(p)
    (test_initial_values, test_ss_vars, test_theta,
     test_baseline_values) = test_tuple
    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))

    (exp_initial_values, exp_ss_vars, exp_theta,
     exp_baseline_values) = expected_tuple

    for i, v in enumerate(exp_initial_values):
        assert (np.allclose(test_initial_values[i], v, equal_nan=True))

    if p.baseline_spending:
        for i, v in enumerate(exp_baseline_values):
            assert (np.allclose(test_baseline_values[i], v, equal_nan=True))

    assert (np.allclose(test_theta, exp_theta))

    for k, v in exp_ss_vars.items():
        assert (np.allclose(test_ss_vars[k], v, equal_nan=True))
Example #16
def test_twist_doughnut():
    # Test TPI.twist_doughnut function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/twist_doughnut_inputs.pkl'))
    guesses, r, w, BQ, T_H, j, s, t, params = input_tuple
    income_tax_params, tpi_params, initial_b = params
    tpi_params = tpi_params + [True]
    income_tax_params = ('DEP',) + income_tax_params
    params = (income_tax_params, tpi_params, initial_b)
    test_list = TPI.twist_doughnut(guesses, r, w, BQ, T_H, j, s, t, params)

    expected_list = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/twist_doughnut_outputs.pkl'))

    assert(np.allclose(np.array(test_list), np.array(expected_list)))
Example #17
def test_inner_loop(baseline, param_updates, filename, dask_client):
    # Test SS.inner_loop function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    p = Specifications(baseline=baseline,
                       client=dask_client,
                       num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.output_base = CUR_PATH
    p.get_tax_function_parameters(None, run_micro=False)
    bssmat = np.ones((p.S, p.J)) * 0.07
    nssmat = np.ones((p.S, p.J)) * .4 * p.ltilde
    if p.zeta_K[-1] == 1.0:
        r = p.world_int_rate[-1]
    else:
        r = 0.05
    TR = 0.12
    Y = 1.3
    factor = 100000
    BQ = np.ones(p.J) * 0.00019646295986015257
    if p.budget_balance:
        outer_loop_vars = (bssmat, nssmat, r, BQ, TR, factor)
    else:
        outer_loop_vars = (bssmat, nssmat, r, BQ, Y, TR, factor)
    test_tuple = SS.inner_loop(outer_loop_vars, p, None)
    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))
    for i, v in enumerate(expected_tuple):
        assert (np.allclose(test_tuple[i], v, atol=1e-05))
Example #18
def test_run_SS(baseline, param_updates, filename, dask_client):
    # Test SS.run_SS function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    if baseline is False:
        tax_func_path_baseline = os.path.join(CUR_PATH,
                                              'TxFuncEst_baseline.pkl')
        tax_func_path = os.path.join(CUR_PATH, 'TxFuncEst_policy.pkl')
        execute.runner(constants.BASELINE_DIR,
                       constants.BASELINE_DIR,
                       time_path=False,
                       baseline=True,
                       og_spec=param_updates,
                       run_micro=False,
                       tax_func_path=tax_func_path_baseline)
    else:
        tax_func_path = os.path.join(CUR_PATH, 'TxFuncEst_baseline.pkl')
    p = Specifications(baseline=baseline,
                       client=dask_client,
                       num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.get_tax_function_parameters(None,
                                  run_micro=False,
                                  tax_func_path=tax_func_path)
    test_dict = SS.run_SS(p, None)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))

    for k, v in expected_dict.items():
        assert (np.allclose(test_dict[k], v))
Example #19
def test_read_tax_func_estimate():
    specs = Specifications()
    tax_func_path = os.path.join(CUR_PATH, 'TxFuncEst_baseline.pkl')
    expected_dict = utils.safe_read_pickle(tax_func_path)
    test_dict, _ = specs.read_tax_func_estimate(tax_func_path)
    assert np.allclose(expected_dict['tfunc_avg_etr'],
                       test_dict['tfunc_avg_etr'])
Example #20
def test_txfunc_est_on_GH(rate_type, tax_func_type, numparams, expected_tuple,
                          tmpdir):
    """
    Test txfunc.txfunc_est() function.  The test is that given
    inputs from previous run, the outputs are unchanged.
    """
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, "test_io_data", "txfunc_est_inputs.pkl"))
    (df, s, t, _, output_dir, graph) = input_tuple
    output_dir = tmpdir
    # Put old df variables into new df var names
    df.rename(
        columns={
            "MTR labor income": "mtr_labinc",
            "MTR capital income": "mtr_capinc",
            "Total labor income": "total_labinc",
            "Total capital income": "total_capinc",
            "ETR": "etr",
            "expanded_income": "market_income",
            "Weights": "weight",
        },
        inplace=True,
    )
    test_tuple = txfunc.txfunc_est(df, s, t, rate_type, tax_func_type,
                                   numparams, output_dir, True)

    for i, v in enumerate(expected_tuple):
        assert np.allclose(test_tuple[i], v)
Example #21
def test_txfunc_est():
    '''
    Test txfunc.txfunc_est() function.  The test is that given
    inputs from previous run, the outputs are unchanged.
    '''
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', 'txfunc_est_inputs.pkl'))
    (df, s, t, rate_type, output_dir, graph) = input_tuple
    # Put old df variables into new df var names
    df.rename(columns={
        'MTR labor income': 'mtr_labinc',
        'MTR capital income': 'mtr_capinc',
        'Total labor income': 'total_labinc',
        'Total capital income': 'total_capinc',
        'ETR': 'etr',
        'expanded_income': 'market_income',
        'Weights': 'weight'
    },
              inplace=True)
    tax_func_type = 'DEP'
    numparams = 12
    test_tuple = txfunc.txfunc_est(df, s, t, rate_type, tax_func_type,
                                   numparams, output_dir, graph)
    expected_tuple = ((np.array([
        6.37000261e-22, 2.73401629e-03, 1.54672458e-08, 1.43446236e-02,
        2.32797367e-01, -3.69059719e-02, 1.00000000e-04, -1.01967001e-01,
        3.96030053e-02, 1.02987671e-01, -1.30433574e-01, 1.00000000e+00
    ]), 19527.16203007729, 3798))

    for i, v in enumerate(expected_tuple):
        assert (np.allclose(test_tuple[i], v))
Example #22
def test_SS_solver(baseline, param_updates, filename, dask_client):
    # Test SS.SS_solver function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    p = Specifications(baseline=baseline,
                       client=dask_client,
                       num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.output_base = CUR_PATH
    p.get_tax_function_parameters(None, run_micro=False)
    b_guess = np.ones((p.S, p.J)) * 0.07
    n_guess = np.ones((p.S, p.J)) * .35 * p.ltilde
    if p.zeta_K[-1] == 1.0:
        rguess = p.world_int_rate[-1]
    else:
        rguess = 0.06483431412921253
    TRguess = 0.05738932081035772
    factorguess = 139355.1547340256
    BQguess = aggregates.get_BQ(rguess, b_guess, None, p, 'SS', False)
    Yguess = 0.6376591201150815

    test_dict = SS.SS_solver(b_guess, n_guess, rguess, BQguess, TRguess,
                             factorguess, Yguess, p, None, False)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))

    for k, v in expected_dict.items():
        print('Testing ', k)
        assert (np.allclose(test_dict[k], v, atol=1e-07, equal_nan=True))
Example #23
def test_inner_loop():
    # Test SS.inner_loop function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/inner_loop_inputs.pkl'))
    (outer_loop_vars, params, baseline, baseline_spending) = input_tuple
    ss_params, income_tax_params, chi_params, small_open_params = params
    income_tax_params = ('DEP', ) + income_tax_params
    params = (ss_params, income_tax_params, chi_params, small_open_params)
    test_tuple = SS.inner_loop(outer_loop_vars, params, baseline,
                               baseline_spending)

    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/inner_loop_outputs.pkl'))

    for i, v in enumerate(expected_tuple):
        assert (np.allclose(test_tuple[i], v))
Example #24
def test_tax_data_sample():
    """
    Test of txfunc.tax_data_sample() function
    """
    data = utils.safe_read_pickle(
        os.path.join(CUR_PATH, "test_io_data",
                     "micro_data_dict_for_tests.pkl"))
    df = txfunc.tax_data_sample(data["2030"])
    assert isinstance(df, pd.DataFrame)
Example #25
def test_run_SS(input_path, expected_path):
    # Test SS.run_SS function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', input_path))
    (income_tax_params, ss_params, iterative_params, chi_params,
     small_open_params, baseline, baseline_spending, baseline_dir) =\
        input_tuple
    income_tax_params = ('DEP', ) + income_tax_params
    test_dict = SS.run_SS(income_tax_params, ss_params, iterative_params,
                          chi_params, small_open_params, baseline,
                          baseline_spending, baseline_dir)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', expected_path))

    for k, v in expected_dict.items():
        assert (np.allclose(test_dict[k], v))
Example #26
def test_SS_fsolve_reform():
    # Test SS.SS_fsolve_reform function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/SS_fsolve_reform_inputs.pkl'))
    guesses, params = input_tuple
    params = params + (None, 1)
    (bssmat, nssmat, chi_params, ss_params, income_tax_params,
     iterative_params, factor, small_open_params, client, num_workers) = params
    income_tax_params = ('DEP', ) + income_tax_params
    params = (bssmat, nssmat, chi_params, ss_params, income_tax_params,
              iterative_params, factor, small_open_params, client, num_workers)
    test_list = SS.SS_fsolve_reform(guesses, params)

    expected_list = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/SS_fsolve_reform_outputs.pkl'))

    assert (np.allclose(np.array(test_list), np.array(expected_list)))
Example #27
def test_tax_data_sample():
    '''
    Test of txfunc.tax_data_sample() function
    '''
    data = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data',
                     'micro_data_dict_for_tests.pkl'))
    df = txfunc.tax_data_sample(data['2030'])
    assert isinstance(df, pd.DataFrame)
Example #28
def test_firstdoughnutring(dask_client):
    # Test TPI.firstdoughnutring function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', 'firstdoughnutring_inputs.pkl'))
    guesses, r, w, b, BQ, TR, j, params = input_tuple
    income_tax_params, tpi_params, initial_b = params
    tpi_params = tpi_params + [True]
    p = Specifications(client=dask_client, num_workers=NUM_WORKERS)
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params
    p.Z = np.ones(p.T + p.S) * Z

    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.tax_func_type = 'DEP'
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(etr_params, (1, 0, 2))
    p.mtrx_params = np.transpose(mtrx_params, (1, 0, 2))
    p.mtry_params = np.transpose(mtry_params, (1, 0, 2))
    p.lambdas = lambdas.reshape(p.J, 1)
    p.num_workers = 1
    bq = BQ / p.lambdas[j]
    tr = TR
    test_list = TPI.firstdoughnutring(guesses, r, w, bq, tr, theta,
                                      factor, j, initial_b, p)

    expected_list = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data',
                     'firstdoughnutring_outputs.pkl'))

    assert(np.allclose(np.array(test_list), np.array(expected_list)))
Example #29
def test_inner_loop():
    # Test TPI.inner_loop function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/tpi_inner_loop_inputs.pkl'))
    guesses, outer_loop_vars, params, j = input_tuple
    income_tax_params, tpi_params, initial_values, ind = params
    initial_values = initial_values #+ (0.0,)
    tpi_params = tpi_params #+ [True]
    income_tax_params = ('DEP',) + income_tax_params
    params = (income_tax_params, tpi_params, initial_values, ind)
    guesses = (guesses[0], guesses[1])
    test_tuple = TPI.inner_loop(guesses, outer_loop_vars, params, j)

    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/tpi_inner_loop_outputs.pkl'))

    for i, v in enumerate(expected_tuple):
        assert(np.allclose(test_tuple[i], v))
Example #30
def test_twist_doughnut():
    # Test TPI.twist_doughnut function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', 'twist_doughnut_inputs.pkl'))
    guesses, r, w, BQ, TR, j, s, t, params = input_tuple
    income_tax_params, tpi_params, initial_b = params
    tpi_params = tpi_params + [True]
    p = Specifications()
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_c = np.ones((p.T + p.S, p.S, p.J)) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.tax_func_type = 'DEP'
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.lambdas = lambdas.reshape(p.J, 1)
    p.num_workers = 1
    length = int(len(guesses) / 2)
    tau_c_to_use = np.diag(p.tau_c[:p.S, :, j], p.S - (s + 2))
    bq = BQ[t:t + length] / p.lambdas[j]
    tr = TR[t:t + length]
    test_list = TPI.twist_doughnut(guesses, r, w, bq, tr, theta,
                                   factor, j, s, t, tau_c_to_use,
                                   etr_params, mtrx_params, mtry_params,
                                   initial_b, p)
    expected_list = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', 'twist_doughnut_outputs.pkl'))

    assert(np.allclose(np.array(test_list), np.array(expected_list)))
Example #31
def test_run_TPI(baseline, param_updates, filename, tmp_path, dask_client):
    '''
    Test TPI.run_TPI function.  Provide inputs to function and
    ensure that output returned matches what it has been before.
    '''
    baseline_dir = os.path.join(CUR_PATH, 'baseline')
    if baseline:
        output_base = baseline_dir
    else:
        output_base = os.path.join(CUR_PATH, 'reform')
    p = Specifications(baseline=baseline,
                       baseline_dir=baseline_dir,
                       output_base=output_base,
                       test=True,
                       client=dask_client,
                       num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.maxiter = 2  # this test runs through just two iterations
    p.get_tax_function_parameters(None,
                                  run_micro=False,
                                  tax_func_path=os.path.join(
                                      CUR_PATH, '..', 'data', 'tax_functions',
                                      'TxFuncEst_baseline_CPS.pkl'))

    # Need to run SS first to get results
    SS.ENFORCE_SOLUTION_CHECKS = False
    ss_outputs = SS.run_SS(p, None)

    if p.baseline:
        utils.mkdirs(os.path.join(p.baseline_dir, "SS"))
        ss_dir = os.path.join(p.baseline_dir, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)
    else:
        utils.mkdirs(os.path.join(p.output_base, "SS"))
        ss_dir = os.path.join(p.output_base, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)

    TPI.ENFORCE_SOLUTION_CHECKS = False
    test_dict = TPI.run_TPI(p, None)
    expected_dict = utils.safe_read_pickle(filename)

    for k, v in expected_dict.items():
        try:
            assert (np.allclose(test_dict[k][:p.T],
                                v[:p.T],
                                rtol=1e-04,
                                atol=1e-04))
        except ValueError:
            assert (np.allclose(test_dict[k][:p.T, :, :],
                                v[:p.T, :, :],
                                rtol=1e-04,
                                atol=1e-04))
Example #32
def test_twist_doughnut():
    # Test TPI.twist_doughnut function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/twist_doughnut_inputs.pkl'))
    guesses, r, w, BQ, T_H, j, s, t, params = input_tuple
    income_tax_params, tpi_params, initial_b = params
    tpi_params = tpi_params + [True]
    p = Specifications()
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_c = np.ones((p.T + p.S, p.S, p.J)) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.tax_func_type = 'DEP'
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.lambdas = lambdas.reshape(p.J, 1)
    p.num_workers = 1
    length = int(len(guesses) / 2)
    tau_c_to_use = np.diag(p.tau_c[:p.S, :, j], p.S - (s + 2))
    bq = BQ[t:t + length] / p.lambdas[j]
    test_list = TPI.twist_doughnut(guesses, r, w, bq, T_H, theta,
                                   factor, j, s, t, tau_c_to_use,
                                   etr_params, mtrx_params, mtry_params,
                                   initial_b, p)
    expected_list = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/twist_doughnut_outputs.pkl'))

    assert(np.allclose(np.array(test_list), np.array(expected_list)))
Example #33
def test_taxcalc_advance():
    """
    Test of the get_micro_data.taxcalc_advance() function

    Note that this test may fail if the Tax-Calculator is not v 3.0.0
    """
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, "test_io_data", "tax_dict_for_tests.pkl")
    )
    test_dict = get_micro_data.taxcalc_advance(True, 2028, {}, "cps", 2028)
    for k, v in test_dict.items():
        assert np.allclose(expected_dict[k], v, equal_nan=True)
Example #34
def test_get_data():
    '''
    Test of get_micro_data.get_data() function
    '''
    expected_data = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data',
                     'micro_data_dict_for_tests.pkl'))
    test_data, _ = get_micro_data.get_data(
        baseline=True, start_year=2028, reform={}, data='cps',
        client=None, num_workers=1)
    for k, v in test_data.items():
        assert_frame_equal(expected_data[k], v)
Example #35
def test_SS_solver():
    # Test SS.SS_solver function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/SS_solver_inputs.pkl'))
    (b_guess_init, n_guess_init, rss, T_Hss, factor_ss, Yss, params, baseline,
     fsolve_flag, baseline_spending) = input_tuple
    (bssmat, nssmat, chi_params, ss_params, income_tax_params,
     iterative_params, small_open_params) = params
    income_tax_params = ('DEP', ) + income_tax_params
    params = (bssmat, nssmat, chi_params, ss_params, income_tax_params,
              iterative_params, small_open_params)
    test_dict = SS.SS_solver(b_guess_init, n_guess_init, rss, T_Hss, factor_ss,
                             Yss, params, baseline, fsolve_flag,
                             baseline_spending)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/SS_solver_outputs.pkl'))

    for k, v in expected_dict.items():
        assert (np.allclose(test_dict[k], v))
Example #36
def test_run_TPI():
    # Test TPI.run_TPI function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/run_TPI_inputs.pkl'))
    (income_tax_params, tpi_params, iterative_params, small_open_params,
     initial_values, SS_values, fiscal_params, biz_tax_params,
     output_dir, baseline_spending) = input_tuple
    tpi_params = tpi_params + [True]
    initial_values = initial_values + (0.0,)
    income_tax_params = ('DEP',) + income_tax_params
    test_dict = TPI.run_TPI(
        income_tax_params, tpi_params, iterative_params,
        small_open_params, initial_values, SS_values, fiscal_params,
        biz_tax_params, output_dir, baseline_spending)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/run_TPI_outputs.pkl'))

    for k, v in expected_dict.items():
        assert(np.allclose(test_dict[k], v))
Example #37
def test_euler_equation_solver():
    # Test SS.inner_loop function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/euler_eqn_solver_inputs.pkl'))
    (guesses, params) = input_tuple
    p = Specifications()
    (r, w, T_H, factor, j, p.J, p.S, p.beta, p.sigma, p.ltilde, p.g_y,
     p.g_n_ss, tau_payroll, retire, p.mean_income_data, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, j, p.chi_b,
     p.chi_n, tau_bq, p.rho, lambdas, p.omega_SS, p.e,
     p.analytical_mtrs, etr_params, mtrx_params, mtry_params) = params
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.etr_params = np.transpose(etr_params.reshape(
        p.S, 1, etr_params.shape[-1]), (1, 0, 2))
    p.mtrx_params = np.transpose(mtrx_params.reshape(
        p.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
    p.mtry_params = np.transpose(mtry_params.reshape(
        p.S, 1, mtry_params.shape[-1]), (1, 0, 2))
    p.tax_func_type = 'DEP'
    p.lambdas = lambdas.reshape(p.J, 1)
    b_splus1 = np.array(guesses[:p.S]).reshape(p.S, 1) + 0.005
    BQ = aggregates.get_BQ(r, b_splus1, j, p, 'SS', False)
    bq = household.get_bq(BQ, j, p, 'SS')
    args = (r, w, bq, T_H, factor, j, p)
    test_list = SS.euler_equation_solver(guesses, *args)

    expected_list = np.array([
        -3.62741663e+00, -6.30068841e+00, -6.76592886e+00,
        -6.97731223e+00, -7.05777777e+00, -6.57305440e+00,
        -7.11553046e+00, -7.30569622e+00, -7.45808107e+00,
        -7.89984062e+00, -8.11466111e+00, -8.28230086e+00,
        -8.79253862e+00, -8.86994311e+00, -9.31299476e+00,
        -9.80834199e+00, -9.97333771e+00, -1.08349979e+01,
        -1.13199826e+01, -1.22890930e+01, -1.31550471e+01,
        -1.42753713e+01, -1.55721098e+01, -1.73811490e+01,
        -1.88856303e+01, -2.09570569e+01, -2.30559500e+01,
        -2.52127149e+01, -2.76119605e+01, -3.03141128e+01,
        -3.30900203e+01, -3.62799730e+01, -3.91169706e+01,
        -4.24246421e+01, -4.55740527e+01, -4.92914871e+01,
        -5.30682805e+01, -5.70043846e+01, -6.06075991e+01,
        -6.45251018e+01, -6.86128365e+01, -7.35896515e+01,
        -7.92634608e+01, -8.34733231e+01, -9.29802390e+01,
        -1.01179788e+02, -1.10437881e+02, -1.20569527e+02,
        -1.31569973e+02, -1.43633399e+02, -1.57534056e+02,
        -1.73244610e+02, -1.90066728e+02, -2.07980863e+02,
        -2.27589046e+02, -2.50241670e+02, -2.76314755e+02,
        -3.04930986e+02, -3.36196973e+02, -3.70907934e+02,
        -4.10966644e+02, -4.56684022e+02, -5.06945218e+02,
        -5.61838645e+02, -6.22617808e+02, -6.90840503e+02,
        -7.67825713e+02, -8.54436805e+02, -9.51106365e+02,
        -1.05780305e+03, -1.17435473e+03, -1.30045062e+03,
        -1.43571221e+03, -1.57971603e+03, -1.73204264e+03,
        -1.88430524e+03, -2.03403679e+03, -2.17861987e+03,
        -2.31532884e+03, -8.00654731e+03, -5.21487172e-02,
        -2.80234170e-01, 4.93894552e-01, 3.11884938e-01, 6.55799607e-01,
        5.62182419e-01,  3.86074983e-01,  3.43741491e-01,  4.22461089e-01,
        3.63707951e-01,  4.93150010e-01,  4.72813688e-01,  4.07390308e-01,
        4.94974186e-01,  4.69900128e-01,  4.37562389e-01,  5.67370182e-01,
        4.88965362e-01,  6.40728461e-01,  6.14619979e-01,  4.97173823e-01,
        6.19549666e-01,  6.51193557e-01,  4.48906118e-01,  7.93091492e-01,
        6.51249363e-01,  6.56307713e-01,  1.12948552e+00,  9.50018058e-01,
        6.79613030e-01,  9.51359123e-01,  6.31059147e-01,  7.97896887e-01,
        8.44620817e-01,  7.43683837e-01,  1.56693187e+00,  2.75630011e-01,
        5.32956891e-01,  1.57110727e+00,  1.22674610e+00, 4.63932928e-01,
        1.47225464e+00,  1.16948107e+00,  1.07965795e+00, -3.20557791e-01,
        -1.17064127e+00, -7.84880649e-01, -7.60851182e-01, -1.61415945e+00,
        -8.30363975e-01, -1.68459409e+00, -1.49260581e+00, -1.84257084e+00,
        -1.72143079e+00, -1.43131579e+00, -1.63719219e+00, -1.43874851e+00,
        -1.57207905e+00, -1.72909159e+00, -1.98778122e+00, -1.80843826e+00,
        -2.12828312e+00, -2.24768762e+00, -2.36961877e+00, -2.49117258e+00,
        -2.59914065e+00, -2.82309085e+00, -2.93613362e+00, -3.34446991e+00,
        -3.45445086e+00, -3.74962140e+00, -3.78113417e+00, -4.55643800e+00,
        -4.86929016e+00, -5.08657898e+00, -5.22054177e+00, -5.54606515e+00,
        -5.78478304e+00, -5.93652041e+00, -6.11519786e+00])

    assert(np.allclose(np.array(test_list), np.array(expected_list)))
Example #38
def dump_diff_output(baseline_dir, policy_dir):
    '''
    --------------------------------------------------------------------
    This function reads the pickles with the SS and time path results
    from the baseline and reform and then calculates the percentage
    differences between the two for each year in the 10-year budget
    window, over the entire budget window, and in the SS.
    --------------------------------------------------------------------

    INPUTS:
    baseline_dir = string, path for directory with baseline policy results
    policy_dir   = string, path for directory with reform policy results

    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION: None

    OBJECTS CREATED WITHIN FUNCTION:
    tpi_macro_vars_policy_path   = string, path to pickle with time path
                                    results for reform
    tpi_macro_vars_policy        = dictionary, dictionary with numpy arrays of
                                    results from transition path equilibrium for reform
    tpi_macro_vars_baseline_path = string, path to pickle with time path
                                    results for baseline policy
    tpi_macro_vars_baseline      = dictionary, dictionary with numpy arrays of
                                    results from transition path equilibrium for baseline policy
    baseline_macros              = [7,T] array, numpy array with time path for relevant macro
                                    variables from baseline equilibrium
    policy_macros                = [7,T] array, numpy array with time path for relevant macro
                                    variables from reform equilibrium
    pct_changes                  = [7,12] array, numpy array with pct changes in macro variables
                                    from baseline to reform for each year
                                    in the budget window (10 years), over all 10 years, and in the SS
    ss_policy_path               = string, path to pickle of SS results for reform
    ss_policy                    = dictionary, dictionary with numpy arrays of results from
                                    SS equilibrium for reform
    ss_baseline_path             = string, path to pickle of SS results for baseline
    ss_baseline                  = dictionary, dictionary with numpy arrays of results from
                                    SS equilibrium for baseline

    RETURNS: pct_changes
    --------------------------------------------------------------------
    '''

    # read macro output
    tpi_baseline_dir = os.path.join(baseline_dir, "TPI")
    tpi_policy_dir = os.path.join(policy_dir, "TPI")
    if not os.path.exists(tpi_policy_dir):
        os.mkdir(tpi_policy_dir)
    tpi_macro_vars_policy_path = os.path.join(tpi_policy_dir,
                                              "TPI_vars.pkl")
    tpi_macro_vars_policy = safe_read_pickle(tpi_macro_vars_policy_path)
    tpi_macro_vars_baseline_path = os.path.join(tpi_baseline_dir,
                                                "TPI_vars.pkl")
    tpi_macro_vars_baseline = safe_read_pickle(tpi_macro_vars_baseline_path)

    T = len(tpi_macro_vars_baseline['C'])
    baseline_macros = np.zeros((7, T))
    baseline_macros[0, :] = tpi_macro_vars_baseline['Y'][:T]
    baseline_macros[1, :] = tpi_macro_vars_baseline['C'][:T]
    baseline_macros[2, :] = tpi_macro_vars_baseline['I'][:T]
    baseline_macros[3, :] = tpi_macro_vars_baseline['L'][:T]
    baseline_macros[4, :] = tpi_macro_vars_baseline['w'][:T]
    baseline_macros[5, :] = tpi_macro_vars_baseline['r'][:T]
    baseline_macros[6, :] = tpi_macro_vars_baseline['total_revenue'][:T]

    policy_macros = np.zeros((7, T))
    policy_macros[0, :] = tpi_macro_vars_policy['Y'][:T]
    policy_macros[1, :] = tpi_macro_vars_policy['C'][:T]
    policy_macros[2, :] = tpi_macro_vars_policy['I'][:T]
    policy_macros[3, :] = tpi_macro_vars_policy['L'][:T]
    policy_macros[4, :] = tpi_macro_vars_policy['w'][:T]
    policy_macros[5, :] = tpi_macro_vars_policy['r'][:T]
    policy_macros[6, :] = tpi_macro_vars_policy['total_revenue'][:T]

    pct_changes = np.zeros((7, 12))
    # pct changes for each year in budget window
    pct_changes[:, :10] = ((policy_macros-baseline_macros) /
                           policy_macros)[:, :10]
    # pct changes over entire budget window
    pct_changes[:, 10] = ((policy_macros[:, :10].sum(axis=1) -
                           baseline_macros[:, :10].sum(axis=1)) /
                          policy_macros[:, :10].sum(axis=1))

    # Load SS results
    ss_policy_path = os.path.join(policy_dir, "SS", "SS_vars.pkl")
    ss_policy = safe_read_pickle(ss_policy_path)
    ss_baseline_path = os.path.join(baseline_dir, "SS", "SS_vars.pkl")
    ss_baseline = safe_read_pickle(ss_baseline_path)
    # pct changes in macro aggregates in SS
    pct_changes[0, 11] = ((ss_policy['Yss'] - ss_baseline['Yss']) /
                          ss_baseline['Yss'])
    pct_changes[1, 11] = ((ss_policy['Css'] - ss_baseline['Css']) /
                          ss_baseline['Css'])
    pct_changes[2, 11] = ((ss_policy['Iss'] - ss_baseline['Iss']) /
                          ss_baseline['Iss'])
    pct_changes[3, 11] = ((ss_policy['Lss'] - ss_baseline['Lss']) /
                          ss_baseline['Lss'])
    pct_changes[4, 11] = ((ss_policy['wss'] - ss_baseline['wss']) /
                          ss_baseline['wss'])
    pct_changes[5, 11] = ((ss_policy['rss'] - ss_baseline['rss']) /
                          ss_baseline['rss'])
    pct_changes[6, 11] = ((ss_policy['total_revenue_ss'] -
                           ss_baseline['total_revenue_ss']) /
                          ss_baseline['total_revenue_ss'])

    return pct_changes, baseline_macros, policy_macros
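A hypothetical call of dump_diff_output; the directory names are placeholders for wherever the baseline and reform model runs were saved.

# Hypothetical usage sketch; the directories below are placeholders, not
# paths taken from the source.
pct_changes, baseline_macros, policy_macros = dump_diff_output(
    baseline_dir='./OUTPUT_BASELINE', policy_dir='./OUTPUT_REFORM')
print(pct_changes[:, 11])  # steady-state percent changes for the 7 macro series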
Example #39
from __future__ import print_function
import pytest
import numpy as np
import os
from ogusa import SS, utils, aggregates, household
from ogusa.parameters import Specifications
CUR_PATH = os.path.abspath(os.path.dirname(__file__))

input_tuple = utils.safe_read_pickle(
    os.path.join(CUR_PATH, 'test_io_data/SS_fsolve_inputs.pkl'))
guesses_in, params = input_tuple
params = params + (None, 1)
(bssmat, nssmat, chi_params, ss_params, income_tax_params,
 iterative_params, small_open_params, client, num_workers) = params
p1 = Specifications()
(p1.J, p1.S, p1.T, p1.BW, p1.beta, p1.sigma, p1.alpha, p1.gamma, p1.epsilon,
 Z, p1.delta, p1.ltilde, p1.nu, p1.g_y, p1.g_n_ss, tau_payroll,
 tau_bq, p1.rho, p1.omega_SS, p1.budget_balance, alpha_T,
 p1.debt_ratio_ss, tau_b, delta_tau, lambdas, imm_rates, p1.e,
 retire, p1.mean_income_data, h_wealth, p_wealth, m_wealth,
 p1.b_ellipse, p1.upsilon) = ss_params
p1.Z = np.ones(p1.T + p1.S) * Z
p1.tau_bq = np.ones(p1.T + p1.S) * 0.0
p1.tau_payroll = np.ones(p1.T + p1.S) * tau_payroll
p1.alpha_T = np.ones(p1.T + p1.S) * alpha_T
p1.tau_b = np.ones(p1.T + p1.S) * tau_b
p1.delta_tau = np.ones(p1.T + p1.S) * delta_tau
p1.h_wealth = np.ones(p1.T + p1.S) * h_wealth
p1.p_wealth = np.ones(p1.T + p1.S) * p_wealth
p1.m_wealth = np.ones(p1.T + p1.S) * m_wealth
p1.retire = (np.ones(p1.T + p1.S) * retire).astype(int)
Example #40
def test_run_SS(input_path, expected_path):
    # Test SS.run_SS function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', input_path))
    (income_tax_params, ss_params, iterative_params, chi_params,
     small_open_params, baseline, baseline_spending, baseline_dir) =\
        input_tuple
    p = Specifications()
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n_ss, tau_payroll,
     tau_bq, p.rho, p.omega_SS, p.budget_balance, alpha_T,
     p.debt_ratio_ss, tau_b, delta_tau, lambdas, imm_rates, p.e,
     retire, p.mean_income_data, h_wealth, p_wealth, m_wealth,
     p.b_ellipse, p.upsilon) = ss_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.alpha_T = np.ones(p.T + p.S) * alpha_T
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.lambdas = lambdas.reshape(p.J, 1)
    p.imm_rates = imm_rates.reshape(1, p.S)
    p.tax_func_type = 'DEP'
    p.baseline = baseline
    p.baseline_spending = baseline_spending
    p.baseline_dir = baseline_dir
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(etr_params.reshape(
        p.S, 1, etr_params.shape[-1]), (1, 0, 2))
    p.mtrx_params = np.transpose(mtrx_params.reshape(
        p.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
    p.mtry_params = np.transpose(mtry_params.reshape(
        p.S, 1, mtry_params.shape[-1]), (1, 0, 2))
    p.maxiter, p.mindist_SS = iterative_params
    p.chi_b, p.chi_n = chi_params
    p.small_open, firm_r, hh_r = small_open_params
    p.firm_r = np.ones(p.T + p.S) * firm_r
    p.hh_r = np.ones(p.T + p.S) * hh_r
    p.num_workers = 1
    test_dict = SS.run_SS(p, None)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', expected_path))

    # delete values key-value pairs that are not in both dicts
    del expected_dict['bssmat'], expected_dict['chi_n'], expected_dict['chi_b']
    del test_dict['etr_ss'], test_dict['mtrx_ss'], test_dict['mtry_ss']
    test_dict['IITpayroll_revenue'] = (test_dict['total_revenue_ss'] -
                                       test_dict['business_revenue'])
    del test_dict['T_Pss'], test_dict['T_BQss'], test_dict['T_Wss']
    del test_dict['resource_constraint_error'], test_dict['T_Css']
    del test_dict['r_gov_ss'], test_dict['r_hh_ss']
    test_dict['revenue_ss'] = test_dict.pop('total_revenue_ss')

    for k, v in expected_dict.items():
        assert(np.allclose(test_dict[k], v))
Example #41
def test_run_TPI():
    # Test TPI.run_TPI function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/run_TPI_inputs.pkl'))
    (income_tax_params, tpi_params, iterative_params, small_open_params,
     initial_values, SS_values, fiscal_params, biz_tax_params,
     output_dir, baseline_spending) = input_tuple
    tpi_params = tpi_params + [True]
    initial_values = initial_values + (0.0,)

    p = Specifications()
    (J, S, T, BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params

    new_param_values = {
        'J': J,
        'S': S,
        'T': T
    }
    # update parameters instance with new values for test
    p.update_specifications(new_param_values, raise_errors=False)
    (J, S, T, BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.small_open, ss_firm_r, ss_hh_r = small_open_params
    p.ss_firm_r = np.ones(p.T + p.S) * ss_firm_r
    p.ss_hh_r = np.ones(p.T + p.S) * ss_hh_r
    p.maxiter, p.mindist_SS, p.mindist_TPI = iterative_params
    (p.budget_balance, alpha_T, alpha_G, p.tG1, p.tG2, p.rho_G,
     p.debt_ratio_ss) = fiscal_params
    p.alpha_T = np.concatenate((alpha_T, np.ones(40) * alpha_T[-1]))
    p.alpha_G = np.concatenate((alpha_G, np.ones(40) * alpha_G[-1]))
    (tau_b, delta_tau) = biz_tax_params
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(etr_params, (1, 0, 2))[:p.T, :, :]
    p.mtrx_params = np.transpose(mtrx_params, (1, 0, 2))[:p.T, :, :]
    p.mtry_params = np.transpose(mtry_params, (1, 0, 2))[:p.T, :, :]
    p.lambdas = lambdas.reshape(p.J, 1)
    p.output = output_dir
    p.baseline_spending = baseline_spending
    p.num_workers = 1
    (K0, b_sinit, b_splus1init, factor, initial_b, initial_n,
     p.omega_S_preTP, initial_debt, D0) = initial_values

    # Need to run SS first to get results
    ss_outputs = SS.run_SS(p, None)

    if p.baseline:
        utils.mkdirs(os.path.join(p.baseline_dir, "SS"))
        ss_dir = os.path.join(p.baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(p.output_base, "SS"))
        ss_dir = os.path.join(p.output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))

    test_dict = TPI.run_TPI(p, None)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/run_TPI_outputs.pkl'))

    # delete values key-value pairs that are not in both dicts
    del test_dict['etr_path'], test_dict['mtrx_path'], test_dict['mtry_path']
    del test_dict['bmat_s']
    test_dict['b_mat'] = test_dict.pop('bmat_splus1')
    test_dict['REVENUE'] = test_dict.pop('total_revenue')
    test_dict['IITpayroll_revenue'] = (test_dict['REVENUE'][:160] -
                                       test_dict['business_revenue'])
    del test_dict['T_P'], test_dict['T_BQ'], test_dict['T_W']
    del test_dict['resource_constraint_error'], test_dict['T_C']
    del test_dict['r_gov'], test_dict['r_hh']

    for k, v in expected_dict.items():
        try:
            assert(np.allclose(test_dict[k], v, rtol=1e-04, atol=1e-04))
        except ValueError:
            assert(np.allclose(test_dict[k], v[:p.T, :, :], rtol=1e-04,
                               atol=1e-04))