Example #1
def test_SS_solver_extra(baseline, param_updates, filename, dask_client):
    # Test the SS.SS_solver function.  Provide inputs to the function and
    # ensure that the output matches previously saved results.
    p = Specifications(baseline=baseline, num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.output_base = CUR_PATH
    b_guess = np.ones((p.S, p.J)) * 0.07
    n_guess = np.ones((p.S, p.J)) * .35 * p.ltilde
    if p.zeta_K[-1] == 1.0:
        rguess = p.world_int_rate[-1]
    else:
        rguess = 0.06483431412921253
    TRguess = 0.05738932081035772
    factorguess = 139355.1547340256
    BQguess = aggregates.get_BQ(rguess, b_guess, None, p, 'SS', False)
    Yguess = 0.6376591201150815

    test_dict = SS.SS_solver(b_guess, n_guess, rguess, BQguess, TRguess,
                             factorguess, Yguess, p, dask_client, False)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))

    for k, v in expected_dict.items():
        print('Testing ', k)
        assert (np.allclose(test_dict[k], v, atol=1e-05, equal_nan=True))
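
# The baseline, param_updates, and filename arguments above are supplied by a
# pytest parametrization that is not included in this snippet.  A minimal,
# hypothetical sketch of that wiring (the case values below are placeholders,
# not the project's actual test cases):
import pytest

@pytest.mark.parametrize(
    'baseline,param_updates,filename',
    [(True, {}, 'SS_solver_outputs_placeholder.pkl')],
    ids=['baseline-placeholder'])
def test_SS_solver_sketch(baseline, param_updates, filename):
    pass  # the real body is test_SS_solver_extra above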
Example #2
def test_D_G_path(baseline_spending, Y, TR, Revenue, Gbaseline, budget_balance,
                  expected_tuple):
    p = Specifications()
    new_param_values = {
        'T': 320,
        'S': 80,
        'debt_ratio_ss': 1.2,
        'tG1': 20,
        'tG2': 256,
        'alpha_T': [0.09],
        'alpha_G': [0.05],
        'rho_G': 0.1,
        'g_y_annual': 0.03,
        'baseline_spending': baseline_spending,
        'budget_balance': budget_balance
    }
    p.update_specifications(new_param_values, raise_errors=False)
    r_gov = np.ones(p.T + p.S) * 0.03
    p.g_n = np.ones(p.T + p.S) * 0.02
    D0_baseline = 0.59
    Gbaseline[0] = 0.05
    net_revenue = Revenue
    pension_amount = np.zeros_like(net_revenue)
    UBI_outlays = np.zeros_like(net_revenue)
    dg_fixed_values = (Y, Revenue, pension_amount, UBI_outlays, TR, Gbaseline,
                       D0_baseline)
    test_tuple = fiscal.D_G_path(r_gov, dg_fixed_values, p)
    for i, v in enumerate(test_tuple):
        assert np.allclose(v[:p.T], expected_tuple[i][:p.T])
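
# For context: the debt path returned by fiscal.D_G_path follows the period
# government budget constraint.  In stationarized form it is roughly
#
#   D[t+1] ~= ((1 + r_gov[t]) * D[t] + G[t] + TR[t] + UBI[t] + pension[t]
#              - net_revenue[t]) / (np.exp(p.g_y) * (1 + p.g_n[t+1]))
#
# This is a sketch of the law of motion implied by the inputs above, not a
# quote of the function's source; the exact handling of baseline_spending,
# budget_balance, and the growth adjustment depends on the flags being tested.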
Example #3
def test_inner_loop(baseline, param_updates, filename, dask_client):
    # Test the SS.inner_loop function.  Provide inputs to the function and
    # ensure that the output matches previously saved results.
    p = Specifications(baseline=baseline, num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.output_base = CUR_PATH
    bssmat = np.ones((p.S, p.J)) * 0.07
    nssmat = np.ones((p.S, p.J)) * .4 * p.ltilde
    if p.zeta_K[-1] == 1.0:
        r = p.world_int_rate[-1]
    else:
        r = 0.05
    TR = 0.12
    Y = 1.3
    factor = 100000
    BQ = np.ones(p.J) * 0.00019646295986015257
    if p.budget_balance:
        outer_loop_vars = (bssmat, nssmat, r, BQ, TR, factor)
    else:
        outer_loop_vars = (bssmat, nssmat, r, BQ, Y, TR, factor)
    test_tuple = SS.inner_loop(outer_loop_vars, p, dask_client)
    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))
    for i, v in enumerate(expected_tuple):
        print('Max diff = ', np.absolute(test_tuple[i] - v).max())
        print('Checking item = ', i)
        assert (np.allclose(test_tuple[i], v, atol=4e-05))
Example #4
def test_sigma(sigma, dask_client):
    og_spec = {'frisch': 0.41, 'debt_ratio_ss': 1.0, 'sigma': sigma}
    p = Specifications(baseline=True,
                       num_workers=NUM_WORKERS,
                       baseline_dir=OUTPUT_DIR,
                       output_base=OUTPUT_DIR)
    p.update_specifications(og_spec)
    runner(p, time_path=False, client=dask_client)
Example #5
def test_update_specifications_with_dict():
    spec = Specifications()
    new_spec_dict = {
        'frisch': 0.3,
    }
    spec.update_specifications(new_spec_dict)
    assert spec.frisch == 0.3
    assert len(spec.errors) == 0
Example #6
def test_run_TPI(baseline, param_updates, filename, tmp_path, dask_client):
    '''
    Test the TPI.run_TPI function.  Provide inputs to the function and
    ensure that the output matches previously saved results.
    '''
    baseline_dir = os.path.join(CUR_PATH, 'baseline')
    if baseline:
        output_base = baseline_dir
    else:
        output_base = os.path.join(CUR_PATH, 'reform')
    p = Specifications(baseline=baseline,
                       baseline_dir=baseline_dir,
                       output_base=output_base,
                       num_workers=NUM_WORKERS)
    test_params = TEST_PARAM_DICT.copy()
    test_params.update(param_updates)
    p.update_specifications(test_params)
    p.maxiter = 2  # this test runs through just two iterations

    # Need to run SS first to get results
    SS.ENFORCE_SOLUTION_CHECKS = False
    ss_outputs = SS.run_SS(p, client=dask_client)

    if p.baseline:
        utils.mkdirs(os.path.join(p.baseline_dir, "SS"))
        ss_dir = os.path.join(p.baseline_dir, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)
    else:
        utils.mkdirs(os.path.join(p.output_base, "SS"))
        ss_dir = os.path.join(p.output_base, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)

    TPI.ENFORCE_SOLUTION_CHECKS = False
    test_dict = TPI.run_TPI(p, client=dask_client)
    expected_dict = utils.safe_read_pickle(filename)

    for k, v in expected_dict.items():
        print('Max diff in ', k, ' = ')
        try:
            print(np.absolute(test_dict[k][:p.T] - v[:p.T]).max())
        except ValueError:
            print(np.absolute(test_dict[k][:p.T, :, :] - v[:p.T, :, :]).max())

    for k, v in expected_dict.items():
        try:
            assert (np.allclose(test_dict[k][:p.T],
                                v[:p.T],
                                rtol=1e-04,
                                atol=1e-04))
        except ValueError:
            assert (np.allclose(test_dict[k][:p.T, :, :],
                                v[:p.T, :, :],
                                rtol=1e-04,
                                atol=1e-04))
Example #7
def test_implement_reform():
    specs = Specifications()
    new_specs = {'tG1': 30, 'T': 80, 'frisch': 0.3, 'tax_func_type': 'DEP'}

    specs.update_specifications(new_specs)
    assert specs.frisch == 0.3
    assert specs.tG1 == 30
    assert specs.T == 80
    assert specs.tax_func_type == 'DEP'
    assert len(specs.errors) == 0
Example #8
def test_update_specification_with_json():
    spec = Specifications()
    new_spec_json = """
        {
            "frisch": 0.3
        }
    """
    spec.update_specifications(new_spec_json)
    assert spec.frisch == 0.3
    assert len(spec.errors) == 0
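
# Taken together, Example #5 and Example #8 show that update_specifications
# accepts either a plain dict or a JSON string.  A minimal combined sketch
# (the parameter value is illustrative only):
spec = Specifications()
spec.update_specifications({'frisch': 0.3})     # dict form (Example #5)
spec.update_specifications('{"frisch": 0.3}')   # JSON-string form (Example #8)
assert spec.frisch == 0.3 and len(spec.errors) == 0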
Example #9
def test_implement_bad_reform2():
    specs = Specifications()
    # tax_func_type must be one of the allowed functional forms
    new_specs = {'T': 80, 'tax_func_type': 'not_a_functional_form'}

    specs.update_specifications(new_specs, raise_errors=False)

    assert len(specs.errors) > 0
    assert specs.errors['tax_func_type'][0] == (
        'tax_func_type "not_a_functional_form" must be in list of ' +
        'choices DEP, DEP_totalinc, GS, linear.')
Example #10
def test_implement_bad_reform1():
    specs = Specifications()
    # tG1 has an upper bound at T / 2
    new_specs = {
        'tG1': 50,
        'T': 80,
    }

    specs.update_specifications(new_specs, raise_errors=False)

    assert len(specs.errors) == 0
Example #11
def test_run_small(time_path, dask_client):
    from ogcore.execute import runner
    # Monkey patch enforcement flag since small data won't pass checks
    SS.ENFORCE_SOLUTION_CHECKS = False
    TPI.ENFORCE_SOLUTION_CHECKS = False
    SS.MINIMIZER_TOL = 1e-6
    TPI.MINIMIZER_TOL = 1e-6
    p = Specifications(baseline=True,
                       num_workers=NUM_WORKERS,
                       baseline_dir=OUTPUT_DIR,
                       output_base=OUTPUT_DIR)
    p.update_specifications(TEST_PARAM_DICT)
    runner(p, time_path=time_path, client=dask_client)
Example #12
def test_get_D_ss(budget_balance, expected_tuple):
    '''
    Test of the fiscal.get_D_ss() function.
    '''
    r_gov = 0.03
    Y = 1.176255339
    p = Specifications()
    p.debt_ratio_ss = 1.2
    p.budget_balance = budget_balance
    p.g_n_ss = 0.02
    test_tuple = fiscal.get_D_ss(r_gov, Y, p)

    for i, v in enumerate(test_tuple):
        assert np.allclose(v, expected_tuple[i])
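
# An arithmetic cross-check on the inputs above (not a quote of the function's
# source): with budget_balance off, the implied steady-state debt and flows are
import numpy as np
D_ss = 1.2 * 1.176255339           # debt_ratio_ss * Y ~= 1.41151
debt_service = 0.03 * D_ss         # r_gov * D_ss ~= 0.042345
new_borrowing = D_ss * (1.02 * np.exp(0.03) - 1)   # ~= 0.072077
# new_borrowing assumes the default g_y of 0.03; both values match the
# debt-service and new-borrowing figures used in the get_G_ss test below.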
Example #13
File: test_tax.py  Project: jpycroft/OG-USA
def test_get_biz_tax():
    # Test function for business tax receipts
    p = Specifications()
    new_param_values = {
        'cit_rate': [0.20],
        'delta_tau_annual': [0.06]
    }
    p.update_specifications(new_param_values)
    p.T = 3
    w = np.array([1.2, 1.1, 1.2])
    Y = np.array([3.0, 7.0, 3.0])
    L = np.array([2.0, 3.0, 2.0])
    K = np.array([5.0, 6.0, 5.0])
    biz_tax = tax.get_biz_tax(w, Y, L, K, p, 'TPI')
    assert np.allclose(biz_tax, np.array([0.0102, 0.11356, 0.0102]))
Example #14
def test_get_G_ss(budget_balance, expected_G):
    '''
    Test of the fiscal.get_G_ss() function.
    '''
    Y = 2.2
    net_revenue = 2.3
    pension_amount = 0.0
    TR = 1.6
    UBI = 0.0
    new_borrowing = 0.072076633
    debt_service = 0.042345192
    p = Specifications()
    p.budget_balance = budget_balance
    test_G = fiscal.get_G_ss(Y, net_revenue, pension_amount, TR, UBI,
                             new_borrowing, debt_service, p)

    assert np.allclose(test_G, expected_G)
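
# With budget_balance off, G is the residual of the steady-state government
# budget constraint, i.e. roughly
#   G = net_revenue - pension_amount - TR - UBI + new_borrowing - debt_service
#     = 2.3 - 0.0 - 1.6 - 0.0 + 0.072076633 - 0.042345192 ~= 0.7297
# (a back-of-the-envelope reading of the inputs above, not a verified
# expected_G value; the budget_balance branch behaves differently).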
Example #15
def test_get_y():
    '''
    Test of household.get_y() function.
    '''
    r_hh = np.array([0.05, 0.04, 0.09])
    w = np.array([1.2, 0.8, 2.5])
    b_s = np.array([0.5, 0.99, 9])
    n = np.array([0.8, 3.2, 0.2])
    expected_y = np.array([0.9754, 3.8796, 0.91])
    p = Specifications()
    # p.update_specifications({'S': 4, 'J': 1})
    p.S = 3
    p.e = np.array([0.99, 1.5, 0.2])

    test_y = household.get_y(r_hh, w, b_s, n, p)

    assert np.allclose(test_y, expected_y)
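
# The expected_y values above are consistent with the household income
# identity y = r_hh * b_s + w * e * n (stated as a check on the test data,
# not a quote of household.get_y's source):
import numpy as np
r_hh = np.array([0.05, 0.04, 0.09])
w = np.array([1.2, 0.8, 2.5])
b_s = np.array([0.5, 0.99, 9])
n = np.array([0.8, 3.2, 0.2])
e = np.array([0.99, 1.5, 0.2])
print(r_hh * b_s + w * e * n)   # [0.9754  3.8796  0.91]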
Example #16
def validate_inputs(meta_param_dict, adjustment, errors_warnings):
    # ogusa doesn't look at meta_param_dict for validating inputs.
    params = Specifications()
    params.adjust(adjustment["OG-USA Parameters"], raise_errors=False)
    errors_warnings["OG-USA Parameters"]["errors"].update(params.errors)
    # Validate TC parameter inputs
    pol_params = {}
    # drop checkbox parameters.
    for param, data in list(adjustment["Tax-Calculator Parameters"].items()):
        if not param.endswith("checkbox"):
            pol_params[param] = data
    iit_params = TCParams()
    iit_params.adjust(pol_params, raise_errors=False)
    errors_warnings["Tax-Calculator Parameters"]["errors"].update(
        iit_params.errors
    )

    return {"errors_warnings": errors_warnings}
Example #17
def test_resource_constraint():
    """
    Test resource constraint equation.
    """
    p = Specifications()
    p.delta = 0.05
    Y = np.array([48, 55, 2, 99, 8])
    C = np.array([33, 44, 0.4, 55, 6])
    G = np.array([4, 5, 0.01, 22, 0])
    I = np.array([20, 5, 0.6, 10, 1])
    K_f = np.array([0, 0, 0.2, 3, 0.05])
    new_borrowing_f = np.array([0, 0.1, 0.3, 4, 0.5])
    debt_service_f = np.array([0.1, 0.1, 0.3, 2, 0.02])
    r = np.array([0.03, 0.04, 0.03, 0.06, 0.01])
    expected = np.array([-9.1, 1, 0.974, 13.67, 1.477])
    test_RC = aggr.resource_constraint(Y, C, G, I, K_f, new_borrowing_f,
                                       debt_service_f, r, p)

    assert (np.allclose(test_RC, expected))
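
# The expected residuals above line up with
#   RC = Y - C - G - I - (r + delta) * K_f + new_borrowing_f - debt_service_f
# (a check reverse-engineered from the test data, not a quote of
# aggr.resource_constraint's source):
import numpy as np
delta = 0.05
Y = np.array([48, 55, 2, 99, 8])
C = np.array([33, 44, 0.4, 55, 6])
G = np.array([4, 5, 0.01, 22, 0])
I = np.array([20, 5, 0.6, 10, 1])
K_f = np.array([0, 0, 0.2, 3, 0.05])
new_borrowing_f = np.array([0, 0.1, 0.3, 4, 0.5])
debt_service_f = np.array([0.1, 0.1, 0.3, 2, 0.02])
r = np.array([0.03, 0.04, 0.03, 0.06, 0.01])
print(Y - C - G - I - (r + delta) * K_f + new_borrowing_f - debt_service_f)
# [-9.1    1.     0.974 13.67   1.477]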
Example #18
def test_get_initial_SS_values(baseline, param_updates, filename, dask_client):
    p = Specifications(baseline=baseline, num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.baseline_dir = os.path.join(CUR_PATH, 'test_io_data', 'OUTPUT')
    p.output_base = os.path.join(CUR_PATH, 'test_io_data', 'OUTPUT')
    test_tuple = TPI.get_initial_SS_values(p)
    (test_initial_values, test_ss_vars, test_theta,
     test_baseline_values) = test_tuple
    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))

    (exp_initial_values, exp_ss_vars, exp_theta,
     exp_baseline_values) = expected_tuple
    (B0, b_sinit, b_splus1init, factor, initial_b,
     initial_n) = exp_initial_values
    B0 = aggr.get_B(exp_ss_vars['bssmat_splus1'], p, 'SS', True)
    initial_b = (exp_ss_vars['bssmat_splus1'] * (exp_ss_vars['Bss'] / B0))
    B0 = aggr.get_B(initial_b, p, 'SS', True)
    b_sinit = np.array(
        list(np.zeros(p.J).reshape(1, p.J)) + list(initial_b[:-1]))
    b_splus1init = initial_b
    exp_initial_values = (B0, b_sinit, b_splus1init, factor, initial_b,
                          initial_n)

    for i, v in enumerate(exp_initial_values):
        assert (np.allclose(test_initial_values[i], v, equal_nan=True))

    if p.baseline_spending:
        for i, v in enumerate(exp_baseline_values):
            assert (np.allclose(test_baseline_values[i], v, equal_nan=True))

    assert (np.allclose(test_theta, exp_theta))

    for k, v in exp_ss_vars.items():
        assert (np.allclose(test_ss_vars[k], v, equal_nan=True))
Example #19
def run_micro_macro(og_spec, guid, client):

    guid = ''
    start_time = time.time()

    REFORM_DIR = os.path.join(CUR_PATH, "OUTPUT_REFORM_" + guid)
    BASELINE_DIR = os.path.join(CUR_PATH, "OUTPUT_BASELINE" + guid)

    with open("log_{}.log".format(guid), 'w') as f:
        f.write("guid: {}\n".format(guid))
        f.write("og_spec: {}\n".format(og_spec))

    '''
    ------------------------------------------------------------------------
        Run baseline
    ------------------------------------------------------------------------
    '''
    p = Specifications(
        baseline=True, num_workers=NUM_WORKERS,
        baseline_dir=BASELINE_DIR, output_base=BASELINE_DIR)
    p.update_specifications(og_spec)
    runner(p, time_path=True, client=client)

    '''
    ------------------------------------------------------------------------
        Run reform
    ------------------------------------------------------------------------
    '''
    p = Specifications(
        baseline=False, num_workers=NUM_WORKERS,
        baseline_dir=BASELINE_DIR, output_base=REFORM_DIR)
    p.update_specifications(og_spec)
    runner(p, time_path=True, client=client)
    time.sleep(0.5)
    base_tpi = safe_read_pickle(
        os.path.join(BASELINE_DIR, 'TPI', 'TPI_vars.pkl'))
    base_params = safe_read_pickle(
        os.path.join(BASELINE_DIR, 'model_params.pkl'))
    reform_tpi = safe_read_pickle(
        os.path.join(REFORM_DIR, 'TPI', 'TPI_vars.pkl'))
    reform_params = safe_read_pickle(
        os.path.join(REFORM_DIR, 'model_params.pkl'))
    ans = ot.macro_table(
        base_tpi, base_params, reform_tpi=reform_tpi,
        reform_params=reform_params,
        var_list=['Y', 'C', 'K', 'L', 'r', 'w'], output_type='pct_diff',
        num_years=10, start_year=base_params.start_year)
    print("total time was ", (time.time() - start_time))

    return ans
Example #20
def test_get_TR(baseline, budget_balance, baseline_spending, method,
                expected_TR):
    '''
    Test of the fiscal.get_TR() function.
    '''
    Y = 3.2
    TR = 1.5
    G = 0.0
    agg_pension_outlays = 0.0
    UBI_outlays = 0.0
    total_tax_revenue = 1.9
    p = Specifications(baseline=baseline)
    p.budget_balance = budget_balance
    p.baseline_spending = baseline_spending
    if method == 'TPI':
        Y = np.ones(p.T * p.S) * Y
        TR = np.ones(p.T * p.S) * TR
        total_tax_revenue = np.ones(p.T * p.S) * total_tax_revenue
    test_TR = fiscal.get_TR(Y, TR, G, total_tax_revenue, agg_pension_outlays,
                            UBI_outlays, p, method)

    assert np.allclose(test_TR, expected_TR)
Example #21
def test_twist_doughnut(file_inputs, file_outputs):
    '''
    Test the TPI.twist_doughnut function.  Provide inputs to the function and
    ensure that the output matches previously saved results.
    '''
    input_tuple = utils.safe_read_pickle(file_inputs)
    (guesses, r, w, bq, tr, theta, factor, ubi, j, s, t, tau_c, etr_params,
     mtrx_params, mtry_params, initial_b) = input_tuple
    p = Specifications()
    input_tuple = (guesses, r, w, bq, tr, theta, factor, ubi, j, s, t, tau_c,
                   etr_params, mtrx_params, mtry_params, initial_b, p)
    test_list = TPI.twist_doughnut(*input_tuple)
    expected_list = utils.safe_read_pickle(file_outputs)
    assert (np.allclose(np.array(test_list), np.array(expected_list)))
Example #22
def get_inputs(meta_param_dict):
    meta_params = MetaParams()
    meta_params.adjust(meta_param_dict)
    # Set default OG-USA parameters
    ogusa_params = Specifications()
    ogusa_params.update_specifications(
        json.load(
            open(
                os.path.join(
                    "..", "..", "ogusa", "ogusa_default_parameters.json"
                )
            )
        )
    )
    ogusa_params.start_year = meta_params.year
    filtered_ogusa_params = OrderedDict()
    filter_list = [
        "chi_n_80",
        "chi_b",
        "eta",
        "zeta",
        "constant_demographics",
        "ltilde",
        "use_zeta",
        "constant_rates",
        "zero_taxes",
        "analytical_mtrs",
        "age_specific",
        "gamma",
        "epsilon",
        "start_year",
    ]
    for k, v in ogusa_params.dump().items():
        if (
            (k not in filter_list)
            and (v.get("section_1", False) != "Model Solution Parameters")
            and (v.get("section_2", False) != "Model Dimensions")
        ):
            filtered_ogusa_params[k] = v
            print("filtered ogusa = ", k)
    # Set default TC params
    iit_params = TCParams()
    iit_params.set_state(year=meta_params.year.tolist())
    filtered_iit_params = OrderedDict()
    for k, v in iit_params.dump().items():
        if k == "schema" or v.get("section_1", False):
            filtered_iit_params[k] = v

    default_params = {
        "OG-USA Parameters": filtered_ogusa_params,
        "Tax-Calculator Parameters": filtered_iit_params,
    }

    return {
        "meta_parameters": meta_params.dump(),
        "model_parameters": default_params,
    }
Example #23
def test_inner_loop():
    # Test the TPI.inner_loop function.  Provide inputs to the function and
    # ensure that the output matches previously saved results.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', 'tpi_inner_loop_inputs.pkl'))
    guesses, outer_loop_vars, initial_values, ubi, j, ind = input_tuple
    p = Specifications()
    test_tuple = TPI.inner_loop(guesses, outer_loop_vars, initial_values, ubi,
                                j, ind, p)

    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', 'tpi_inner_loop_outputs.pkl'))

    for i, v in enumerate(expected_tuple):
        assert (np.allclose(test_tuple[i], v))
Example #24
def test_firstdoughnutring():
    # Test the TPI.firstdoughnutring function.  Provide inputs to the function
    # and ensure that the output matches previously saved results.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', 'firstdoughnutring_inputs.pkl'))
    guesses, r, w, bq, tr, theta, factor, ubi, j, initial_b = input_tuple
    p = Specifications()
    test_list = TPI.firstdoughnutring(guesses, r, w, bq, tr, theta, factor,
                                      ubi, j, initial_b, p)

    expected_list = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data',
                     'firstdoughnutring_outputs.pkl'))

    assert (np.allclose(np.array(test_list), np.array(expected_list)))
Example #25
def test_constant_demographics_TPI(dask_client):
    '''
    This test solves the model under the assumption of constant
    demographics, a balanced budget, and tax functions that do not vary
    over time.
    In this case, given how initial guesses for the time
    path are made, the time path should be solved for on the first
    iteration and the values all along the time path should equal their
    steady-state values.
    '''
    # Create output directory structure
    spec = Specifications(output_base=CUR_PATH,
                          baseline_dir=OUTPUT_DIR,
                          baseline=True,
                          num_workers=NUM_WORKERS)
    og_spec = {
        'constant_demographics': True,
        'budget_balance': True,
        'zero_taxes': True,
        'maxiter': 2,
        'r_gov_shift': 0.0,
        'zeta_D': [0.0, 0.0],
        'zeta_K': [0.0, 0.0],
        'debt_ratio_ss': 1.0,
        'initial_foreign_debt_ratio': 0.0,
        'start_year': 2019,
        'cit_rate': [0.0],
        'PIA_rate_bkt_1': 0.0,
        'PIA_rate_bkt_2': 0.0,
        'PIA_rate_bkt_3': 0.0,
        'eta':
        (spec.omega_SS.reshape(spec.S, 1) * spec.lambdas.reshape(1, spec.J))
    }
    spec.update_specifications(og_spec)
    spec.etr_params = np.zeros(
        (spec.T + spec.S, spec.S, spec.etr_params.shape[2]))
    spec.mtrx_params = np.zeros(
        (spec.T + spec.S, spec.S, spec.mtrx_params.shape[2]))
    spec.mtry_params = np.zeros(
        (spec.T + spec.S, spec.S, spec.mtry_params.shape[2]))
    # Run SS
    ss_outputs = SS.run_SS(spec, client=dask_client)
    # save SS results
    utils.mkdirs(os.path.join(OUTPUT_DIR, "SS"))
    ss_dir = os.path.join(OUTPUT_DIR, "SS", "SS_vars.pkl")
    with open(ss_dir, "wb") as f:
        pickle.dump(ss_outputs, f)
    # Run TPI
    tpi_output = TPI.run_TPI(spec, client=dask_client)
    assert (np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :],
                        ss_outputs['bssmat_splus1']))
Example #26
def run_model(meta_param_dict, adjustment):
    """
    Initializes classes from OG-USA that compute the model under
    different policies.  Then calls a function to get the output objects.
    """
    print("Meta_param_dict = ", meta_param_dict)
    print("adjustment dict = ", adjustment)

    meta_params = MetaParams()
    meta_params.adjust(meta_param_dict)
    if meta_params.data_source == "PUF":
        data = retrieve_puf(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        # set name of cached baseline file in case it is used below
        cached_pickle = "TxFuncEst_baseline_PUF.pkl"
    else:
        data = "cps"
        # set name of cached baseline file in case it is used below
        cached_pickle = "TxFuncEst_baseline_CPS.pkl"
    # Get TC params adjustments
    iit_mods = convert_policy_adjustment(
        adjustment["Tax-Calculator Parameters"]
    )
    # Create output directory structure
    base_dir = os.path.join(CUR_DIR, BASELINE_DIR)
    reform_dir = os.path.join(CUR_DIR, REFORM_DIR)
    dirs = [base_dir, reform_dir]
    for _dir in dirs:
        utils.mkdirs(_dir)

    # Dask parameters
    client = Client()
    num_workers = 5
    # TODO: Swap to these parameters when able to specify tax function
    # and model workers separately
    # num_workers_txf = 5
    # num_workers_mod = 6

    # whether to estimate tax functions from microdata
    run_micro = True
    time_path = meta_param_dict["time_path"][0]["value"]

    # filter out OG-USA params that will not change between baseline and
    # reform runs (these are the non-policy parameters)
    filtered_ogusa_params = {}
    constant_param_set = {
        "frisch",
        "beta_annual",
        "sigma",
        "g_y_annual",
        "gamma",
        "epsilon",
        "Z",
        "delta_annual",
        "small_open",
        "world_int_rate",
        "initial_debt_ratio",
        "initial_foreign_debt_ratio",
        "zeta_D",
        "zeta_K",
        "tG1",
        "tG2",
        "rho_G",
        "debt_ratio_ss",
        "budget_balance",
    }
    filtered_ogusa_params = OrderedDict()
    for k, v in adjustment["OG-USA Parameters"].items():
        if k in constant_param_set:
            filtered_ogusa_params[k] = v

    # Solve baseline model
    start_year = meta_param_dict["year"][0]["value"]
    if start_year == 2020:
        OGPATH = inspect.getfile(SS)
        OGDIR = os.path.dirname(OGPATH)
        tax_func_path = None  # os.path.join(OGDIR, 'data', 'tax_functions',
        #             cached_pickle)
        run_micro_baseline = True
    else:
        tax_func_path = None
        run_micro_baseline = True
    base_spec = {
        **{
            "start_year": start_year,
            "tax_func_type": "DEP",
            "age_specific": False,
        },
        **filtered_ogusa_params,
    }
    base_params = Specifications(
        output_base=base_dir,
        baseline_dir=base_dir,
        baseline=True,
        num_workers=num_workers,
    )
    base_params.update_specifications(
        json.load(
            open(
                os.path.join(
                    "..", "..", "ogusa", "ogusa_default_parameters.json"
                )
            )
        )
    )
    base_params.update_specifications(base_spec)
    BW = TC_LAST_YEAR - start_year + 1
    base_params.BW = BW
    # Will need to figure out how to handle default tax functions here
    # For now, estimating tax functions even for baseline
    c_base = Calibration(
        base_params,
        iit_reform={},
        estimate_tax_functions=True,
        data=data,
        client=client,
    )
    # update tax function parameters in Specifications Object
    d_base = c_base.get_dict()
    # additional parameters to change
    updated_txfunc_params = {
        "etr_params": d_base["etr_params"],
        "mtrx_params": d_base["mtrx_params"],
        "mtry_params": d_base["mtry_params"],
        "mean_income_data": d_base["mean_income_data"],
        "frac_tax_payroll": d_base["frac_tax_payroll"],
    }
    base_params.update_specifications(updated_txfunc_params)
    base_ss = SS.run_SS(base_params, client=client)
    utils.mkdirs(os.path.join(base_dir, "SS"))
    base_ss_dir = os.path.join(base_dir, "SS", "SS_vars.pkl")
    with open(base_ss_dir, "wb") as f:
        pickle.dump(base_ss, f)
    if time_path:
        base_tpi = TPI.run_TPI(base_params, client=client)
        tpi_dir = os.path.join(base_dir, "TPI", "TPI_vars.pkl")
        with open(tpi_dir, "wb") as f:
            pickle.dump(base_tpi, f)
    else:
        base_tpi = None

    # Solve reform model
    reform_spec = base_spec
    reform_spec.update(adjustment["OG-USA Parameters"])
    reform_params = Specifications(
        output_base=reform_dir,
        baseline_dir=base_dir,
        baseline=False,
        num_workers=num_workers,
    )
    reform_params.update_specifications(
        json.load(
            open(
                os.path.join(
                    "..", "..", "ogusa", "ogusa_default_parameters.json"
                )
            )
        )
    )
    reform_params.update_specifications(reform_spec)
    reform_params.BW = BW
    c_reform = Calibration(
        reform_params,
        iit_reform=iit_mods,
        estimate_tax_functions=True,
        data=data,
        client=client,
    )
    # update tax function parameters in Specifications Object
    d_reform = c_reform.get_dict()
    # additional parameters to change
    updated_txfunc_params = {
        "etr_params": d_reform["etr_params"],
        "mtrx_params": d_reform["mtrx_params"],
        "mtry_params": d_reform["mtry_params"],
        "mean_income_data": d_reform["mean_income_data"],
        "frac_tax_payroll": d_reform["frac_tax_payroll"],
    }
    reform_params.update_specifications(updated_txfunc_params)
    reform_ss = SS.run_SS(reform_params, client=client)
    utils.mkdirs(os.path.join(reform_dir, "SS"))
    reform_ss_dir = os.path.join(reform_dir, "SS", "SS_vars.pkl")
    with open(reform_ss_dir, "wb") as f:
        pickle.dump(reform_ss, f)
    if time_path:
        reform_tpi = TPI.run_TPI(reform_params, client=client)
    else:
        reform_tpi = None

    comp_dict = comp_output(
        base_params,
        base_ss,
        reform_params,
        reform_ss,
        time_path,
        base_tpi,
        reform_tpi,
    )

    # Shut down client and make sure all of its references are
    # cleaned up.
    client.close()
    del client

    return comp_dict
Example #27
def test_top_share():
    '''
    Test of the Inequality.top_share() method.  (The opening of this example,
    including the definitions of dist, pop_weights, ability_weights, S, and J,
    was truncated in the source; the function header here is a reconstruction.)
    '''
    ineq = Inequality(dist, pop_weights, ability_weights, S, J)
    top_share = ineq.top_share(0.05)
    assert np.allclose(top_share, 0.285714286)


def test_to_timepath_shape():
    '''
    Test of function that converts vector to time path conformable array
    '''
    in_array = np.ones(40)
    test_array = utils.to_timepath_shape(in_array)
    assert test_array.shape == (40, 1, 1)


p = Specifications()
p.T = 40
p.S = 3
p.J = 1
x1 = np.ones((p.S, p.J)) * 0.4
xT = np.ones((p.S, p.J)) * 5.0
expected1 = np.tile(
    np.array([
        0.4, 0.51794872, 0.63589744, 0.75384615, 0.87179487, 0.98974359,
        1.10769231, 1.22564103, 1.34358974, 1.46153846, 1.57948718, 1.6974359,
        1.81538462, 1.93333333, 2.05128205, 2.16923077, 2.28717949, 2.40512821,
        2.52307692, 2.64102564, 2.75897436, 2.87692308, 2.99487179, 3.11282051,
        3.23076923, 3.34871795, 3.46666667, 3.58461538, 3.7025641, 3.82051282,
        3.93846154, 4.05641026, 4.17435897, 4.29230769, 4.41025641, 4.52820513,
        4.64615385, 4.76410256, 4.88205128, 5., 5.0, 5.0, 5.0
    ]).reshape(p.T + p.S, 1, 1), (1, p.S, p.J))
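
# The hard-coded expected1 array above is a linear path from x1 = 0.4 to
# xT = 5.0 over p.T periods, held at 5.0 for the final p.S periods (an
# observation about the test data; the helper under test is not shown in
# this snippet).  Using the module-level p defined above:
path = np.concatenate((np.linspace(0.4, 5.0, p.T), np.ones(p.S) * 5.0))
# np.allclose(np.tile(path.reshape(p.T + p.S, 1, 1), (1, p.S, p.J)), expected1)
# evaluates to True.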
Example #28
def test_run_SS(baseline, param_updates, filename, dask_client):
    # Test the SS.run_SS function.  Provide inputs to the function and
    # ensure that the output matches previously saved results.
    SS.ENFORCE_SOLUTION_CHECKS = True  #False
    # if running reform, then need to solve baseline first to get values
    if baseline is False:
        p_base = Specifications(output_base=constants.BASELINE_DIR,
                                baseline_dir=constants.BASELINE_DIR,
                                baseline=True,
                                num_workers=NUM_WORKERS)
        p_base.update_specifications(param_updates)
        if p_base.use_zeta:
            p_base.update_specifications({
                'initial_guess_r_SS': 0.10,
                'initial_guess_TR_SS': 0.02
            })
        p_base.baseline_spending = False
        base_ss_outputs = SS.run_SS(p_base, client=dask_client)
        utils.mkdirs(os.path.join(constants.BASELINE_DIR, "SS"))
        ss_dir = os.path.join(constants.BASELINE_DIR, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(base_ss_outputs, f)
    # now run specification for test
    p = Specifications(baseline=baseline, num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    test_dict = SS.run_SS(p, client=dask_client)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))

    for k, v in expected_dict.items():
        print('Checking item = ', k)
        assert (np.allclose(test_dict[k], v, atol=1e-06))
Example #29
def main():
    # Define parameters to use for multiprocessing
    client = Client()
    num_workers = min(multiprocessing.cpu_count(), 7)
    print("Number of workers = ", num_workers)

    # Directories to save data
    CUR_DIR = os.path.dirname(os.path.realpath(__file__))
    base_dir = os.path.join(CUR_DIR, "OG-USA-Example", "OUTPUT_BASELINE")
    reform_dir = os.path.join(CUR_DIR, "OG-USA-Example", "OUTPUT_REFORM")
    """
    ------------------------------------------------------------------------
    Run baseline policy
    ------------------------------------------------------------------------
    """
    # Set up baseline parameterization
    p = Specifications(
        baseline=True,
        num_workers=num_workers,
        baseline_dir=base_dir,
        output_base=base_dir,
    )
    # Update parameters for baseline from default json file
    p.update_specifications(
        json.load(
            open(
                os.path.join(CUR_DIR, "..", "ogusa",
                             "ogusa_default_parameters.json"))))

    # Run model
    start_time = time.time()
    runner(p, time_path=True, client=client)
    print("run time = ", time.time() - start_time)
    """
    ------------------------------------------------------------------------
    Run reform policy
    ------------------------------------------------------------------------
    """
    # Grab a reform JSON file already in Tax-Calculator
    # In this example the 'reform' is a change to 2017 law (the
    # baseline policy is tax law in 2018)
    reform_url = ("github://*****:*****@main/psl_examples/" +
                  "taxcalc/2017_law.json")
    ref = Calculator.read_json_param_objects(reform_url, None)
    iit_reform = ref["policy"]

    # create new Specifications object for reform simulation
    p2 = Specifications(
        baseline=False,
        num_workers=num_workers,
        baseline_dir=base_dir,
        output_base=reform_dir,
    )
    # Update parameters for reform from default json file
    p2.update_specifications(
        json.load(
            open(
                os.path.join(CUR_DIR, "..", "ogusa",
                             "ogusa_default_parameters.json"))))
    # Use calibration class to estimate reform tax functions from
    # Tax-Calculator, specifying the reform for Tax-Calculator in iit_reform
    c2 = Calibration(p2,
                     iit_reform=iit_reform,
                     estimate_tax_functions=True,
                     client=client)
    # update tax function parameters in Specifications Object
    d = c2.get_dict()
    # additional parameters to change
    updated_params = {
        "cit_rate": [0.35],
        "etr_params": d["etr_params"],
        "mtrx_params": d["mtrx_params"],
        "mtry_params": d["mtry_params"],
        "mean_income_data": d["mean_income_data"],
        "frac_tax_payroll": d["frac_tax_payroll"],
    }
    p2.update_specifications(updated_params)
    # Run model
    start_time = time.time()
    runner(p2, time_path=True, client=client)
    print("run time = ", time.time() - start_time)
    client.close()
    """
    ------------------------------------------------------------------------
    Save some results of simulations
    ------------------------------------------------------------------------
    """
    base_tpi = safe_read_pickle(os.path.join(base_dir, "TPI", "TPI_vars.pkl"))
    base_params = safe_read_pickle(os.path.join(base_dir, "model_params.pkl"))
    reform_tpi = safe_read_pickle(
        os.path.join(reform_dir, "TPI", "TPI_vars.pkl"))
    reform_params = safe_read_pickle(
        os.path.join(reform_dir, "model_params.pkl"))
    ans = ot.macro_table(
        base_tpi,
        base_params,
        reform_tpi=reform_tpi,
        reform_params=reform_params,
        var_list=["Y", "C", "K", "L", "r", "w"],
        output_type="pct_diff",
        num_years=10,
        start_year=base_params.start_year,
    )

    # create plots of output
    op.plot_all(base_dir, reform_dir,
                os.path.join(CUR_DIR, "OG-USA_example_plots"))

    print("Percentage changes in aggregates:", ans)
    # save percentage change output to csv file
    ans.to_csv("ogusa_example_output.csv")
Example #30
@pytest.fixture(scope="module")
def dask_client():
    cluster = LocalCluster(n_workers=NUM_WORKERS, threads_per_worker=2)
    client = Client(cluster)
    yield client
    # teardown
    client.close()
    cluster.close()
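

# Any test that lists dask_client in its signature receives the shared Client
# created by the fixture above.  A minimal, hypothetical consumer:
def test_uses_shared_cluster(dask_client):
    p = Specifications(baseline=True, num_workers=NUM_WORKERS)
    SS.run_SS(p, client=dask_client)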


input_tuple = utils.safe_read_pickle(
    os.path.join(CUR_PATH, 'test_io_data', 'SS_fsolve_inputs.pkl'))
(bssmat, nssmat, TR_ss, factor_ss) = input_tuple
# Parameterize the baseline, closed econ case
p1 = Specifications(baseline=True)
p1.update_specifications({'zeta_D': [0.0], 'zeta_K': [0.0]})
guesses1 = np.array(
    [0.06, 0.016, 0.02, 0.02, 0.01, 0.01, 0.02, 0.003, -0.07, 0.051])
args1 = (bssmat, nssmat, None, None, p1, None)
expected1 = np.array([
    -0.026632037158481975, -0.0022739752626707334, -0.01871875707724979,
    -0.01791935965422934, 0.005996289165268601, 0.00964100151012603,
    -0.01953460990186908, -0.0029633389016814967, 0.1306862551496613,
    0.11574464544202477
])
# Parameterize the reform, closed econ case
p2 = Specifications(baseline=False)
p2.update_specifications({'zeta_D': [0.0], 'zeta_K': [0.0]})
guesses2 = np.array([0.06, 0.016, 0.02, 0.02, 0.01, 0.01, 0.02, 0.003, -0.07])
args2 = (bssmat, nssmat, None, 0.51, p2, None)