Example #1
def test_run_SS(baseline, param_updates, filename, dask_client):
    # Test the SS.run_SS function. Provide inputs to the function and
    # ensure that the output returned matches the previously saved output.
    SS.ENFORCE_SOLUTION_CHECKS = True
    # if running reform, then need to solve baseline first to get values
    if baseline is False:
        p_base = Specifications(output_base=constants.BASELINE_DIR,
                                baseline_dir=constants.BASELINE_DIR,
                                baseline=True,
                                num_workers=NUM_WORKERS)
        p_base.update_specifications(param_updates)
        if p_base.use_zeta:
            p_base.update_specifications({
                'initial_guess_r_SS': 0.10,
                'initial_guess_TR_SS': 0.02
            })
        p_base.baseline_spending = False
        base_ss_outputs = SS.run_SS(p_base, client=dask_client)
        utils.mkdirs(os.path.join(constants.BASELINE_DIR, "SS"))
        ss_dir = os.path.join(constants.BASELINE_DIR, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(base_ss_outputs, f)
    # now run specification for test
    p = Specifications(baseline=baseline, num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    test_dict = SS.run_SS(p, client=dask_client)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))

    for k, v in expected_dict.items():
        print('Checking item = ', k)
        assert (np.allclose(test_dict[k], v, atol=1e-06))
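These regression tests compare against expected outputs pickled in test_io_data. When an intentional model change invalidates them, the expected file can be refreshed with the same objects used above (a hypothetical maintenance step, not part of the test):

# Hypothetical refresh: overwrite the expected pickle with the new output
with open(os.path.join(CUR_PATH, 'test_io_data', filename), 'wb') as f:
    pickle.dump(test_dict, f)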
Example #2
def test_inner_loop(baseline, param_updates, filename, dask_client):
    # Test the SS.inner_loop function. Provide inputs to the function and
    # ensure that the output returned matches the previously saved output.
    p = Specifications(baseline=baseline, num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.output_base = CUR_PATH
    bssmat = np.ones((p.S, p.J)) * 0.07
    nssmat = np.ones((p.S, p.J)) * .4 * p.ltilde
    if p.zeta_K[-1] == 1.0:
        r = p.world_int_rate[-1]
    else:
        r = 0.05
    TR = 0.12
    Y = 1.3
    factor = 100000
    BQ = np.ones(p.J) * 0.00019646295986015257
    if p.budget_balance:
        outer_loop_vars = (bssmat, nssmat, r, BQ, TR, factor)
    else:
        outer_loop_vars = (bssmat, nssmat, r, BQ, Y, TR, factor)
    test_tuple = SS.inner_loop(outer_loop_vars, p, dask_client)
    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))
    for i, v in enumerate(expected_tuple):
        print('Max diff = ', np.absolute(test_tuple[i] - v).max())
        print('Checking item = ', i)
        assert (np.allclose(test_tuple[i], v, atol=4e-05))
Example #3
def test_SS_fsolve(guesses, args, expected):
    '''
    Test the SS.SS_fsolve function. Provide inputs to the function and
    ensure that the output returned matches the previously saved output.
    '''
    test_list = SS.SS_fsolve(guesses, *args)
    assert (np.allclose(np.array(test_list), np.array(expected), atol=1e-6))
Example #4
def test_SS_solver_extra(baseline, param_updates, filename, dask_client):
    # Test the SS.SS_solver function. Provide inputs to the function and
    # ensure that the output returned matches the previously saved output.
    p = Specifications(baseline=baseline, num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.output_base = CUR_PATH
    b_guess = np.ones((p.S, p.J)) * 0.07
    n_guess = np.ones((p.S, p.J)) * .35 * p.ltilde
    if p.zeta_K[-1] == 1.0:
        rguess = p.world_int_rate[-1]
    else:
        rguess = 0.06483431412921253
    TRguess = 0.05738932081035772
    factorguess = 139355.1547340256
    BQguess = aggregates.get_BQ(rguess, b_guess, None, p, 'SS', False)
    Yguess = 0.6376591201150815

    test_dict = SS.SS_solver(b_guess, n_guess, rguess, BQguess, TRguess,
                             factorguess, Yguess, p, dask_client, False)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))

    for k, v in expected_dict.items():
        print('Testing ', k)
        assert (np.allclose(test_dict[k], v, atol=1e-05, equal_nan=True))
Example #5
def test_euler_equation_solver(input_tuple, ubi_j, p, expected):
    # Test the SS.euler_equation_solver function. Provide inputs to the
    # function and ensure that the output returned matches the previously
    # saved output.
    guesses, r, w, bq, tr, _, factor, j = input_tuple
    args = (r, w, bq, tr, ubi_j, factor, j, p)
    test_list = SS.euler_equation_solver(guesses, *args)
    print(repr(test_list))

    assert (np.allclose(np.array(test_list), np.array(expected)))
Example #6
def test_run_TPI(baseline, param_updates, filename, tmp_path, dask_client):
    '''
    Test the TPI.run_TPI function. Provide inputs to the function and
    ensure that the output returned matches the previously saved output.
    '''
    baseline_dir = os.path.join(CUR_PATH, 'baseline')
    if baseline:
        output_base = baseline_dir
    else:
        output_base = os.path.join(CUR_PATH, 'reform')
    p = Specifications(baseline=baseline,
                       baseline_dir=baseline_dir,
                       output_base=output_base,
                       num_workers=NUM_WORKERS)
    test_params = TEST_PARAM_DICT.copy()
    test_params.update(param_updates)
    p.update_specifications(test_params)
    p.maxiter = 2  # this test runs through just two iterations

    # Need to run SS first to get results
    SS.ENFORCE_SOLUTION_CHECKS = False
    ss_outputs = SS.run_SS(p, client=dask_client)

    # baseline results are saved under baseline_dir, reform results
    # under output_base
    out_dir = p.baseline_dir if p.baseline else p.output_base
    utils.mkdirs(os.path.join(out_dir, "SS"))
    ss_dir = os.path.join(out_dir, "SS", "SS_vars.pkl")
    with open(ss_dir, "wb") as f:
        pickle.dump(ss_outputs, f)

    TPI.ENFORCE_SOLUTION_CHECKS = False
    test_dict = TPI.run_TPI(p, client=dask_client)
    expected_dict = utils.safe_read_pickle(filename)

    for k, v in expected_dict.items():
        print('Max diff in ', k, ' = ')
        try:
            print(np.absolute(test_dict[k][:p.T] - v[:p.T]).max())
        except ValueError:
            print(np.absolute(test_dict[k][:p.T, :, :] - v[:p.T, :, :]).max())

    for k, v in expected_dict.items():
        try:
            assert (np.allclose(test_dict[k][:p.T],
                                v[:p.T],
                                rtol=1e-04,
                                atol=1e-04))
        except ValueError:
            assert (np.allclose(test_dict[k][:p.T, :, :],
                                v[:p.T, :, :],
                                rtol=1e-04,
                                atol=1e-04))
Example #7
def compute_se(beta_hat, W, K, p, h=0.01, client=None):
    """
    Function to compute standard errors for the SMM estimator.

    Args:
        beta_hat (array-like): estimates of beta parameters
        W (Numpy array): weighting matrix
        K (int): number of moments
        p (OG-USA Specifications object): model parameters
        h (scalar): percentage to move parameters for numerical derivatives
        client (Dask Client object): Dask client

    Returns:
        beta_se (array-like): standard errors for beta estimates
        VCV_params (Numpy array): VCV matrix for parameter estimates

    """
    # compute the numerical derivatives needed for the standard errors
    model_moments_low = np.zeros((p.J, K))
    model_moments_high = np.zeros((p.J, K))
    for i in range(len(beta_hat)):
        # compute moments with a downward change in the i-th parameter
        # (use a fresh copy so beta_hat itself is never mutated)
        beta_low = beta_hat.copy()
        beta_low[i] = beta_hat[i] * (1 - h)
        p.beta = beta_low
        ss_output = SS.run_SS(p, client=client)
        model_moments_low[i, :] = calc_moments(ss_output, p)
        # compute moments with an upward change in the i-th parameter
        beta_high = beta_hat.copy()
        beta_high[i] = beta_hat[i] * (1 + h)
        p.beta = beta_high
        ss_output = SS.run_SS(p, client=client)
        model_moments_high[i, :] = calc_moments(ss_output, p)

    deriv_moments = (model_moments_high - model_moments_low).T / (2 * h *
                                                                  beta_hat)
    VCV_params = np.linalg.inv(
        np.dot(np.dot(deriv_moments.T, W), deriv_moments))
    beta_se = (np.diag(VCV_params))**(1 / 2)

    return beta_se, VCV_params
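For reference, the last few lines implement the standard SMM/delta-method variance estimator. Writing D for the K x J Jacobian of model moments with respect to beta, approximated above by central differences, the code computes

    D_{kj} \approx \frac{m_k(\hat{\beta} + h\hat{\beta}_j e_j) - m_k(\hat{\beta} - h\hat{\beta}_j e_j)}{2h\hat{\beta}_j},
    \qquad \widehat{VCV} = (D^\top W D)^{-1},
    \qquad se(\hat{\beta}_j) = \sqrt{\widehat{VCV}_{jj}}.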
Example #8
def test_constant_demographics_TPI(dask_client):
    '''
    This test solves the model under the assumption of constant
    demographics, a balanced budget, and tax functions that do not vary
    over time.
    In this case, given how initial guesses for the time
    path are made, the time path should be solved for on the first
    iteration and the values all along the time path should equal their
    steady-state values.
    '''
    # Create output directory structure
    spec = Specifications(output_base=CUR_PATH,
                          baseline_dir=OUTPUT_DIR,
                          baseline=True,
                          num_workers=NUM_WORKERS)
    og_spec = {
        'constant_demographics': True,
        'budget_balance': True,
        'zero_taxes': True,
        'maxiter': 2,
        'r_gov_shift': 0.0,
        'zeta_D': [0.0, 0.0],
        'zeta_K': [0.0, 0.0],
        'debt_ratio_ss': 1.0,
        'initial_foreign_debt_ratio': 0.0,
        'start_year': 2019,
        'cit_rate': [0.0],
        'PIA_rate_bkt_1': 0.0,
        'PIA_rate_bkt_2': 0.0,
        'PIA_rate_bkt_3': 0.0,
        'eta':
        (spec.omega_SS.reshape(spec.S, 1) * spec.lambdas.reshape(1, spec.J))
    }
    spec.update_specifications(og_spec)
    spec.etr_params = np.zeros(
        (spec.T + spec.S, spec.S, spec.etr_params.shape[2]))
    spec.mtrx_params = np.zeros(
        (spec.T + spec.S, spec.S, spec.mtrx_params.shape[2]))
    spec.mtry_params = np.zeros(
        (spec.T + spec.S, spec.S, spec.mtry_params.shape[2]))
    # Run SS
    ss_outputs = SS.run_SS(spec, client=dask_client)
    # save SS results
    utils.mkdirs(os.path.join(OUTPUT_DIR, "SS"))
    ss_dir = os.path.join(OUTPUT_DIR, "SS", "SS_vars.pkl")
    with open(ss_dir, "wb") as f:
        pickle.dump(ss_outputs, f)
    # Run TPI
    tpi_output = TPI.run_TPI(spec, client=dask_client)
    assert (np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :],
                        ss_outputs['bssmat_splus1']))
Example #9
def minstat(beta_guesses, *args):
    """
    This function generates the weighted sum of squared differences
    between the model and data moments.

    Args:
        beta_guesses (array-like): a vector of length J with the betas
        args (tuple): length 4 tuple, (data_moments, W, p, client),
            variables needed for the minimizer

    Returns:
        distance (scalar): weighted, squared deviation between data and
            model moments

    """
    # unpack args tuple
    data_moments, W, p, client = args

    # Update beta in parameters object with beta guesses
    p.beta = beta_guesses

    # Solve model SS
    print("Baseline = ", p.baseline)
    ss_output = SS.run_SS(p, client=client)

    # Compute moments from model SS
    model_moments = calc_moments(ss_output, p)

    # distance with levels
    distance = np.dot(
        np.dot((np.array(model_moments) - np.array(data_moments)).T, W),
        np.array(model_moments) - np.array(data_moments),
    )

    print("DATA and MODEL DISTANCE: ", distance)

    return distance
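The distance computed above is the standard GMM/SMM criterion, a quadratic form in the moment deviations:

    \text{distance} = (m_{\text{model}} - m_{\text{data}})^\top W \,(m_{\text{model}} - m_{\text{data}}),

where W is the moment weighting matrix (the identity, or the inverse of a bootstrapped VCV of the data moments under optimal weighting, as in Example #11 below).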
Example #10
def run_model(meta_param_dict, adjustment):
    """
    Initializes classes from OG-USA that compute the model under
    different policies. Then calls a function to get the output objects.
    """
    print("Meta_param_dict = ", meta_param_dict)
    print("adjustment dict = ", adjustment)

    meta_params = MetaParams()
    meta_params.adjust(meta_param_dict)
    if meta_params.data_source == "PUF":
        data = retrieve_puf(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        # set name of cached baseline file in case it is used below
        cached_pickle = "TxFuncEst_baseline_PUF.pkl"
    else:
        data = "cps"
        # set name of cached baseline file in case it is used below
        cached_pickle = "TxFuncEst_baseline_CPS.pkl"
    # Get TC params adjustments
    iit_mods = convert_policy_adjustment(
        adjustment["Tax-Calculator Parameters"]
    )
    # Create output directory structure
    base_dir = os.path.join(CUR_DIR, BASELINE_DIR)
    reform_dir = os.path.join(CUR_DIR, REFORM_DIR)
    dirs = [base_dir, reform_dir]
    for _dir in dirs:
        utils.mkdirs(_dir)

    # Dask parameters
    client = Client()
    num_workers = 5
    # TODO: Swap to these parameters when able to specify tax function
    # and model workers separately
    # num_workers_txf = 5
    # num_workers_mod = 6

    # whether to estimate tax functions from microdata
    run_micro = True
    time_path = meta_param_dict["time_path"][0]["value"]

    # filter out OG-USA params that will not change between baseline and
    # reform runs (these are the non-policy parameters)
    filtered_ogusa_params = {}
    constant_param_set = {
        "frisch",
        "beta_annual",
        "sigma",
        "g_y_annual",
        "gamma",
        "epsilon",
        "Z",
        "delta_annual",
        "small_open",
        "world_int_rate",
        "initial_debt_ratio",
        "initial_foreign_debt_ratio",
        "zeta_D",
        "zeta_K",
        "tG1",
        "tG2",
        "rho_G",
        "debt_ratio_ss",
        "budget_balance",
    }
    filtered_ogusa_params = OrderedDict()
    for k, v in adjustment["OG-USA Parameters"].items():
        if k in constant_param_set:
            filtered_ogusa_params[k] = v

    # Solve baseline model
    start_year = meta_param_dict["year"][0]["value"]
    if start_year == 2020:
        OGPATH = inspect.getfile(SS)
        OGDIR = os.path.dirname(OGPATH)
        tax_func_path = None  # os.path.join(OGDIR, 'data', 'tax_functions',
        #             cached_pickle)
        run_micro_baseline = True
    else:
        tax_func_path = None
        run_micro_baseline = True
    base_spec = {
        **{
            "start_year": start_year,
            "tax_func_type": "DEP",
            "age_specific": False,
        },
        **filtered_ogusa_params,
    }
    base_params = Specifications(
        output_base=base_dir,
        baseline_dir=base_dir,
        baseline=True,
        num_workers=num_workers,
    )
    base_params.update_specifications(
        json.load(
            open(
                os.path.join(
                    "..", "..", "ogusa", "ogusa_default_parameters.json"
                )
            )
        )
    )
    base_params.update_specifications(base_spec)
    BW = TC_LAST_YEAR - start_year + 1
    base_params.BW = BW
    # Will need to figure out how to handle default tax functions here
    # For now, estimating tax functions even for baseline
    c_base = Calibration(
        base_params,
        iit_reform={},
        estimate_tax_functions=True,
        data=data,
        client=client,
    )
    # update tax function parameters in Specifications Object
    d_base = c_base.get_dict()
    # additional parameters to change
    updated_txfunc_params = {
        "etr_params": d_base["etr_params"],
        "mtrx_params": d_base["mtrx_params"],
        "mtry_params": d_base["mtry_params"],
        "mean_income_data": d_base["mean_income_data"],
        "frac_tax_payroll": d_base["frac_tax_payroll"],
    }
    base_params.update_specifications(updated_txfunc_params)
    base_ss = SS.run_SS(base_params, client=client)
    utils.mkdirs(os.path.join(base_dir, "SS"))
    base_ss_dir = os.path.join(base_dir, "SS", "SS_vars.pkl")
    with open(base_ss_dir, "wb") as f:
        pickle.dump(base_ss, f)
    if time_path:
        base_tpi = TPI.run_TPI(base_params, client=client)
        tpi_dir = os.path.join(base_dir, "TPI", "TPI_vars.pkl")
        with open(tpi_dir, "wb") as f:
            pickle.dump(base_tpi, f)
    else:
        base_tpi = None

    # Solve reform model
    reform_spec = base_spec.copy()  # copy so the baseline spec is not mutated
    reform_spec.update(adjustment["OG-USA Parameters"])
    reform_params = Specifications(
        output_base=reform_dir,
        baseline_dir=base_dir,
        baseline=False,
        num_workers=num_workers,
    )
    reform_params.update_specifications(
        json.load(
            open(
                os.path.join(
                    "..", "..", "ogusa", "ogusa_default_parameters.json"
                )
            )
        )
    )
    reform_params.update_specifications(reform_spec)
    reform_params.BW = BW
    c_reform = Calibration(
        reform_params,
        iit_reform=iit_mods,
        estimate_tax_functions=True,
        data=data,
        client=client,
    )
    # update tax function parameters in Specifications Object
    d_reform = c_reform.get_dict()
    # additional parameters to change
    updated_txfunc_params = {
        "etr_params": d_reform["etr_params"],
        "mtrx_params": d_reform["mtrx_params"],
        "mtry_params": d_reform["mtry_params"],
        "mean_income_data": d_reform["mean_income_data"],
        "frac_tax_payroll": d_reform["frac_tax_payroll"],
    }
    reform_params.update_specifications(updated_txfunc_params)
    reform_ss = SS.run_SS(reform_params, client=client)
    utils.mkdirs(os.path.join(reform_dir, "SS"))
    reform_ss_dir = os.path.join(reform_dir, "SS", "SS_vars.pkl")
    with open(reform_ss_dir, "wb") as f:
        pickle.dump(reform_ss, f)
    if time_path:
        reform_tpi = TPI.run_TPI(reform_params, client=client)
    else:
        reform_tpi = None

    comp_dict = comp_output(
        base_params,
        base_ss,
        reform_params,
        reform_ss,
        time_path,
        base_tpi,
        reform_tpi,
    )

    # Shut down client and make sure all of its references are
    # cleaned up.
    client.close()
    del client

    return comp_dict
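For context, a minimal invocation sketch. The field names follow the accesses in run_model above, but the exact payload schema comes from the web application, so these values are illustrative only:

# Hypothetical payloads -- structure inferred from run_model's accesses above
meta_param_dict = {
    "year": [{"value": 2021}],
    "data_source": [{"value": "CPS"}],
    "time_path": [{"value": False}],
}
adjustment = {
    "OG-USA Parameters": {"frisch": 0.41},
    "Tax-Calculator Parameters": {},
}
comp_dict = run_model(meta_param_dict, adjustment)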
Example #11
def chi_estimate(
    income_tax_params,
    ss_params,
    iterative_params,
    chi_guesses,
    baseline_dir="./OUTPUT",
):
    """
    --------------------------------------------------------------------
    This function calls others to obtain the data moments and then
    runs the simulated method of moments estimation by calling the
    minimization routine.

    INPUTS:
    income_tax_parameters = length 4 tuple, (analytical_mtrs, etr_params, mtrx_params, mtry_params)
    ss_parameters         = length 27 tuple, (J, S, T, BW, beta, sigma, alpha, Z, delta, ltilde, nu, g_y,\
                            g_n_ss, tau_payroll, tau_bq, rho, omega_SS, lambdas, imm_rates, e,\
                            retire, mean_income_data, h_wealth, p_wealth, m_wealth, b_ellipse, upsilon)
    iterative_params      = [2,] vector, vector with max iterations and tolerance
                             for SS solution
    chi_guesses           = [J+S,] vector, initial guesses of chi_b and chi_n stacked together
    baseline_dir          = string, path where baseline results located


    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
    wealth.compute_wealth_moments()
    labor.labor_data_moments()
    minstat()

    OBJECTS CREATED WITHIN FUNCTION:
    wealth_moments     = [J+2,] array, wealth moments from data
    labor_moments      = [S,] array, labor moments from data
    data_moments       = [J+2+S,] array, wealth and labor moments stacked
    bnds               = [S+J,] array, bounds for parameter estimates
    chi_guesses_flat   =  [J+S,] vector, initial guesses of chi_b and chi_n stacked
    min_args           = length 7 tuple, variables needed for minimizer
    est_output         = dictionary, output from minimizer
    chi_params         = [J+S,] vector, parameters estimates for chi_b and chi_n stacked
    objective_func_min = scalar, minimum of statistical objective function


    OUTPUT:
    ./baseline_dir/Calibration/chi_std_errors.pkl


    RETURNS: chi_params
    --------------------------------------------------------------------
    """

    # unpack tuples of parameters
    (
        J,
        S,
        T,
        BW,
        beta,
        sigma,
        alpha,
        Z,
        delta,
        ltilde,
        nu,
        g_y,
        g_n_ss,
        tau_payroll,
        tau_bq,
        rho,
        omega_SS,
        lambdas,
        imm_rates,
        e,
        retire,
        mean_income_data,
        h_wealth,
        p_wealth,
        m_wealth,
        b_ellipse,
        upsilon,
    ) = ss_params
    chi_b_guess, chi_n_guess = chi_guesses

    flag_graphs = False

    # specify bootstrap iterations
    n = 10000

    # Generate Wealth data moments
    scf, data = wealth.get_wealth_data()
    wealth_moments = wealth.compute_wealth_moments(scf, lambdas, J)

    # Generate labor data moments
    cps = labor.get_labor_data()
    labor_moments = labor.compute_labor_moments(cps, S)

    # combine moments
    data_moments = list(wealth_moments.flatten()) + list(
        labor_moments.flatten()
    )

    # determine weighting matrix
    optimal_weight = False
    if optimal_weight:
        VCV_wealth_moments = wealth.VCV_moments(scf, n, lambdas, J)
        VCV_labor_moments = labor.VCV_moments(cps, n, lambdas, S)
        VCV_data_moments = np.zeros((J + 2 + S, J + 2 + S))
        VCV_data_moments[: J + 2, : J + 2] = VCV_wealth_moments
        VCV_data_moments[J + 2 :, J + 2 :] = VCV_labor_moments
        W = np.linalg.inv(VCV_data_moments)
        # np.savetxt('VCV_data_moments.csv',VCV_data_moments)
    else:
        W = np.identity(J + 2 + S)

    # call minimizer
    bnds = np.tile(
        np.array([1e-12, None]), (S + J, 1)
    )  # Need (1e-12, None) S+J times
    chi_guesses_flat = list(chi_b_guess.flatten()) + list(
        chi_n_guess.flatten()
    )

    min_args = (
        data_moments,
        W,
        income_tax_params,
        ss_params,
        iterative_params,
        chi_guesses_flat,
        baseline_dir,
    )
    # est_output = opt.minimize(minstat, chi_guesses_flat, args=(min_args), method="L-BFGS-B", bounds=bnds,
    #                 tol=1e-15, options={'maxfun': 1, 'maxiter': 1, 'maxls': 1})
    # est_output = opt.minimize(minstat, chi_guesses_flat, args=(min_args), method="L-BFGS-B", bounds=bnds,
    #                 tol=1e-15)
    # chi_params = est_output.x
    # objective_func_min = est_output.fun
    #
    # # pickle output
    # utils.mkdirs(os.path.join(baseline_dir, "Calibration"))
    # est_dir = os.path.join(baseline_dir, "Calibration/chi_estimation.pkl")
    # pickle.dump(est_output, open(est_dir, "wb"))
    #
    # # save data and model moments and min stat to csv
    # # to then put in table of paper
    chi_params = chi_guesses_flat
    chi_b = chi_params[:J]
    chi_n = chi_params[J:]
    chi_params_list = (chi_b, chi_n)

    ss_output = SS.run_SS(
        income_tax_params,
        ss_params,
        iterative_params,
        chi_params_list,
        True,
        baseline_dir,
    )
    model_moments = calc_moments(ss_output, omega_SS, lambdas, S, J)

    # # make dataframe for results
    # columns = ['data_moment', 'model_moment', 'minstat']
    # moment_fit = pd.DataFrame(index=range(0,J+2+S), columns=columns)
    # moment_fit = moment_fit.fillna(0) # with 0s rather than NaNs
    # moment_fit['data_moment'] = data_moments
    # moment_fit['model_moment'] = model_moments
    # moment_fit['minstat'] = objective_func_min
    # est_dir = os.path.join(baseline_dir, "Calibration/moment_results.pkl")
    # moment_fit.to_csv(est_dir)

    # calculate std errors
    h = 0.0001  # pct change in parameter
    model_moments_low = np.zeros((len(chi_params), len(model_moments)))
    model_moments_high = np.zeros((len(chi_params), len(model_moments)))
    for i in range(len(chi_params)):
        # downward change in the i-th parameter (fresh copy each pass so
        # earlier perturbations do not accumulate)
        chi_params_low = np.array(chi_params, dtype=float)
        chi_params_low[i] = chi_params[i] * (1 - h)
        chi_b = chi_params_low[:J]
        chi_n = chi_params_low[J:]
        chi_params_list = (chi_b, chi_n)
        ss_output = SS.run_SS(
            income_tax_params,
            ss_params,
            iterative_params,
            chi_params_list,
            True,
            baseline_dir,
        )
        model_moments_low[i, :] = calc_moments(
            ss_output, omega_SS, lambdas, S, J
        )

        # upward change in the i-th parameter
        chi_params_high = np.array(chi_params, dtype=float)
        chi_params_high[i] = chi_params[i] * (1 + h)
        chi_b = chi_params_high[:J]
        chi_n = chi_params_high[J:]
        chi_params_list = (chi_b, chi_n)
        ss_output = SS.run_SS(
            income_tax_params,
            ss_params,
            iterative_params,
            chi_params_list,
            True,
            baseline_dir,
        )
        model_moments_high[i, :] = calc_moments(
            ss_output, omega_SS, lambdas, S, J
        )

    deriv_moments = (
        np.asarray(model_moments_high) - np.asarray(model_moments_low)
    ).T / (2.0 * h * np.asarray(chi_params))
    VCV_params = np.linalg.inv(
        np.dot(np.dot(deriv_moments.T, W), deriv_moments)
    )
    std_errors_chi = (np.diag(VCV_params)) ** (1 / 2.0)
    sd_dir = os.path.join(baseline_dir, "Calibration/chi_std_errors.pkl")
    with open(sd_dir, "wb") as f:
        pickle.dump(std_errors_chi, f)

    np.savetxt("chi_std_errors.csv", std_errors_chi)

    return chi_params
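The perturbation loop above (and the similar one in compute_se in Example #7) is easy to get wrong: each perturbed vector must be a fresh copy, and the two perturbations must straddle the point estimate. A minimal, self-contained sketch of the pattern, with moment_fn standing in for a wrapper around SS.run_SS plus calc_moments (the helper name and signature are hypothetical):

import numpy as np

def central_diff_moments(moment_fn, theta, h=1e-4):
    # Central-difference Jacobian of a moment function: returns D with
    # D[k, j] = d m_k / d theta_j, using a relative step of h.
    theta = np.asarray(theta, dtype=float)
    n_moments = np.asarray(moment_fn(theta)).size
    D = np.zeros((n_moments, theta.size))
    for j in range(theta.size):
        hi = theta.copy()  # fresh copies so perturbations never accumulate
        lo = theta.copy()
        hi[j] = theta[j] * (1 + h)
        lo[j] = theta[j] * (1 - h)
        D[:, j] = (np.asarray(moment_fn(hi)) - np.asarray(moment_fn(lo))) / (
            2 * h * theta[j])
    return D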
Example #12
def minstat(chi_guesses, *args):
    """
    --------------------------------------------------------------------
    This function generates the weighted sum of squared differences
    between the model and data moments.

    INPUTS:
    chi_guesses = [J+S,] vector, initial guesses of chi_b and chi_n stacked together
    args        = length 7 tuple, variables needed for minimizer


    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
    SS.run_SS()
    calc_moments()

    OBJECTS CREATED WITHIN FUNCTION:
    ss_output     = dictionary, variables from SS of model
    model_moments = [J+2+S,] array, moments from the model solution
    distance      = scalar, weighted, squared deviation between data and model moments

    RETURNS: distance
    --------------------------------------------------------------------
    """

    (
        data_moments,
        W,
        income_tax_params,
        ss_params,
        iterative_params,
        chi_params,
        baseline_dir,
    ) = args
    (
        J,
        S,
        T,
        BW,
        beta,
        sigma,
        alpha,
        Z,
        delta,
        ltilde,
        nu,
        g_y,
        g_n_ss,
        tau_payroll,
        tau_bq,
        rho,
        omega_SS,
        lambdas,
        imm_rates,
        e,
        retire,
        mean_income_data,
        h_wealth,
        p_wealth,
        m_wealth,
        b_ellipse,
        upsilon,
    ) = ss_params
    chi_b = chi_guesses[:J]
    chi_n = chi_guesses[J:]
    chi_params = (chi_b, chi_n)
    ss_output = SS.run_SS(
        income_tax_params,
        ss_params,
        iterative_params,
        chi_params,
        True,
        baseline_dir,
    )

    model_moments = calc_moments(ss_output, omega_SS, lambdas, S, J)

    # distance with levels
    distance = np.dot(
        np.dot((np.array(model_moments) - np.array(data_moments)).T, W),
        np.array(model_moments) - np.array(data_moments),
    )
    # distance = ((np.array(model_moments) - np.array(data_moments))**2).sum()
    print("DATA and MODEL DISTANCE: ", distance)

    # # distance with percentage diffs
    # distance = (((model_moments - data_moments)/data_moments)**2).sum()

    return distance
Example #13
def runner(p, time_path=True, client=None):
    '''
    This function runs the OG-Core model, solving for the steady-state
    and (optionally) the time path equilibrium.

    Args:
        p (Specifications object): model parameters
        time_path (bool): whether to solve for the time path equilibrium
        client (Dask client object): client

    Returns:
        None

    '''

    tick = time.time()
    # Create output directory structure
    ss_dir = os.path.join(p.output_base, "SS")
    tpi_dir = os.path.join(p.output_base, "TPI")
    dirs = [ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError:
            pass

    print('In runner, baseline is ', p.baseline)
    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''
    ss_outputs = SS.run_SS(p, client=client)
    '''
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    '''
    utils.mkdirs(os.path.join(p.output_base, "SS"))
    ss_dir = os.path.join(p.output_base, "SS", "SS_vars.pkl")
    with open(ss_dir, "wb") as f:
        pickle.dump(ss_outputs, f)
    print('JUST SAVED SS output to ', ss_dir)
    # Save pickle with parameter values for the run
    param_dir = os.path.join(p.output_base, "model_params.pkl")
    with open(param_dir, "wb") as f:
        cloudpickle.dump((p), f)

    if time_path:
        '''
        ------------------------------------------------------------------------
            Run the TPI simulation
        ------------------------------------------------------------------------
        '''
        tpi_output = TPI.run_TPI(p, client=client)
        '''
        ------------------------------------------------------------------------
            Pickle TPI results
        ------------------------------------------------------------------------
        '''
        tpi_dir = os.path.join(p.output_base, "TPI")
        utils.mkdirs(tpi_dir)
        tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
        with open(tpi_vars, "wb") as f:
            pickle.dump(tpi_output, f)

        print("Time path iteration complete.")
    print("It took {0} seconds to get that part done.".format(time.time() -
                                                              tick))
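A hedged usage sketch, following the Specifications construction used in the tests above (the import path and directory names are assumptions):

from ogcore.parameters import Specifications  # import path is an assumption

p = Specifications(output_base='./OUTPUT_BASELINE',
                   baseline_dir='./OUTPUT_BASELINE',
                   baseline=True,
                   num_workers=1)
# solves the steady state, then the time path; pickles results under output_base
runner(p, time_path=True, client=None)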