Example 1
    def runner(self):
        '''
        Method to run the model
        '''
        # baseline compute
        self.base_ss_outputs = SS.run_SS(self.spec1)
        if self.spec1.time_path:
            self.base_tpi_output = TPI.run_TPI(self.spec1)
        # reform compute
        # (a bit more involved because it needs results from the baseline run)

        self.reform_ss_outputs = SS.run_SS(self.spec2)
        if self.spec2.time_path:
            self.reform_tpi_output = TPI.run_TPI(self.spec2)
Example 2
def minstat(beta_guesses, *args):
    '''
    This function generates the weighted sum of squared differences
    between the model and data moments.

    Args:
        beta_guesses (array-like): a vector of length J with the betas
        args (tuple): length 4 tuple, variables needed for minimizer

    Returns:
        distance (scalar): weighted, squared deviation between data and
            model moments

    '''
    # unpack args tuple
    data_moments, W, p, client = args

    # Update beta in parameters object with beta guesses
    p.beta = beta_guesses

    # Solve model SS
    print('Baseline = ', p.baseline)
    ss_output = SS.run_SS(p, client=client)

    # Compute moments from model SS
    model_moments = calc_moments(ss_output, p)

    # distance with levels
    distance = np.dot(
        np.dot((np.array(model_moments) - np.array(data_moments)).T, W),
        np.array(model_moments) - np.array(data_moments))

    print('DATA and MODEL DISTANCE: ', distance)

    return distance
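The criterion above is the standard SMM quadratic form in the gap between model and data moments. The snippet below is a minimal sketch of how a criterion with this signature might be handed to a SciPy minimizer; the starting values, the bounds, and the pre-existing objects p, client, data_moments, and W are illustrative assumptions rather than part of the example above.

# Hedged sketch: wiring minstat into scipy.optimize.minimize.
# Assumes p (a Specifications object), client, data_moments, and W already exist.
import numpy as np
import scipy.optimize as opt

beta_guesses = np.ones(p.J) * 0.96       # illustrative starting values, one beta per type
min_args = (data_moments, W, p, client)  # matches the unpacking inside minstat
bnds = [(0.90, 0.999)] * p.J             # illustrative bounds on the discount factors
est_output = opt.minimize(minstat, beta_guesses, args=min_args,
                          method='L-BFGS-B', bounds=bnds, tol=1e-8)
beta_hat = est_output.x
print('Estimated betas:', beta_hat)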
Example 3
def test_run_SS(input_path, expected_path):
    # Test SS.run_SS function.  Provide inputs to the function and
    # ensure that the output returned matches previously saved results.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', input_path))
    (income_tax_params, ss_params, iterative_params, chi_params,
     small_open_params, baseline, baseline_spending, baseline_dir) =\
        input_tuple
    p = Specifications()
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon, Z,
     p.delta, p.ltilde, p.nu, p.g_y, p.g_n_ss, tau_payroll, tau_bq, p.rho,
     p.omega_SS, p.budget_balance, alpha_T, p.debt_ratio_ss, tau_b, delta_tau,
     lambdas, imm_rates, p.e, retire, p.mean_income_data, h_wealth, p_wealth,
     m_wealth, p.b_ellipse, p.upsilon) = ss_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.alpha_T = np.ones(p.T + p.S) * alpha_T
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.lambdas = lambdas.reshape(p.J, 1)
    p.imm_rates = imm_rates.reshape(1, p.S)
    p.tax_func_type = 'DEP'
    p.baseline = baseline
    p.baseline_spending = baseline_spending
    p.baseline_dir = baseline_dir
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(
        etr_params.reshape(p.S, 1, etr_params.shape[-1]), (1, 0, 2))
    p.mtrx_params = np.transpose(
        mtrx_params.reshape(p.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
    p.mtry_params = np.transpose(
        mtry_params.reshape(p.S, 1, mtry_params.shape[-1]), (1, 0, 2))
    p.maxiter, p.mindist_SS = iterative_params
    p.chi_b, p.chi_n = chi_params
    p.small_open, ss_firm_r, ss_hh_r = small_open_params
    p.ss_firm_r = np.ones(p.T + p.S) * ss_firm_r
    p.ss_hh_r = np.ones(p.T + p.S) * ss_hh_r
    p.num_workers = 1
    test_dict = SS.run_SS(p, None)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', expected_path))

    # delete key-value pairs that are not in both dicts
    del expected_dict['bssmat'], expected_dict['chi_n'], expected_dict['chi_b']
    del test_dict['etr_ss'], test_dict['mtrx_ss'], test_dict['mtry_ss']
    test_dict['IITpayroll_revenue'] = (test_dict['total_revenue_ss'] -
                                       test_dict['business_revenue'])
    del test_dict['T_Pss'], test_dict['T_BQss'], test_dict['T_Wss']
    del test_dict['resource_constraint_error'], test_dict['T_Css']
    test_dict['revenue_ss'] = test_dict.pop('total_revenue_ss')

    for k, v in expected_dict.items():
        assert (np.allclose(test_dict[k], v))
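Regression tests of this form are typically driven by a pytest parametrize decorator that supplies the pickle filenames. The sketch below illustrates that wiring; the filenames and ids are placeholders, not the fixtures actually used in the test suite.

# Hedged sketch of the parametrization that would drive test_run_SS.
import pytest

@pytest.mark.parametrize(
    'input_path,expected_path',
    [('run_SS_baseline_inputs.pkl', 'run_SS_baseline_outputs.pkl'),
     ('run_SS_reform_inputs.pkl', 'run_SS_reform_outputs.pkl')],
    ids=['Baseline', 'Reform'])
def test_run_SS(input_path, expected_path):
    ...  # body as in the example above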
Example 4
def test_run_SS(baseline, param_updates, filename, dask_client):
    # Test SS.run_SS function.  Provide inputs to the function and
    # ensure that the output returned matches previously saved results.
    if baseline is False:
        tax_func_path_baseline = os.path.join(CUR_PATH,
                                              'TxFuncEst_baseline.pkl')
        tax_func_path = os.path.join(CUR_PATH, 'TxFuncEst_policy.pkl')
        execute.runner(constants.BASELINE_DIR,
                       constants.BASELINE_DIR,
                       time_path=False,
                       baseline=True,
                       og_spec=param_updates,
                       run_micro=False,
                       tax_func_path=tax_func_path_baseline)
    else:
        tax_func_path = os.path.join(CUR_PATH, 'TxFuncEst_baseline.pkl')
    p = Specifications(baseline=baseline,
                       client=dask_client,
                       num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.get_tax_function_parameters(None,
                                  run_micro=False,
                                  tax_func_path=tax_func_path)
    test_dict = SS.run_SS(p, None)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))

    for k, v in expected_dict.items():
        assert (np.allclose(test_dict[k], v))
Example 5
def test_constant_demographics_TPI():
    '''
    This test solves the model under the assumption of constant
    demographics, a balanced budget, and tax functions that do not vary
    over time.
    In this case, given how initial guesses for the time
    path are made, the time path should be solved for on the first
    iteration and the values all along the time path should equal their
    steady-state values.
    '''
    output_base = "./OUTPUT"
    baseline_dir = "./OUTPUT"
    user_params = {
        'constant_demographics': True,
        'budget_balance': True,
        'zero_taxes': True,
        'maxiter': 2
    }
    # Create output directory structure
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError:
            pass
    spec = Specifications(run_micro=False,
                          output_base=output_base,
                          baseline_dir=baseline_dir,
                          test=False,
                          time_path=True,
                          baseline=True,
                          reform={},
                          guid='')
    spec.update_specifications(user_params)
    print('path for tax functions: ', spec.output_base)
    spec.get_tax_function_parameters(None, False)
    # Run SS
    ss_outputs = SS.run_SS(spec, None)
    # save SS results
    utils.mkdirs(os.path.join(baseline_dir, "SS"))
    ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
    pickle.dump(ss_outputs, open(ss_dir, "wb"))
    # Save pickle with parameter values for the run
    param_dir = os.path.join(baseline_dir, "model_params.pkl")
    pickle.dump(spec, open(param_dir, "wb"))
    tpi_output = TPI.run_TPI(spec, None)
    print(
        'Max diff btwn SS and TP bsplus1 = ',
        np.absolute(tpi_output['bmat_splus1'][:spec.T, :, :] -
                    ss_outputs['bssmat_splus1']).max())
    print('Max diff btwn SS and TP Y = ',
          np.absolute(tpi_output['Y'][:spec.T] - ss_outputs['Yss']).max())
    assert (np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :],
                        ss_outputs['bssmat_splus1']))
Example 6
def test_run_TPI(baseline, param_updates, filename, tmp_path, dask_client):
    '''
    Test TPI.run_TPI function.  Provide inputs to the function and
    ensure that the output returned matches previously saved results.
    '''
    baseline_dir = os.path.join(CUR_PATH, 'baseline')
    if baseline:
        output_base = baseline_dir
    else:
        output_base = os.path.join(CUR_PATH, 'reform')
    p = Specifications(baseline=baseline,
                       baseline_dir=baseline_dir,
                       output_base=output_base,
                       test=True,
                       client=dask_client,
                       num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.maxiter = 2  # this test runs through just two iterations
    p.get_tax_function_parameters(None,
                                  run_micro=False,
                                  tax_func_path=os.path.join(
                                      CUR_PATH, '..', 'data', 'tax_functions',
                                      'TxFuncEst_baseline_CPS.pkl'))

    # Need to run SS first to get results
    SS.ENFORCE_SOLUTION_CHECKS = False
    ss_outputs = SS.run_SS(p, None)

    if p.baseline:
        utils.mkdirs(os.path.join(p.baseline_dir, "SS"))
        ss_dir = os.path.join(p.baseline_dir, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)
    else:
        utils.mkdirs(os.path.join(p.output_base, "SS"))
        ss_dir = os.path.join(p.output_base, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)

    TPI.ENFORCE_SOLUTION_CHECKS = False
    test_dict = TPI.run_TPI(p, None)
    expected_dict = utils.safe_read_pickle(filename)

    for k, v in expected_dict.items():
        try:
            assert (np.allclose(test_dict[k][:p.T],
                                v[:p.T],
                                rtol=1e-04,
                                atol=1e-04))
        except ValueError:
            assert (np.allclose(test_dict[k][:p.T, :, :],
                                v[:p.T, :, :],
                                rtol=1e-04,
                                atol=1e-04))
Example 7
def compute_se(beta_hat, W, K, p, h=0.01, client=None):
    """
    Function to compute standard errors for the SMM estimator.

    Args:
        beta_hat (array-like): estimates of beta parameters
        W (Numpy array): weighting matrix
        K (int): number of moments
        p (OG-USA Specifications object): model parameters
        h (scalar): percentage to move parameters for numerical derivatives
        client (Dask Client object): Dask client

    Returns:
        beta_se (array-like): standard errors for beta estimates
        VCV_params (Numpy array): VCV matrix for parameter estimates

    """
    # compute numerical derivatives needed for the standard errors
    model_moments_low = np.zeros((p.J, K))
    model_moments_high = np.zeros((p.J, K))
    beta_low = beta_hat.copy()
    beta_high = beta_hat.copy()
    for i in range(len(beta_hat)):
        # compute moments with downward change in param
        beta_low[i] = beta_hat[i] * (1 - h)
        p.beta = beta_low
        ss_output = SS.run_SS(p, client=client)
        model_moments_low[i, :] = calc_moments(ss_output, p)
        # compute moments with upward change in param
        beta_high[i] = beta_hat[i] * (1 + h)
        p.beta = beta_high
        ss_output = SS.run_SS(p, client=client)
        model_moments_high[i, :] = calc_moments(ss_output, p)
        # reset the perturbed element before moving to the next parameter
        beta_low[i] = beta_hat[i]
        beta_high[i] = beta_hat[i]

    deriv_moments = (model_moments_high - model_moments_low).T / (2 * h *
                                                                  beta_hat)
    VCV_params = np.linalg.inv(
        np.dot(np.dot(deriv_moments.T, W), deriv_moments))
    beta_se = (np.diag(VCV_params))**(1 / 2)

    return beta_se, VCV_params
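In matrix form, with G the K x J Jacobian of the model moments with respect to beta (approximated by the central differences in the loop above) and W the weighting matrix, the code computes

    \hat{V}(\hat{\beta}) = \left( G^\top W G \right)^{-1}, \qquad
    \mathrm{SE}(\hat{\beta}_j) = \sqrt{\big[ \hat{V}(\hat{\beta}) \big]_{jj}}, \qquad
    G_{kj} \approx \frac{m_k\big(\beta_j (1+h)\big) - m_k\big(\beta_j (1-h)\big)}{2 h \, \beta_j}.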
Example 8
def test_constant_demographics_TPI(dask_client):
    '''
    This test solves the model under the assumption of constant
    demographics, a balanced budget, and tax functions that do not vary
    over time.
    In this case, given how initial guesses for the time
    path are made, the time path should be solved for on the first
    iteration and the values all along the time path should equal their
    steady-state values.
    '''
    # Create output directory structure
    spec = Specifications(run_micro=False,
                          output_base=OUTPUT_DIR,
                          baseline_dir=OUTPUT_DIR,
                          test=False,
                          time_path=True,
                          baseline=True,
                          iit_reform={},
                          guid='',
                          client=dask_client,
                          num_workers=NUM_WORKERS)
    og_spec = {
        'constant_demographics': True,
        'budget_balance': True,
        'zero_taxes': True,
        'maxiter': 2,
        'r_gov_shift': 0.0,
        'zeta_D': [0.0, 0.0],
        'zeta_K': [0.0, 0.0],
        'debt_ratio_ss': 1.0,
        'initial_foreign_debt_ratio': 0.0,
        'start_year': 2019,
        'cit_rate': [0.0],
        'PIA_rate_bkt_1': 0.0,
        'PIA_rate_bkt_2': 0.0,
        'PIA_rate_bkt_3': 0.0,
        'eta':
        (spec.omega_SS.reshape(spec.S, 1) * spec.lambdas.reshape(1, spec.J))
    }
    spec.update_specifications(og_spec)
    spec.get_tax_function_parameters(None, False, tax_func_path=TAX_FUNC_PATH)
    # Run SS
    ss_outputs = SS.run_SS(spec, None)
    # save SS results
    utils.mkdirs(os.path.join(OUTPUT_DIR, "SS"))
    ss_dir = os.path.join(OUTPUT_DIR, "SS", "SS_vars.pkl")
    with open(ss_dir, "wb") as f:
        pickle.dump(ss_outputs, f)
    # Run TPI
    tpi_output = TPI.run_TPI(spec, None)
    assert (np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :],
                        ss_outputs['bssmat_splus1']))
Example 9
def test_constant_demographics_TPI():
    '''
    This test solves the model under the assumption of constant
    demographics, a balanced budget, and tax functions that do not vary
    over time.
    In this case, given how initial guesses for the time
    path are made, the time path should be solved for on the first
    iteration and the values all along the time path should equal their
    steady-state values.
    '''
    output_base = "./OUTPUT"
    baseline_dir = "./OUTPUT"
    user_params = {'constant_demographics': True,
                   'budget_balance': True,
                   'zero_taxes': True,
                   'maxiter': 2}
    # Create output directory structure
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError:
            pass
    spec = Specifications(run_micro=False, output_base=output_base,
                          baseline_dir=baseline_dir, test=False,
                          time_path=True, baseline=True, reform={},
                          guid='')
    spec.update_specifications(user_params)
    print('path for tax functions: ', spec.output_base)
    spec.get_tax_function_parameters(None, False)
    # Run SS
    ss_outputs = SS.run_SS(spec, None)
    # save SS results
    utils.mkdirs(os.path.join(baseline_dir, "SS"))
    ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
    pickle.dump(ss_outputs, open(ss_dir, "wb"))
    # Save pickle with parameter values for the run
    param_dir = os.path.join(baseline_dir, "model_params.pkl")
    pickle.dump(spec, open(param_dir, "wb"))
    tpi_output = TPI.run_TPI(spec, None)
    print('Max diff btwn SS and TP bsplus1 = ',
          np.absolute(tpi_output['bmat_splus1'][:spec.T, :, :] -
                      ss_outputs['bssmat_splus1']).max())
    print('Max diff btwn SS and TP Y = ',
          np.absolute(tpi_output['Y'][:spec.T] -
                      ss_outputs['Yss']).max())
    assert(np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :],
                       ss_outputs['bssmat_splus1']))
Example 10
def test_constant_demographics_TPI():
    '''
    This test solves the model under the assumption of constant
    demographics, a balanced budget, and tax functions that do not vary
    over time.
    In this case, given how initial guesses for the time
    path are made, the time path should be solved for on the first
    iteration and the values all along the time path should equal their
    steady-state values.
    '''
    output_base = os.path.join(CUR_PATH, 'OUTPUT')
    baseline_dir = output_base
    # Create output directory structure
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError:
            pass
    spec = Specifications(run_micro=False,
                          output_base=output_base,
                          baseline_dir=baseline_dir,
                          test=False,
                          time_path=True,
                          baseline=True,
                          iit_reform={},
                          guid='')
    og_spec = {
        'constant_demographics': True,
        'budget_balance': True,
        'zero_taxes': True,
        'maxiter': 2,
        'eta':
        (spec.omega_SS.reshape(spec.S, 1) * spec.lambdas.reshape(1, spec.J))
    }
    spec.update_specifications(og_spec)
    spec.get_tax_function_parameters(None, False)
    # Run SS
    ss_outputs = SS.run_SS(spec, None)
    # save SS results
    utils.mkdirs(os.path.join(baseline_dir, "SS"))
    ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
    pickle.dump(ss_outputs, open(ss_dir, "wb"))
    # Run TPI
    tpi_output = TPI.run_TPI(spec, None)
    assert (np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :],
                        ss_outputs['bssmat_splus1']))
Example 11
def test_constant_demographics_TPI_small_open():
    '''
    This test solves the model under the assumption of constant
    demographics, a balanced budget, and tax functions that do not vary
    over time, as well as with a small open economy assumption.
    '''
    # Create output directory structure
    spec = Specifications(run_micro=False,
                          output_base=OUTPUT_DIR,
                          baseline_dir=OUTPUT_DIR,
                          test=False,
                          time_path=True,
                          baseline=True,
                          iit_reform={},
                          guid='')
    og_spec = {
        'constant_demographics': True,
        'budget_balance': True,
        'zero_taxes': True,
        'maxiter': 2,
        'r_gov_shift': 0.0,
        'zeta_D': [0.0, 0.0],
        'zeta_K': [1.0],
        'debt_ratio_ss': 1.0,
        'initial_foreign_debt_ratio': 0.0,
        'start_year': 2019,
        'cit_rate': [0.0],
        'PIA_rate_bkt_1': 0.0,
        'PIA_rate_bkt_2': 0.0,
        'PIA_rate_bkt_3': 0.0,
        'eta':
        (spec.omega_SS.reshape(spec.S, 1) * spec.lambdas.reshape(1, spec.J))
    }
    spec.update_specifications(og_spec)
    spec.get_tax_function_parameters(None, False, tax_func_path=TAX_FUNC_PATH)
    # Run SS
    ss_outputs = SS.run_SS(spec, None)
    # save SS results
    utils.mkdirs(os.path.join(OUTPUT_DIR, "SS"))
    ss_dir = os.path.join(OUTPUT_DIR, "SS", "SS_vars.pkl")
    with open(ss_dir, "wb") as f:
        pickle.dump(ss_outputs, f)
    # Run TPI
    tpi_output = TPI.run_TPI(spec, None)
    assert (np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :],
                        ss_outputs['bssmat_splus1']))
Example 12
def test_run_SS(input_path, expected_path):
    # Test SS.run_SS function.  Provide inputs to the function and
    # ensure that the output returned matches previously saved results.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', input_path))
    (income_tax_params, ss_params, iterative_params, chi_params,
     small_open_params, baseline, baseline_spending, baseline_dir) =\
        input_tuple
    income_tax_params = ('DEP', ) + income_tax_params
    test_dict = SS.run_SS(income_tax_params, ss_params, iterative_params,
                          chi_params, small_open_params, baseline,
                          baseline_spending, baseline_dir)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', expected_path))

    for k, v in expected_dict.items():
        assert (np.allclose(test_dict[k], v))
Example 13
        def matcher(d_guess, params):
            income_tax_params, receipts_to_match, ss_params, iterative_params,\
                              chi_params, baseline, baseline_dir = params
            analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
            etr_params[:, 3] = d_guess
            mtrx_params[:, 3] = d_guess
            mtry_params[:, 3] = d_guess
            income_tax_params = analytical_mtrs, etr_params, mtrx_params, mtry_params
            ss_outputs = SS.run_SS(income_tax_params,
                                   ss_params,
                                   iterative_params,
                                   chi_params,
                                   baseline,
                                   fix_transfers=fix_transfers,
                                   baseline_dir=baseline_dir)

            receipts_new = ss_outputs['T_Hss'] + ss_outputs['Gss']
            error = abs(receipts_to_match - receipts_new)
            if d_guess <= 0:
                error = 1e14
            print('Error in taxes:', error)
            return error
Example 14
def solve_model(params, d):
    income_tax_params, ss_params, iterative_params,\
        chi_params, baseline, baseline_dir = params
    analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
    etr_params[:, 3] = d
    mtrx_params[:, 3] = d
    mtry_params[:, 3] = d
    income_tax_params = analytical_mtrs, etr_params, mtrx_params, mtry_params
    ss_outputs = SS.run_SS(income_tax_params,
                           ss_params,
                           iterative_params,
                           chi_params,
                           baseline,
                           fix_transfers=fix_transfers,
                           baseline_dir=baseline_dir)
    ss_dir = os.path.join("./OUTPUT_INCOME_REFORM/sigma2.0",
                          "SS/SS_vars.pkl")
    pickle.dump(ss_outputs, open(ss_dir, "wb"))
    receipts_new = ss_outputs['T_Hss'] + ss_outputs['Gss']
    new_error = receipts_to_match - receipts_new
    print('Error in taxes:', new_error)
    print('New income tax:', d)
    return new_error
Example 15
def test_run_TPI():
    # Test TPI.run_TPI function.  Provide inputs to the function and
    # ensure that the output returned matches previously saved results.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', 'run_TPI_inputs.pkl'))
    (income_tax_params, tpi_params, iterative_params, small_open_params,
     initial_values, SS_values, fiscal_params, biz_tax_params,
     output_dir, baseline_spending) = input_tuple
    tpi_params = tpi_params + [True]
    initial_values = initial_values + (0.0,)

    p = Specifications()
    (J, S, T, BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params

    new_param_values = {
        'J': J,
        'S': S,
        'T': T,
        'eta': (np.ones((S, J)) / (S * J))
    }
    # update parameters instance with new values for test
    p.update_specifications(new_param_values, raise_errors=False)
    (J, S, T, BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params
    p.eta = p.omega.reshape(T + S, S, 1) * lambdas.reshape(1, J)
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.small_open, ss_firm_r, ss_hh_r = small_open_params
    p.ss_firm_r = np.ones(p.T + p.S) * ss_firm_r
    p.ss_hh_r = np.ones(p.T + p.S) * ss_hh_r
    p.maxiter, p.mindist_SS, p.mindist_TPI = iterative_params
    (p.budget_balance, alpha_T, alpha_G, p.tG1, p.tG2, p.rho_G,
     p.debt_ratio_ss) = fiscal_params
    p.alpha_T = np.concatenate((alpha_T, np.ones(40) * alpha_T[-1]))
    p.alpha_G = np.concatenate((alpha_G, np.ones(40) * alpha_G[-1]))
    (tau_b, delta_tau) = biz_tax_params
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(etr_params, (1, 0, 2))[:p.T, :, :]
    p.mtrx_params = np.transpose(mtrx_params, (1, 0, 2))[:p.T, :, :]
    p.mtry_params = np.transpose(mtry_params, (1, 0, 2))[:p.T, :, :]
    p.lambdas = lambdas.reshape(p.J, 1)
    p.output = output_dir
    p.baseline_spending = baseline_spending
    p.frac_tax_payroll = 0.5 * np.ones(p.T + p.S)
    p.num_workers = 1
    (K0, b_sinit, b_splus1init, factor, initial_b, initial_n,
     p.omega_S_preTP, initial_debt, D0) = initial_values

    # Need to run SS first to get results
    ss_outputs = SS.run_SS(p, None)

    if p.baseline:
        utils.mkdirs(os.path.join(p.baseline_dir, "SS"))
        ss_dir = os.path.join(p.baseline_dir, "SS/SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)
    else:
        utils.mkdirs(os.path.join(p.output_base, "SS"))
        ss_dir = os.path.join(p.output_base, "SS/SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)

    test_dict = TPI.run_TPI(p, None)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', 'run_TPI_outputs.pkl'))

    # delete key-value pairs that are not in both dicts
    del expected_dict['I_total']
    del test_dict['etr_path'], test_dict['mtrx_path'], test_dict['mtry_path']
    del test_dict['bmat_s']
    test_dict['b_mat'] = test_dict.pop('bmat_splus1')
    test_dict['REVENUE'] = test_dict.pop('total_revenue')
    test_dict['T_H'] = test_dict.pop('TR')
    test_dict['IITpayroll_revenue'] = (test_dict['REVENUE'][:160] -
                                       test_dict['business_revenue'])
    del test_dict['T_P'], test_dict['T_BQ'], test_dict['T_W']
    del test_dict['y_before_tax_mat'], test_dict['K_f'], test_dict['K_d']
    del test_dict['D_d'], test_dict['D_f']
    del test_dict['new_borrowing_f'], test_dict['debt_service_f']
    del test_dict['iit_revenue'], test_dict['payroll_tax_revenue']
    del test_dict['resource_constraint_error'], test_dict['T_C']
    del test_dict['r_gov'], test_dict['r_hh'], test_dict['tr_path']

    for k, v in expected_dict.items():
        try:
            assert(np.allclose(test_dict[k], v, rtol=1e-04, atol=1e-04))
        except ValueError:
            assert(np.allclose(test_dict[k], v[:p.T, :, :], rtol=1e-04,
                               atol=1e-04))
Example 16
def chi_estimate(p, client=None):
    '''
    --------------------------------------------------------------------
    This function calls others to obtain the data moments and then
    runs the simulated method of moments estimation by calling the
    minimization routine.

    INPUTS:
    p      = OG-USA Specifications class object with model parameters
    client = Dask Client object or None


    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
    wealth.compute_wealth_moments()
    labor.labor_data_moments()
    minstat()

    OBJECTS CREATED WITHIN FUNCTION:
    wealth_moments     = [J+2,] array, wealth moments from data
    labor_moments      = [S,] array, labor moments from data
    data_moments       = [J+2+S,] array, wealth and labor moments stacked
    bnds               = [S+J,] array, bounds for parameter estimates
    chi_guesses_flat   =  [J+S,] vector, initial guesses of chi_b and chi_n stacked
    min_arg            = length 6 tuple, variables needed for minimizer
    est_output         = dictionary, output from minimizer
    chi_params         = [J+S,] vector, parameters estimates for chi_b and chi_n stacked
    objective_func_min = scalar, minimum of statistical objective function


    OUTPUT:
    ./baseline_dir/Calibration/chi_estimation.pkl


    RETURNS: chi_params
    --------------------------------------------------------------------
    '''

    baseline_dir = "./OUTPUT"
    #chi_b_guess = np.ones(80)

    # a0 = 5.38312524e+01
    # a1 = -1.55746248e+00
    # a2 = 1.77689237e-02
    # a3 = -8.04751667e-06
    # a4 = 5.65432019e-08
    """ Kei's Vals
    a0 = 170
    a1 = -2.19154735e+00
    a2 = -2.22817460e-02
    a3 = 4.49993507e-04
    a4 = -1.34197054e-06
    """
    """ Adam's Vals 1
    a0 = 2.59572155e+02
    a1 = -2.35122641e+01
    a2 = 4.27581467e-01
    a3 = -3.40808933e-03
    a4 = 1.00404321e-05
    """

    a0 = 1.10807470e+03  #5.19144310e+02
    #a0 = 1.10807470e+03#5.19144310e+02
    a1 = -1.05805189e+02  #-4.70245283e+01
    a2 = 1.92411660e+00  #8.55162933e-01
    a3 = -1.53364020e-02  #-6.81617866e-03
    a4 = 4.51819445e-05  #2.00808642e-05

    # a0 = 2.07381e+02
    # a1 = -1.03143105e+01
    # a2 = 1.42760562e-01
    # a3 = -8.41089078e-04
    # a4 = 1.85173227e-06

    # sixty_plus_chi = 300
    params_init = np.array([a0, a1, a2, a3, a4])

    # Generate labor data moments
    labor_hours = np.array([167, 165, 165, 165, 165, 166, 165, 165,
                            164])  #, 166, 164])

    labor_part_rate = np.array(
        [0.69, 0.849, 0.849, 0.847, 0.847, 0.859, 0.859, 0.709,
         0.709])  #, 0.212, 0.212])

    employ_rate = np.array(
        [0.937, 0.954, 0.954, 0.966, 0.966, 0.97, 0.97, 0.968,
         0.968])  #, 0.978, 0.978])

    labor_hours_adj = labor_hours * labor_part_rate * employ_rate

    # get fraction of time endowment worked (assume time
    # endowment is 24 hours minus required time to sleep 6.5 hours)
    labor_moments = labor_hours_adj * 12 / (365 * 17.5)

    #labor_moments[9] = 0.1
    #labor_moments[10] = 0.1

    # combine moments
    data_moments = np.array(list(labor_moments.flatten()))

    # weighting matrix
    # W = np.identity(p.J + 2 + p.S)  # overridden: only 9 labor moments are matched
    W = np.identity(9)

    ages = np.linspace(20, 65, p.S // 2 + 5)
    #ages = np.linspace(20, 100, p.S)

    est_output = opt.minimize(minstat_init_calibrate, params_init,
                              args=(p, client, data_moments, W, ages),
                              method="L-BFGS-B",
                              tol=1e-15, options={'eps': 0.1})
    a0, a1, a2, a3, a4 = est_output.x
    #chi_n = chebyshev_func(ages, a0, a1, a2, a3, a4)
    chi_n = np.ones(p.S)
    #ages_full = np.linspace(20, 100, p.S)
    #chi_n = chebyshev_func(ages_full, a0, a1, a2, a3, a4)

    chi_n[:p.S // 2 + 5] = chebyshev_func(ages, a0, a1, a2, a3, a4)
    slope = 1500  #chi_n[p.S // 2 + 5 - 1] - chi_n[p.S // 2 + 5 - 2]
    chi_n[p.S // 2 + 4:] = ((np.linspace(65, 100, 36) - 65) * slope
                            + chi_n[p.S // 2 + 4])

    chi_n[chi_n < 0.5] = 0.5
    p.chi_n = chi_n
    print('PARAMS for Chebyshev:', est_output.x)
    with open("output.txt", "a") as text_file:
        text_file.write('\nPARAMS for Chebyshev: ' + str(est_output.x) + '\n')
    pickle.dump(chi_n, open("chi_n.p", "wb"))

    ss_output = SS.run_SS(p)
    return ss_output
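chebyshev_func is called here (and again in Example 19) but its definition is not included in these examples. A minimal sketch follows, assuming it simply evaluates a fourth-degree polynomial in age with coefficients a0 through a4; the actual helper in the source may instead use numpy.polynomial.chebyshev.chebval.

import numpy as np

def chebyshev_func(x, a0, a1, a2, a3, a4):
    # Hedged sketch: evaluate a degree-4 polynomial in x (ages) with the
    # given coefficients; the real implementation may differ.
    x = np.asarray(x)
    return a0 + a1 * x + a2 * x ** 2 + a3 * x ** 3 + a4 * x ** 4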
Example 17
def test_run_TPI():
    # Test TPI.run_TPI function.  Provide inputs to the function and
    # ensure that the output returned matches previously saved results.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/run_TPI_inputs.pkl'))
    (income_tax_params, tpi_params, iterative_params, small_open_params,
     initial_values, SS_values, fiscal_params, biz_tax_params,
     output_dir, baseline_spending) = input_tuple
    tpi_params = tpi_params + [True]
    initial_values = initial_values + (0.0,)

    p = Specifications()
    (J, S, T, BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params

    new_param_values = {
        'J': J,
        'S': S,
        'T': T
    }
    # update parameters instance with new values for test
    p.update_specifications(new_param_values, raise_errors=False)
    (J, S, T, BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.small_open, ss_firm_r, ss_hh_r = small_open_params
    p.ss_firm_r = np.ones(p.T + p.S) * ss_firm_r
    p.ss_hh_r = np.ones(p.T + p.S) * ss_hh_r
    p.maxiter, p.mindist_SS, p.mindist_TPI = iterative_params
    (p.budget_balance, alpha_T, alpha_G, p.tG1, p.tG2, p.rho_G,
     p.debt_ratio_ss) = fiscal_params
    p.alpha_T = np.concatenate((alpha_T, np.ones(40) * alpha_T[-1]))
    p.alpha_G = np.concatenate((alpha_G, np.ones(40) * alpha_G[-1]))
    (tau_b, delta_tau) = biz_tax_params
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(etr_params, (1, 0, 2))[:p.T, :, :]
    p.mtrx_params = np.transpose(mtrx_params, (1, 0, 2))[:p.T, :, :]
    p.mtry_params = np.transpose(mtry_params, (1, 0, 2))[:p.T, :, :]
    p.lambdas = lambdas.reshape(p.J, 1)
    p.output = output_dir
    p.baseline_spending = baseline_spending
    p.num_workers = 1
    (K0, b_sinit, b_splus1init, factor, initial_b, initial_n,
     p.omega_S_preTP, initial_debt, D0) = initial_values

    # Need to run SS first to get results
    ss_outputs = SS.run_SS(p, None)

    if p.baseline:
        utils.mkdirs(os.path.join(p.baseline_dir, "SS"))
        ss_dir = os.path.join(p.baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(p.output_base, "SS"))
        ss_dir = os.path.join(p.output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))

    test_dict = TPI.run_TPI(p, None)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/run_TPI_outputs.pkl'))

    # delete key-value pairs that are not in both dicts
    del test_dict['etr_path'], test_dict['mtrx_path'], test_dict['mtry_path']
    del test_dict['bmat_s']
    test_dict['b_mat'] = test_dict.pop('bmat_splus1')
    test_dict['REVENUE'] = test_dict.pop('total_revenue')
    test_dict['IITpayroll_revenue'] = (test_dict['REVENUE'][:160] -
                                       test_dict['business_revenue'])
    del test_dict['T_P'], test_dict['T_BQ'], test_dict['T_W']
    del test_dict['resource_constraint_error'], test_dict['T_C']
    del test_dict['r_gov'], test_dict['r_hh']

    for k, v in expected_dict.items():
        try:
            assert(np.allclose(test_dict[k], v, rtol=1e-04, atol=1e-04))
        except ValueError:
            assert(np.allclose(test_dict[k], v[:p.T, :, :], rtol=1e-04,
                               atol=1e-04))
Example 18
def runner(output_base, baseline_dir, baseline=False, analytical_mtrs=True, age_specific=False, reform={}, user_params={}, guid='', run_micro=True):

    #from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import parameters, wealth, labor, demog, income, utils
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ssinit_dir = os.path.join(output_base, "SSinit")
    tpiinit_dir = os.path.join(output_base, "TPIinit")
    dirs = [saved_moments_dir, ssinit_dir, tpiinit_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline, analytical_mtrs=analytical_mtrs, age_specific=age_specific, 
                                     start_year=user_params['start_year'], reform=reform, guid=guid)
    print ("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print("updating frisch and associated")
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(user_params['frisch'],
                                                               run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print("updating g_y_annual and associated")
        ending_age = run_params['ending_age']
        starting_age = run_params['starting_age']
        S = run_params['S']
        g_y = (1 + user_params['g_y_annual'])**(float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)


    from ogusa import SS, TPI
    # Generate Wealth data moments
    wealth.get_wealth_data(run_params['lambdas'], run_params['J'], run_params['flag_graphs'], output_dir=output_base)

    # Generate labor data moments
    labor.labor_data_moments(run_params['flag_graphs'], output_dir=output_base)

    
    calibrate_model = False
    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age',
                'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta', 'E',
                'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI',
                'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon',
                'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params',
                'mtry_params','tau_payroll', 'tau_bq',
                'retire', 'mean_income_data', 'g_n_vector',
                'h_wealth', 'p_wealth', 'm_wealth',
                'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho']


    '''
    ------------------------------------------------------------------------
        Run SS 
    ------------------------------------------------------------------------
    '''

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params

    income_tax_params, ss_parameters, iterative_params, chi_params = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_SS(income_tax_params, ss_parameters, iterative_params, chi_params, baseline, 
                                     baseline_dir=baseline_dir)

    '''
    ------------------------------------------------------------------------
        Pickle SS results 
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/ss_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/ss_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))


    '''
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    '''

    sim_params['input_dir'] = output_base
    sim_params['baseline_dir'] = baseline_dir
    

    income_tax_params, tpi_params, iterative_params, initial_values, SS_values = TPI.create_tpi_params(**sim_params)

    # ss_outputs['income_tax_params'] = income_tax_params
    # ss_outputs['wealth_tax_params'] = wealth_tax_params
    # ss_outputs['ellipse_params'] = ellipse_params
    # ss_outputs['parameters'] = parameters
    # ss_outputs['N_tilde'] = N_tilde
    # ss_outputs['omega_stationary'] = omega_stationary
    # ss_outputs['K0'] = K0
    # ss_outputs['b_sinit'] = b_sinit
    # ss_outputs['b_splus1init'] = b_splus1init
    # ss_outputs['L0'] = L0
    # ss_outputs['Y0'] = Y0
    # ss_outputs['r0'] = r0
    # ss_outputs['BQ0'] = BQ0
    # ss_outputs['T_H_0'] = T_H_0
    # ss_outputs['factor_ss'] = factor
    # ss_outputs['tax0'] = tax0
    # ss_outputs['c0'] = c0
    # ss_outputs['initial_b'] = initial_b
    # ss_outputs['initial_n'] = initial_n
    # ss_outputs['tau_bq'] = tau_bq
    # ss_outputs['g_n_vector'] = g_n_vector
    # ss_outputs['output_dir'] = output_base


    # with open("ss_outputs.pkl", 'wb') as fp:
    #     pickle.dump(ss_outputs, fp)

    w_path, r_path, T_H_path, BQ_path, Y_path = TPI.run_TPI(income_tax_params, 
        tpi_params, iterative_params, initial_values, SS_values, output_dir=output_base)


    print("getting to here....")
    TPI.TP_solutions(w_path, r_path, T_H_path, BQ_path, **ss_outputs)
    print("took {0} seconds to get that part done.".format(time.time() - tick))
Example 19
def minstat(params, *args):
    '''
    --------------------------------------------------------------------
    This function generates the weighted sum of squared differences
    between the model and data moments.

    INPUTS:
    params = [5,] vector, guesses for the Chebyshev coefficients a0-a4
    args   = length 5 tuple, variables needed for minimizer

    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
    SS.run_SS()
    calc_moments()

    OBJECTS CREATED WITHIN FUNCTION:
    ss_output     = dictionary, variables from SS of model
    model_moments = [J+2+S,] array, moments from the model solution
    distance      = scalar, weighted, squared deviation between data and model moments

    RETURNS: distance
    --------------------------------------------------------------------
    '''

    a0, a1, a2, a3, a4 = params
    p, client, data_moments, W, ages = args
    chi_n = np.ones(p.S)
    #chi_n = chebyshev_func(ages, a0, a1, a2, a3, a4)
    chi_n[:p.S // 2 + 5] = chebyshev_func(ages, a0, a1, a2, a3, a4)
    #chi_n[p.S // 2 + 5:] = sixty_plus_chi
    slope = chi_n[p.S // 2 + 5 - 1] - chi_n[p.S // 2 + 5 - 2]
    chi_n[p.S // 2 + 4:] = ((np.linspace(65, 100, 36) - 65) * slope
                            + chi_n[p.S // 2 + 4])
    chi_n[chi_n < 0.5] = 0.5

    p.chi_n = chi_n
    #print(chi_n)

    #with open("output.txt", "a") as text_file:
    #    text_file.write('\nPARAMS AT START\n' + str(params) + '\n')
    print("-----------------------------------------------------")
    print('PARAMS AT START' + str(params))
    print("-----------------------------------------------------")

    try:
        ss_output = SS.run_SS(p, client)
    except Exception:
        #with open("output.txt", "a") as text_file:
        #    text_file.write('\nSteady state not found\n' + str(params) + '\n')
        print("-----------------------------------------------------")
        print("Steady state not found")
        print("-----------------------------------------------------")
        return 1e100

    #with open("output.txt", "a") as text_file:
    #    text_file.write('\nPARAMS AT END\n' + str(params) + '\n')
    print("-----------------------------------------------------")
    print('PARAMS AT END', params)
    print("-----------------------------------------------------")
    model_moments = calc_moments(ss_output, p.omega_SS, p.lambdas, p.S, p.J)
    #with open("output.txt", "a") as text_file:
    #    text_file.write('\nModel moments:\n' + str(model_moments) + '\n')
    print('Model moments:', model_moments)
    print("-----------------------------------------------------")

    # distance with levels
    distance = np.dot(
        np.dot((np.array(model_moments[:9]) - np.array(data_moments)).T, W),
        np.array(model_moments[:9]) - np.array(data_moments))
    #distance = ((np.array(model_moments) - np.array(data_moments))**2).sum()
    #with open("output.txt", "a") as text_file:
    #    text_file.write('\nDATA and MODEL DISTANCE: ' + str(distance) + '\n')
    print('DATA and MODEL DISTANCE: ', distance)

    # # distance with percentage diffs
    # distance = (((model_moments - data_moments)/data_moments)**2).sum()

    return distance
Example 20
def runner(output_base, baseline_dir, test=False, time_path=True,
           baseline=False, analytical_mtrs=False, age_specific=False,
           reform={}, user_params={}, guid='', run_micro=True,
           small_open=False, budget_balance=False, baseline_spending=False):

    #from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import parameters, demographics, income, utils
    from ogusa import txfunc

    tick = time.time()
    
    # Make sure options are internally consistent
    if baseline and baseline_spending:
        print('Inconsistent options. Setting <baseline_spending> to False, '
              'leaving <baseline> True.')
        baseline_spending = False
    if budget_balance and baseline_spending:
        print('Inconsistent options. Setting <baseline_spending> to False, '
              'leaving <budget_balance> True.')
        baseline_spending = False

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [saved_moments_dir, ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline, analytical_mtrs=analytical_mtrs, age_specific=age_specific,
                                     start_year=user_params['start_year'], reform=reform, guid=guid)
    print('In runner, baseline is ', baseline)
    run_params = ogusa.parameters.get_parameters(test=test, baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs
    run_params['small_open'] = small_open
    run_params['budget_balance'] = budget_balance

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print("updating frisch and associated")
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(user_params['frisch'],
                                                               run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)
    if 'debt_ratio_ss' in user_params:
        run_params['debt_ratio_ss'] = user_params['debt_ratio_ss']

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print("updating g_y_annual and associated")
        ending_age = run_params['ending_age']
        starting_age = run_params['starting_age']
        S = run_params['S']
        g_y = (1 + user_params['g_y_annual'])**(float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)
        
    # Modify transfer & spending ratios based on user input.
    if 'T_shifts' in user_params:
        if not baseline_spending:
            print('updating ALPHA_T with T_shifts in first',
                  user_params['T_shifts'].size, 'periods.')
            T_shifts = np.concatenate((user_params['T_shifts'],
                                       np.zeros(run_params['ALPHA_T'].size -
                                                user_params['T_shifts'].size)),
                                      axis=0)
            run_params['ALPHA_T'] = run_params['ALPHA_T'] + T_shifts
    if 'G_shifts' in user_params:
        if not baseline_spending:
            print('updating ALPHA_G with G_shifts in first',
                  user_params['G_shifts'].size, 'periods.')
            G_shifts = np.concatenate((user_params['G_shifts'],
                                       np.zeros(run_params['ALPHA_G'].size -
                                                user_params['G_shifts'].size)),
                                      axis=0)
            run_params['ALPHA_G'] = run_params['ALPHA_G'] + G_shifts

    from ogusa import SS, TPI

    calibrate_model = False
    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age',
                'beta', 'sigma', 'alpha', 'gamma', 'epsilon', 'nu', 'Z', 'delta', 'E',
                'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI',
                'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon',
                'small_open', 'budget_balance', 'ss_firm_r', 'ss_hh_r', 'tpi_firm_r', 'tpi_hh_r',
                'tG1', 'tG2', 'alpha_T', 'alpha_G', 'ALPHA_T', 'ALPHA_G', 'rho_G', 'debt_ratio_ss',
                'tau_b', 'delta_tau',
                'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params',
                'mtry_params','tau_payroll', 'tau_bq',
                'retire', 'mean_income_data', 'g_n_vector',
                'h_wealth', 'p_wealth', 'm_wealth',
                'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'imm_rates','e', 'rho',
                'initial_debt','omega_S_preTP']

    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params
    income_tax_params, ss_parameters, iterative_params, chi_params, small_open_params = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_SS(income_tax_params, ss_parameters, iterative_params, chi_params, small_open_params, baseline, baseline_spending,
                                     baseline_dir=baseline_dir)

    '''
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))

    if time_path:
        '''
        ------------------------------------------------------------------------
            Run the TPI simulation
        ------------------------------------------------------------------------
        '''

        sim_params['baseline'] = baseline
        sim_params['baseline_spending'] = baseline_spending
        sim_params['input_dir'] = output_base
        sim_params['baseline_dir'] = baseline_dir


        income_tax_params, tpi_params, iterative_params, small_open_params, initial_values, SS_values, fiscal_params, biz_tax_params = TPI.create_tpi_params(**sim_params)

        tpi_output, macro_output = TPI.run_TPI(income_tax_params, tpi_params, iterative_params, small_open_params, initial_values, 
                                               SS_values, fiscal_params, biz_tax_params, output_dir=output_base, baseline_spending=baseline_spending)

        '''
        ------------------------------------------------------------------------
            Pickle TPI results
        ------------------------------------------------------------------------
        '''
        tpi_dir = os.path.join(output_base, "TPI")
        utils.mkdirs(tpi_dir)
        tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
        pickle.dump(tpi_output, open(tpi_vars, "wb"))

        tpi_dir = os.path.join(output_base, "TPI")
        utils.mkdirs(tpi_dir)
        tpi_vars = os.path.join(tpi_dir, "TPI_macro_vars.pkl")
        pickle.dump(macro_output, open(tpi_vars, "wb"))


        print("Time path iteration complete.")
    print("It took {0} seconds to get that part done.".format(time.time() - tick))
Example 21
def runner(output_base,
           baseline_dir,
           test=False,
           time_path=True,
           baseline=True,
           iit_reform={},
           og_spec={},
           guid='',
           run_micro=True,
           data=None,
           client=None,
           num_workers=1):
    '''
    This function runs the OG-USA model, solving for the steady-state
    and (optionally) the time path equilibrium.

    Args:
        output_base (str): path to save output to
        baseline_dir (str): path where baseline model results are saved
        test (bool): whether to run model in test mode (which has
            a smaller state space and higher tolerances for solution)
        time_path (bool): whether to solve for the time path equilibrium
        baseline (bool): whether the model run is the baseline run
        iit_reform (dict): Tax-Calculator policy dictionary
        og_spec (dict): dictionary with updates to default
            parameters in OG-USA
        guid (str): id for OG-USA run
        run_micro (bool): whether to estimate tax functions from micro
            data or load saved parameters from pickle file
        data (str or Pandas DataFrame): path to or data to use in
            Tax-Calculator
        client (Dask client object): client
        num_workers (int): number of workers to use for parallelization
            with Dask

    Returns:
        None

    '''

    tick = time.time()
    # Create output directory structure
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError:
            pass

    print('In runner, baseline is ', baseline)

    # Get parameter class
    # Note - set run_micro false when initially load class
    # Update later with call to spec.get_tax_function_parameters()
    spec = Specifications(run_micro=False,
                          output_base=output_base,
                          baseline_dir=baseline_dir,
                          test=test,
                          time_path=time_path,
                          baseline=baseline,
                          iit_reform=iit_reform,
                          guid=guid,
                          data=data,
                          client=client,
                          num_workers=num_workers)

    spec.update_specifications(og_spec)
    print('path for tax functions: ', spec.output_base)
    spec.get_tax_function_parameters(client, run_micro)
    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''
    ss_outputs = SS.run_SS(spec, client=client)
    '''
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)
        print('JUST SAVED SS output to ', ss_dir)
        # Save pickle with parameter values for the run
        param_dir = os.path.join(baseline_dir, "model_params.pkl")
        with open(param_dir, "wb") as f:
            cloudpickle.dump((spec), f)
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)
        # Save pickle with parameter values for the run
        param_dir = os.path.join(output_base, "model_params.pkl")
        with open(param_dir, "wb") as f:
            cloudpickle.dump((spec), f)

    if time_path:
        '''
        ------------------------------------------------------------------------
            Run the TPI simulation
        ------------------------------------------------------------------------
        '''
        tpi_output = TPI.run_TPI(spec, client=client)
        '''
        ------------------------------------------------------------------------
            Pickle TPI results
        ------------------------------------------------------------------------
        '''
        tpi_dir = os.path.join(output_base, "TPI")
        utils.mkdirs(tpi_dir)
        tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
        with open(tpi_vars, "wb") as f:
            pickle.dump(tpi_output, f)

        print("Time path iteration complete.")
    print("It took {0} seconds to get that part done.".format(time.time() -
                                                              tick))
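
# A minimal usage sketch for runner() (the paths, the Dask client, and the
# og_spec update below are hypothetical placeholders, not values taken from
# this example):
#
#     from dask.distributed import Client
#     client = Client(n_workers=2)
#     runner(output_base='./OUTPUT_BASELINE',
#            baseline_dir='./OUTPUT_BASELINE',
#            test=True, time_path=False, baseline=True,
#            og_spec={'frisch': 0.41}, run_micro=False,
#            client=client, num_workers=2)
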
def chi_estimate(p, client=None):
    '''
    Iteratively calibrate the chi_n (disutility of labor) parameters so
    that the model's labor supply moments match the data labor moments.

    COPYRIGHT MARCH 22, 2019 KEI IRIZAWA AND ADAM OPPENHEIMER
    '''
    # Generate labor data moments
    labor_hours = np.array([167, 165, 165, 165, 165, 166, 165, 165, 164])#, 166, 164])
    labor_part_rate = np.array([0.69, 0.849, 0.849, 0.847, 0.847, 0.859, 0.859, 0.709, 0.709])#, 0.212, 0.212])
    employ_rate = np.array([0.937, 0.954, 0.954, 0.966, 0.966, 0.97, 0.97, 0.968, 0.968])#, 0.978, 0.978])
    labor_hours_adj = labor_hours * labor_part_rate * employ_rate
    labor_moments = labor_hours_adj * 12 / (365 * 17.5)
    data_moments_trunc = np.array(list(labor_moments.flatten()))
    #ages = np.array([20, 25, 30, 35, 40, 45, 50, 55, 60]) + 2.5
    #labor_fun = si.splrep(ages, data_moments_trunc)
    #ages_full = np.linspace(21, 65, p.S // 2 + 5)
    #data_moments = si.splev(ages_full, labor_fun)
    data_moments = np.repeat(data_moments_trunc, 5) # Set labor values to equal average over bin

    # a0 = 1.25108169e+03
    # a1 = -1.19873316e+02
    # a2 = 2.20570513e+00
    # a3 = -1.76536132e-02
    # a4 = 5.19262962e-05

    # chi_n = np.ones(p.S)
    # chi_n[:p.S // 2 + 5] = chebyshev_func(ages_full, a0, a1, a2, a3, a4)
    # slope = chi_n[p.S // 2 + 5 - 1] - chi_n[p.S // 2 + 5 - 2]
    # chi_n[p.S // 2 + 5 - 1:] = (np.linspace(65, 100, 36) - 65) * slope + chi_n[p.S // 2 + 5 - 1]
    # chi_n[chi_n < 0.5] = 0.5
    chi_n = pickle.load(open("chi_n.p", "rb"))
    p.chi_n = chi_n

    model_moments = find_moments(p, client)
    
    labor_below = np.zeros(p.S // 2 + 5)
    labor_above = np.ones(p.S // 2 + 5) * np.inf
    chi_below = np.zeros(p.S // 2 + 5)
    chi_above = np.zeros(p.S // 2 + 5)

    chi_prev = np.zeros(p.S // 2 + 5)
    consec_above = np.zeros(p.S // 2 + 5)
    consec_below = np.zeros(p.S // 2 + 5)

    print('About to start the while loop')
    eps_val = 0.001
    #still_calibrate = ((abs(model_moments - data_moments) > eps_val) & (chi_n[:45] > 0.5))\
    #    | ((chi_n[:45] <= 0.5) & (labor_above > data_moments))
    still_calibrate = (abs(model_moments - data_moments) > eps_val) & ((chi_n[:45] > 0.5)\
        | ((chi_n[:45] <= 0.5) & (labor_above > data_moments)))
    moments_calibrated_per_step = []

    while still_calibrate.any():
        ### Check that 2 consecutive chi_n estimates aren't equal
        if (chi_n[:45] == chi_prev).all():
            raise RuntimeError('Calibration failure. No chi_n values changed between guesses')
        chi_prev = np.copy(chi_n[:45])
        ### Set above/below arrays based on model moments
        above_data_below_above = (model_moments > data_moments) #& (model_moments < labor_above)
        below_data_above_below = (model_moments < data_moments) #& (model_moments > labor_below)
        # Had to comment out checking if closer than previous guess because if
        # the result moves, the convex combination might be outside the range
        # and it gets stuck in an infinite loop because the guess never improves
        labor_above[above_data_below_above] = model_moments[above_data_below_above]
        chi_above[above_data_below_above] = chi_n[:45][above_data_below_above]
        labor_below[below_data_above_below] = model_moments[below_data_above_below]
        chi_below[below_data_above_below] = chi_n[:45][below_data_above_below]
        ### Set consecutive above/below values
        consec_above[above_data_below_above] += 1
        consec_above[below_data_above_below] = 0
        consec_below[below_data_above_below] += 1
        consec_below[above_data_below_above] = 0
        consec = (consec_above >= 4) | (consec_below >= 4)
        ### Create arrays for labor boundaries
        print(str(np.sum(still_calibrate)) + ' labor moments are still being calibrated')
        moments_calibrated_per_step.append(np.sum(still_calibrate))
        print('Moments calibrated at each iteration (including this iteration):')
        print(moments_calibrated_per_step)
        both = (((labor_below > 0) & (labor_above < np.inf)) |\
            ((labor_below == 0) & (labor_above == np.inf))) & (still_calibrate)
        above = (labor_below == 0) & (labor_above < np.inf) & (still_calibrate)
        below = (labor_below > 0) & (labor_above == np.inf) & (still_calibrate)
        print(str(np.sum(both)) + ' labor moments are being convexly shifted')
        print(str(np.sum(above)) + ' labor moments are being shifted down')
        print(str(np.sum(below)) + ' labor moments are being shifted up')
        ### Calculate convex combination factor
        above_dist = abs(labor_above - data_moments)
        below_dist = abs(data_moments - labor_below)
        total_dist = above_dist + below_dist
        above_factor = below_dist / total_dist
        below_factor = above_dist / total_dist
        #### Adjust by convex combination factor
        chi_n[:45][both] = np.copy(below_factor[both] * chi_below[both] +\
            above_factor[both] * chi_above[both])
        # NaN guard; note that chained fancy indexing
        # (chi_n[:45][both][invalid_factor] = ...) assigns into a temporary
        # copy, so build explicit indices instead.
        invalid_factor = np.isnan(chi_n[:45][both])
        both_idx = np.where(both)[0]
        chi_n[both_idx[invalid_factor]] = 0.5 * (
            chi_below[both][invalid_factor] + chi_above[both][invalid_factor])
        ### Adjust values that aren't bounded both above and below by labor error factors
        error_factor = model_moments / data_moments
        chi_n[:45][above] = np.copy(np.minimum(error_factor[above] * chi_above[above], 1.02 * chi_above[above]))#np.copy(1.02 * chi_above[above])
        chi_n[:45][below] = np.copy(np.maximum(error_factor[below] * chi_below[below], 0.98 * chi_below[below]))#np.copy(0.98 * chi_below[below])
        ### Solve moments using new chi_n guesses
        p.chi_n = chi_n
        model_moments = find_moments(p, client)
        print('-------------------------------')
        print('New model moments:')
        print(list(model_moments))
        print('Chi_n:')
        print(list(chi_n))
        print('-------------------------------')
        print('Labor moment differences:')
        print(model_moments[still_calibrate] - data_moments[still_calibrate])
        print('-------------------------------')
        ### Redefine still_calibrate and both based on new model moments
        still_calibrate = (abs(model_moments - data_moments) > eps_val) & ((chi_n[:45] > 0.5)\
            | ((chi_n[:45] <= 0.5) & (labor_above > data_moments)))
        both = (((labor_below > 0) & (labor_above < np.inf)) |\
            ((labor_below == 0) & (labor_above == np.inf))) & (still_calibrate)
        print('Chi differences:')
        print(chi_below[still_calibrate] - chi_above[still_calibrate])
        print('-------------------------------')
        print('Chi below:')
        print(chi_below[still_calibrate])
        print('-------------------------------')
        print('Chi above:')
        print(chi_above[still_calibrate])
        print('-------------------------------')
        print('Labor above:')
        print(labor_above[still_calibrate])
        print('-------------------------------')
        print('Labor below:')
        print(labor_below[still_calibrate])
        print('-------------------------------')
        ### Fix stuck boundaries
        #still_calibrate_stuck_1 = ((abs(model_moments - data_moments) > eps_val) & (chi_n[:45] > 0.5))\
        #| ((chi_n[:45] <= 0.5) & (labor_above > data_moments))
        #still_calibrate_stuck_2 = ((abs(model_moments - data_moments) > 10 * eps_val) & (chi_n[:45] > 0.5))\
        #| ((chi_n[:45] <= 0.5) & (labor_above > data_moments))
        #stuck_1 = ((chi_below - chi_above) < 10 * eps_val) & (still_calibrate_stuck_1)
        #stuck_2 = ((chi_below - chi_above) < 1e3 * eps_val) & (still_calibrate_stuck_2)
        #stuck = (stuck_1) | (stuck_2)
        stuck = ((chi_below - chi_above) < 10) & (consec) & (both)
        if (stuck).any():
            consec_above[stuck] = 0
            consec_below[stuck] = 0
            check_above_stuck = (stuck) & (model_moments > data_moments)
            check_below_stuck = (stuck) & (model_moments < data_moments)
            print(str(np.sum(check_above_stuck)) + ' labor moments are being checked to see if they are too high')
            print(str(np.sum(check_below_stuck)) + ' labor moments are being checked to see if they are too low')
            ### Make sure chi_n bounds are still valid
            check_chi_n = chi_n.copy()
            check_chi_n[:45][check_above_stuck] = np.copy(chi_below[check_above_stuck])
            check_chi_n[:45][check_below_stuck] = np.copy(chi_above[check_below_stuck])
            p.chi_n = check_chi_n
            check_model_moments = find_moments(p, client)
            above_stuck = (check_above_stuck) & (check_model_moments > data_moments)
            below_stuck = (check_below_stuck) & (check_model_moments < data_moments)
            print(str(np.sum(above_stuck)) + ' labor moments are being unstuck from being too high')
            print(str(np.sum(below_stuck)) + ' labor moments are being unstuck from being too low')
            total_stuck = str(np.sum(above_stuck) + np.sum(below_stuck))
            moments_calibrated_per_step.append(str(np.sum(stuck)) + '(checked) ' + total_stuck + '(stuck)')
            print('Moments calibrated at each iteration (including this iteration):')
            print(moments_calibrated_per_step)
            labor_below[above_stuck] = 0
            labor_above[below_stuck] = np.inf
            chi_below[above_stuck] *= 2
            chi_above[below_stuck] *= 0.5
            still_calibrate = (abs(check_model_moments - data_moments) > eps_val) & ((check_chi_n[:45] > 0.5)\
                | ((check_chi_n[:45] <= 0.5) & (labor_above > data_moments)))
            if not (still_calibrate).any():
                chi_n = check_chi_n
                model_moments = check_model_moments
        else:
            print('No labor moments are stuck')
        

    print('-------------------------------')
    print('Calibration complete')
    print('Final Chi_n:')
    print(list(chi_n))
    print('-------------------------------')
    print('Final model moments:')
    print(list(model_moments))
    print('-------------------------------')
    print('Moments calibrated at each iteration:')
    print(moments_calibrated_per_step)
    print('Number of iterations to solve:')
    print(len(moments_calibrated_per_step))
    print('-------------------------------')

    with open("output.txt", "a") as text_file:
        text_file.write('\nFinal model moments: ' + str(model_moments) + '\n')
        text_file.write('\nFinal chi_n: ' + str(chi_n) + '\n')
    pickle.dump(chi_n, open("chi_n.p", "wb"))
    # Halt here deliberately so the SS run below is not executed as part of
    # the calibration routine.
    raise SystemExit('chi_n calibration complete')
    ss_output = SS.run_SS(p)
    return ss_output
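
# The calibration loop above is, for each labor moment, a bracketing scheme:
# chi_below / chi_above hold the chi_n guesses whose model moments bracket the
# data moment, and the next guess is a distance-weighted convex combination of
# the two bounds. A stand-alone sketch of that single-moment update (an
# illustration of the logic above, not a function defined in this code):
#
#     def bracket_update(chi_below, chi_above, m_below, m_above, target):
#         above_dist = abs(m_above - target)
#         below_dist = abs(target - m_below)
#         total_dist = above_dist + below_dist
#         above_factor = below_dist / total_dist
#         below_factor = above_dist / total_dist
#         return below_factor * chi_below + above_factor * chi_above
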
Esempio n. 23
0
def runner(output_base,
           baseline_dir,
           test=False,
           time_path=True,
           baseline=True,
           reform={},
           user_params={},
           guid='',
           run_micro=True,
           data=None,
           client=None,
           num_workers=1):

    tick = time.time()
    # Create output directory structure
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError:
            pass

    print('In runner, baseline is ', baseline)

    # Get parameter class
    # Note: set run_micro=False when initially loading the class;
    # update later with a call to spec.get_tax_function_parameters()
    spec = Specifications(run_micro=False,
                          output_base=output_base,
                          baseline_dir=baseline_dir,
                          test=test,
                          time_path=time_path,
                          baseline=baseline,
                          reform=reform,
                          guid=guid,
                          data=data,
                          client=client,
                          num_workers=num_workers)

    spec.update_specifications(user_params)
    print('path for tax functions: ', spec.output_base)
    spec.get_tax_function_parameters(client, run_micro)
    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''
    ss_outputs = SS.run_SS(spec, client=client)
    '''
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        # Save pickle with parameter values for the run
        param_dir = os.path.join(baseline_dir, "model_params.pkl")
        pickle.dump(spec, open(param_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        # Save pickle with parameter values for the run
        param_dir = os.path.join(output_base, "model_params.pkl")
        pickle.dump(spec, open(param_dir, "wb"))

    if time_path:
        '''
        ------------------------------------------------------------------------
            Run the TPI simulation
        ------------------------------------------------------------------------
        '''
        tpi_output = TPI.run_TPI(spec, client=client)
        '''
        ------------------------------------------------------------------------
            Pickle TPI results
        ------------------------------------------------------------------------
        '''
        tpi_dir = os.path.join(output_base, "TPI")
        utils.mkdirs(tpi_dir)
        tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
        pickle.dump(tpi_output, open(tpi_vars, "wb"))

        print("Time path iteration complete.")
    print("It took {0} seconds to get that part done.".format(time.time() -
                                                              tick))
Esempio n. 24
0
def runner_SS(output_base,
              baseline_dir,
              baseline=False,
              analytical_mtrs=True,
              age_specific=False,
              reform={},
              user_params={},
              guid='',
              run_micro=True):

    from ogusa import parameters, demographics, income, utils
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [saved_moments_dir, ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline,
                                     analytical_mtrs=analytical_mtrs,
                                     age_specific=age_specific,
                                     start_year=user_params['start_year'],
                                     reform=reform,
                                     guid=guid)
    print("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating fricsh and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params['frisch'], run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print "updating g_y_annual and associated"
        g_y = (1 + user_params['g_y_annual'])**(
            float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)

    from ogusa import SS, TPI
    '''
    ****
    CALL CALIBRATION here if boolean flagged

    ****
    '''
    calibrate_model = False
    # if calibrate_model:
    #     chi_b, chi_n = calibrate.(income_tax_params, ss_params, iterative_params, chi_params, baseline,
    #                                  calibrate_model, output_dir=output_base, baseline_dir=baseline_dir)

    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = [
        'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta',
        'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter',
        'mindist_SS', 'mindist_TPI', 'analytical_mtrs', 'b_ellipse',
        'k_ellipse', 'upsilon', 'chi_b_guess', 'chi_n_guess', 'etr_params',
        'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq', 'retire',
        'mean_income_data', 'g_n_vector', 'h_wealth', 'p_wealth', 'm_wealth',
        'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho'
    ]
    '''
    ------------------------------------------------------------------------
        Run SS 
    ------------------------------------------------------------------------
    '''

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params

    income_tax_params, ss_params, iterative_params, chi_params = SS.create_steady_state_parameters(
        **sim_params)

    ss_outputs = SS.run_SS(income_tax_params,
                           ss_params,
                           iterative_params,
                           chi_params,
                           baseline,
                           baseline_dir=baseline_dir)
    '''
    ------------------------------------------------------------------------
        Pickle SS results 
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
Esempio n. 25
0
def runner(output_base, baseline_dir, test=False, time_path=True,
           baseline=True, reform={}, user_params={}, guid='',
           run_micro=True, data=None, client=None, num_workers=1):

    tick = time.time()
    # Create output directory structure
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError:
            pass

    print('In runner, baseline is ', baseline)

    # Get parameter class
    # Note: set run_micro=False when initially loading the class;
    # update later with a call to spec.get_tax_function_parameters()
    spec = Specifications(run_micro=False, output_base=output_base,
                          baseline_dir=baseline_dir, test=test,
                          time_path=time_path, baseline=baseline,
                          reform=reform, guid=guid, data=data,
                          client=client, num_workers=num_workers)

    spec.update_specifications(user_params)
    print('path for tax functions: ', spec.output_base)
    spec.get_tax_function_parameters(client, run_micro)

    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''
    ss_outputs = SS.run_SS(spec, client=client)

    '''
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        # Save pickle with parameter values for the run
        param_dir = os.path.join(baseline_dir, "model_params.pkl")
        pickle.dump(spec, open(param_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        # Save pickle with parameter values for the run
        param_dir = os.path.join(output_base, "model_params.pkl")
        pickle.dump(spec, open(param_dir, "wb"))

    if time_path:
        '''
        ------------------------------------------------------------------------
            Run the TPI simulation
        ------------------------------------------------------------------------
        '''
        tpi_output = TPI.run_TPI(spec, client=client)

        '''
        ------------------------------------------------------------------------
            Pickle TPI results
        ------------------------------------------------------------------------
        '''
        tpi_dir = os.path.join(output_base, "TPI")
        utils.mkdirs(tpi_dir)
        tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
        pickle.dump(tpi_output, open(tpi_vars, "wb"))

        print("Time path iteration complete.")
    print("It took {0} seconds to get that part done.".format(
        time.time() - tick))
Esempio n. 26
0
def runner(output_base,
           baseline_dir,
           baseline=False,
           analytical_mtrs=True,
           age_specific=False,
           reform=0,
           fix_transfers=False,
           user_params={},
           guid='',
           run_micro=True,
           calibrate_model=False):

    from ogusa import parameters, demographics, income, utils

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [saved_moments_dir, ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    print("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline,
                                                 reform=reform,
                                                 guid=guid,
                                                 user_modifiable=True)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating fricsh and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params['frisch'], run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'sigma' in user_params:
        print "updating sigma"
        run_params['sigma'] = user_params['sigma']
        run_params.update(user_params)

    from ogusa import SS, TPI

    calibrate_model = False
    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = [
        'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta',
        'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter',
        'mindist_SS', 'mindist_TPI', 'analytical_mtrs', 'b_ellipse',
        'k_ellipse', 'upsilon', 'chi_b_guess', 'chi_n_guess', 'etr_params',
        'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq', 'retire',
        'mean_income_data', 'g_n_vector', 'h_wealth', 'p_wealth', 'm_wealth',
        'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'imm_rates', 'e', 'rho',
        'omega_S_preTP'
    ]
    '''
    ------------------------------------------------------------------------
        If using income tax reform, need to determine parameters that yield
        same SS revenue as the wealth tax reform.
    ------------------------------------------------------------------------
    '''
    if reform == 1:
        sim_params = {}
        for key in param_names:
            sim_params[key] = run_params[key]

        sim_params['output_dir'] = output_base
        sim_params['run_params'] = run_params
        income_tax_params, ss_params, iterative_params, chi_params = SS.create_steady_state_parameters(
            **sim_params)

        # find SS revenue from wealth tax reform
        reform3_ss_dir = os.path.join(
            "./OUTPUT_WEALTH_REFORM" + '/sigma' + str(run_params['sigma']),
            "SS/SS_vars.pkl")
        reform3_ss_solutions = pickle.load(open(reform3_ss_dir, "rb"))
        receipts_to_match = reform3_ss_solutions['net_tax_receipts']

        # create function to match SS revenue
        # def matcher(d_guess, params):
        #     income_tax_params, receipts_to_match, ss_params, iterative_params,\
        #                       chi_params, baseline, baseline_dir = params
        #     analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
        #     etr_params[:,3] = d_guess
        #     mtrx_params[:,3] = d_guess
        #     mtry_params[:,3] = d_guess
        #     income_tax_params = analytical_mtrs, etr_params, mtrx_params, mtry_params
        #     ss_outputs = SS.run_SS(income_tax_params, ss_params, iterative_params,
        #                       chi_params, baseline ,baseline_dir=baseline_dir, output_base=output_base)
        #
        #     receipts_new = ss_outputs['T_Hss'] + ss_outputs['Gss']
        #     error = abs(receipts_to_match - receipts_new)
        #     if d_guess <= 0:
        #         error = 1e14
        #     print 'Error in taxes:', error
        #     return error

        # print 'Computing new income tax to match wealth tax'
        d_guess = 0.413  # initial guess
        # import scipy.optimize as opt
        # params = [income_tax_params, receipts_to_match, ss_params, iterative_params,
        #                   chi_params, baseline, baseline_dir]
        # new_d_inc = opt.fsolve(matcher, d_guess, args=params, xtol=1e-6)
        new_d_inc = 0.413  # value that comes out given default parameter values (0.503 if fix_transfers=True, 0.453 if False)

        print('\tOld income tax:', d_guess)
        print('\tNew income tax:', new_d_inc)
        analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params

        etr_params[:, 3] = new_d_inc
        mtrx_params[:, 3] = new_d_inc
        mtry_params[:, 3] = new_d_inc

        run_params['etr_params'] = np.tile(
            np.reshape(etr_params, (run_params['S'], 1, etr_params.shape[1])),
            (1, run_params['BW'], 1))
        run_params['mtrx_params'] = np.tile(
            np.reshape(mtrx_params,
                       (run_params['S'], 1, mtrx_params.shape[1])),
            (1, run_params['BW'], 1))
        run_params['mtry_params'] = np.tile(
            np.reshape(mtry_params,
                       (run_params['S'], 1, mtry_params.shape[1])),
            (1, run_params['BW'], 1))
    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params

    income_tax_params, ss_parameters, iterative_params, chi_params = SS.create_steady_state_parameters(
        **sim_params)

    analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
    print('ETR param shape = ', etr_params.shape)

    ss_outputs = SS.run_SS(income_tax_params,
                           ss_parameters,
                           iterative_params,
                           chi_params,
                           baseline,
                           fix_transfers=fix_transfers,
                           baseline_dir=baseline_dir)
    '''
    ------------------------------------------------------------------------
        Pickle SS results and parameters of run
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        param_dir = os.path.join(baseline_dir, "run_parameters.pkl")
        pickle.dump(sim_params, open(param_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        param_dir = os.path.join(output_base, "run_parameters.pkl")
        pickle.dump(sim_params, open(param_dir, "wb"))
    '''
    ------------------------------------------------------------------------
        Run the TPI simulation
    ------------------------------------------------------------------------
    '''

    sim_params['baseline'] = baseline
    sim_params['input_dir'] = output_base
    sim_params['baseline_dir'] = baseline_dir

    income_tax_params, tpi_params, iterative_params, initial_values, SS_values = TPI.create_tpi_params(
        **sim_params)

    tpi_output, macro_output = TPI.run_TPI(income_tax_params,
                                           tpi_params,
                                           iterative_params,
                                           initial_values,
                                           SS_values,
                                           fix_transfers=fix_transfers,
                                           output_dir=output_base)
    '''
    ------------------------------------------------------------------------
        Pickle TPI results
    ------------------------------------------------------------------------
    '''
    tpi_dir = os.path.join(output_base, "TPI")
    utils.mkdirs(tpi_dir)
    tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
    pickle.dump(tpi_output, open(tpi_vars, "wb"))

    tpi_dir = os.path.join(output_base, "TPI")
    utils.mkdirs(tpi_dir)
    tpi_vars = os.path.join(tpi_dir, "TPI_macro_vars.pkl")
    pickle.dump(macro_output, open(tpi_vars, "wb"))

    print "Time path iteration complete.  It"
    print "took {0} seconds to get that part done.".format(time.time() - tick)
Esempio n. 27
0
def test_run_SS(input_path, expected_path):
    # Test the SS.run_SS function: provide inputs to the function and
    # ensure that the output returned matches previously saved results.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', input_path))
    (income_tax_params, ss_params, iterative_params, chi_params,
     small_open_params, baseline, baseline_spending, baseline_dir) =\
        input_tuple
    p = Specifications()
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n_ss, tau_payroll,
     tau_bq, p.rho, p.omega_SS, p.budget_balance, alpha_T,
     p.debt_ratio_ss, tau_b, delta_tau, lambdas, imm_rates, p.e,
     retire, p.mean_income_data, h_wealth, p_wealth, m_wealth,
     p.b_ellipse, p.upsilon) = ss_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.alpha_T = np.ones(p.T + p.S) * alpha_T
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.lambdas = lambdas.reshape(p.J, 1)
    p.imm_rates = imm_rates.reshape(1, p.S)
    p.tax_func_type = 'DEP'
    p.baseline = baseline
    p.baseline_spending = baseline_spending
    p.baseline_dir = baseline_dir
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(etr_params.reshape(
        p.S, 1, etr_params.shape[-1]), (1, 0, 2))
    p.mtrx_params = np.transpose(mtrx_params.reshape(
        p.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
    p.mtry_params = np.transpose(mtry_params.reshape(
        p.S, 1, mtry_params.shape[-1]), (1, 0, 2))
    p.maxiter, p.mindist_SS = iterative_params
    p.chi_b, p.chi_n = chi_params
    p.small_open, firm_r, hh_r = small_open_params
    p.firm_r = np.ones(p.T + p.S) * firm_r
    p.hh_r = np.ones(p.T + p.S) * hh_r
    p.num_workers = 1
    test_dict = SS.run_SS(p, None)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', expected_path))

    # delete values key-value pairs that are not in both dicts
    del expected_dict['bssmat'], expected_dict['chi_n'], expected_dict['chi_b']
    del test_dict['etr_ss'], test_dict['mtrx_ss'], test_dict['mtry_ss']
    test_dict['IITpayroll_revenue'] = (test_dict['total_revenue_ss'] -
                                       test_dict['business_revenue'])
    del test_dict['T_Pss'], test_dict['T_BQss'], test_dict['T_Wss']
    del test_dict['resource_constraint_error'], test_dict['T_Css']
    del test_dict['r_gov_ss'], test_dict['r_hh_ss']
    test_dict['revenue_ss'] = test_dict.pop('total_revenue_ss')

    for k, v in expected_dict.items():
        assert(np.allclose(test_dict[k], v))
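
# A test like this is usually parametrized over saved input/output pickles
# with pytest; a hedged sketch (the file names below are hypothetical
# placeholders, not the repository's actual fixtures):
#
#     @pytest.mark.parametrize(
#         'input_path,expected_path',
#         [('run_SS_baseline_inputs.pkl', 'run_SS_baseline_outputs.pkl')],
#         ids=['Baseline'])
#     def test_run_SS(input_path, expected_path):
#         ...
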
Esempio n. 28
0
def run_model(meta_param_dict, adjustment):
    '''
    Initializes classes from OG-USA that compute the model under
    different policies, then calls functions to get the output objects.
    '''
    meta_params = MetaParams()
    meta_params.adjust(meta_param_dict)
    if meta_params.data_source == "PUF":
        data = retrieve_puf(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
    else:
        data = "cps"
    # Create output directory structure
    base_dir = os.path.join(CUR_DIR, BASELINE_DIR)
    reform_dir = os.path.join(CUR_DIR, REFORM_DIR)
    dirs = [base_dir, reform_dir]
    for _dir in dirs:
        utils.mkdirs(_dir)
    # Dask parameters
    client = None
    num_workers = 1

    # whether to estimate tax functions from microdata
    run_micro = True

    # Solve baseline model
    base_spec = {
        'start_year': meta_param_dict['year'],
        'debt_ratio_ss': 2.0,
        'r_gov_scale': 1.0,
        'r_gov_shift': 0.02,
        'zeta_D': [0.4],
        'zeta_K': [0.1],
        'initial_debt_ratio': 0.78,
        'initial_foreign_debt_ratio': 0.4,
        'tax_func_type': 'linear',
        'age_specific': False
    }
    base_params = Specifications(run_micro=False,
                                 output_base=base_dir,
                                 baseline_dir=base_dir,
                                 test=False,
                                 time_path=False,
                                 baseline=True,
                                 iit_reform={},
                                 guid='',
                                 data=data,
                                 client=client,
                                 num_workers=num_workers)
    base_params.update_specifications(base_spec)
    base_params.get_tax_function_parameters(client, run_micro)
    base_ss = SS.run_SS(base_params, client=client)
    utils.mkdirs(os.path.join(base_dir, "SS"))
    ss_dir = os.path.join(base_dir, "SS", "SS_vars.pkl")
    with open(ss_dir, "wb") as f:
        pickle.dump(base_ss, f)

    # Solve reform model
    reform_spec = base_spec
    reform_spec.update(adjustment['ogusa'])
    reform_params = Specifications(run_micro=False,
                                   output_base=reform_dir,
                                   baseline_dir=base_dir,
                                   test=False,
                                   time_path=False,
                                   baseline=False,
                                   iit_reform={},
                                   guid='',
                                   data=data,
                                   client=client,
                                   num_workers=num_workers)
    reform_params.update_specifications(reform_spec)
    reform_params.get_tax_function_parameters(client, run_micro)
    reform_ss = SS.run_SS(reform_params, client=client)

    comp_dict = comp_output(base_ss, base_params, reform_ss, reform_params)

    return comp_dict
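
# A minimal usage sketch for run_model() (the dictionaries below are
# hypothetical examples of the expected structure, not official parameter
# sets):
#
#     meta_param_dict = {'year': 2021, 'data_source': 'CPS'}
#     adjustment = {'ogusa': {'frisch': 0.45}}
#     comp_dict = run_model(meta_param_dict, adjustment)
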
Esempio n. 29
0
    guid='_example',
    data='cps',
    client=client,
    num_workers=num_workers)


def chi_n_func(s, a0, a1, a2, a3, a4):
    chi_n = a0 + a1 * s + a2 * s**2 + a3 * s**3 + a4 * s**4
    return chi_n


a0 = 1
a1 = 0
a2 = 0
a3 = 0
a4 = 0

params_init = np.array([a0, a1, a2, a3, a4])

labor_data = np.array([167, 165, 165, 165, 165, 166, 165, 165, 164, 166, 164])
labor_moments = labor_data * 12 / (365 * 17.5)
data_moments = np.array(list(labor_moments.flatten()))
ages = np.linspace(20, 100, p.S)
p.chi_n = chi_n_func(ages, a0, a1, a2, a3, a4)
### had to add this to make it work:
ss_output = SS.run_SS(p, client)
model_moments = calc_moments(ss_output, p.omega_SS, p.lambdas, p.S, p.J)

print(labor_moments)
print(model_moments)
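
# params_init above is presumably the starting point for estimating the
# chi_n polynomial coefficients; a hedged sketch of how that minimization
# might look (the criterion helper below is an assumption for illustration,
# not a function defined in this snippet):
#
#     import scipy.optimize as opt
#
#     def criterion(params, p, client, data_moments):
#         a0, a1, a2, a3, a4 = params
#         p.chi_n = chi_n_func(np.linspace(20, 100, p.S), a0, a1, a2, a3, a4)
#         ss_out = SS.run_SS(p, client)
#         model_moments = calc_moments(ss_out, p.omega_SS, p.lambdas, p.S, p.J)
#         diff = np.array(model_moments) - np.array(data_moments)
#         return np.dot(diff, diff)
#
#     result = opt.minimize(criterion, params_init, method='Nelder-Mead',
#                           args=(p, client, data_moments))
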
Esempio n. 30
0
def runner_SS(output_base, baseline_dir, baseline=False,
  analytical_mtrs=False, age_specific=False, reform={}, user_params={},
  guid='', run_micro=True):

    from ogusa import parameters, demographics, income, utils
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [saved_moments_dir, ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline, analytical_mtrs=analytical_mtrs, age_specific=age_specific,
                                     start_year=user_params['start_year'], reform=reform, guid=guid)
    print ("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating fricsh and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(user_params['frisch'],
                                                               run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print "updating g_y_annual and associated"
        ending_age = run_params['ending_age']
        starting_age = run_params['starting_age']
        S = run_params['S']
        g_y = (1 + user_params['g_y_annual'])**(float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)

    from ogusa import SS, TPI


    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age',
                'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta', 'E',
                'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI',
                'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon',
                'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params',
                'mtry_params','tau_payroll', 'tau_bq',
                'retire', 'mean_income_data', 'g_n_vector',
                'h_wealth', 'p_wealth', 'm_wealth',
                'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'imm_rates', 'e', 'rho', 'omega_S_preTP']


    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params

    income_tax_params, ss_params, iterative_params, chi_params= SS.create_steady_state_parameters(**sim_params)

    '''
    ****
    CALL CALIBRATION here if boolean flagged
    ****
    '''
    calibrate_model = False
    if calibrate_model:
        chi_params = calibrate.chi_estimate(income_tax_params, ss_params, iterative_params, chi_params, baseline_dir=baseline_dir)

    ss_outputs = SS.run_SS(income_tax_params, ss_params, iterative_params, chi_params, baseline,
                                     baseline_dir=baseline_dir)

    '''
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
Esempio n. 31
0
def runner_SS(output_base,
              baseline_dir,
              baseline=False,
              analytical_mtrs=True,
              age_specific=False,
              reform=0,
              fix_transfers=False,
              user_params={},
              guid='',
              calibrate_model=False,
              run_micro=True):

    from ogusa import parameters, demographics, income, utils

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [saved_moments_dir, ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    print("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline,
                                                 reform=reform,
                                                 guid=guid,
                                                 user_modifiable=True)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating fricsh and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params['frisch'], run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'sigma' in user_params:
        print "updating sigma"
        run_params['sigma'] = user_params['sigma']
        run_params.update(user_params)

    from ogusa import SS, TPI, SS_alt

    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = [
        'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta',
        'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter',
        'mindist_SS', 'mindist_TPI', 'analytical_mtrs', 'b_ellipse',
        'k_ellipse', 'upsilon', 'chi_b_guess', 'chi_n_guess', 'etr_params',
        'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq', 'retire',
        'mean_income_data', 'g_n_vector', 'h_wealth', 'p_wealth', 'm_wealth',
        'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'imm_rates', 'e', 'rho',
        'omega_S_preTP'
    ]

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params
    '''
    ------------------------------------------------------------------------
        If using income tax reform, need to determine parameters that yield
        same SS revenue as the wealth tax reform.
    ------------------------------------------------------------------------
    '''
    if reform == 1:

        income_tax_params, ss_params, iterative_params, chi_params = SS.create_steady_state_parameters(
            **sim_params)

        # find SS revenue from wealth tax reform
        reform3_ss_dir = os.path.join(
            "./OUTPUT_WEALTH_REFORM" + '/sigma' + str(run_params['sigma']),
            "SS/SS_vars.pkl")
        reform3_ss_solutions = pickle.load(open(reform3_ss_dir, "rb"))
        receipts_to_match = reform3_ss_solutions[
            'T_Hss'] + reform3_ss_solutions['Gss']

        # create function to match SS revenue
        def matcher(d_guess, params):
            income_tax_params, receipts_to_match, ss_params, iterative_params,\
                              chi_params, baseline, baseline_dir = params
            analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
            etr_params[:, 3] = d_guess
            mtrx_params[:, 3] = d_guess
            mtry_params[:, 3] = d_guess
            income_tax_params = analytical_mtrs, etr_params, mtrx_params, mtry_params
            ss_outputs = SS.run_SS(income_tax_params,
                                   ss_params,
                                   iterative_params,
                                   chi_params,
                                   baseline,
                                   fix_transfers=fix_transfers,
                                   baseline_dir=baseline_dir)

            receipts_new = ss_outputs['T_Hss'] + ss_outputs['Gss']
            error = abs(receipts_to_match - receipts_new)
            if d_guess <= 0:
                error = 1e14
            print('Error in taxes:', error)
            return error

        print('Computing new income tax to match wealth tax')

        # d_guess= .452 # initial guess 0.452 works for sigma = 2, frisch 1.5
        # new_d_inc = d_guess
        # import scipy.optimize as opt
        # params = [income_tax_params, receipts_to_match, ss_params, iterative_params,
        #                   chi_params, baseline, baseline_dir]
        # new_d_inc = opt.fsolve(matcher, d_guess, args=params, xtol=1e-8)
        # print '\tOld income tax:', d_guess
        # print '\tNew income tax:', new_d_inc

        def samesign(a, b):
            return a * b > 0

        def bisect_method(func, params, low, high):
            'Find root of continuous function where f(low) and f(high) have opposite signs'

            #assert not samesign(func(params,low), func(params,high))

            for i in range(54):
                midpoint = (low + high) / 2.0
                if samesign(func(params, low), func(params, midpoint)):
                    low = midpoint
                else:
                    high = midpoint

            return midpoint
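
        # A hedged usage sketch for bisect_method (assuming func(params, x) is
        # continuous with a sign change on [low, high]), e.g.
        #     bisect_method(lambda params, x: x ** 2 - 2.0, None, 0.0, 2.0)
        # returns approximately 1.41421356, the positive root of x**2 - 2.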

        def solve_model(params, d):
            income_tax_params, ss_params, iterative_params,\
                              chi_params, baseline ,baseline_dir = params
            analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
            etr_params[:, 3] = d
            mtrx_params[:, 3] = d
            mtry_params[:, 3] = d
            income_tax_params = analytical_mtrs, etr_params, mtrx_params, mtry_params
            ss_outputs = SS.run_SS(income_tax_params,
                                   ss_params,
                                   iterative_params,
                                   chi_params,
                                   baseline,
                                   fix_transfers=fix_transfers,
                                   baseline_dir=baseline_dir)
            ss_dir = os.path.join("./OUTPUT_INCOME_REFORM/sigma2.0",
                                  "SS/SS_vars.pkl")
            pickle.dump(ss_outputs, open(ss_dir, "wb"))
            receipts_new = ss_outputs['T_Hss'] + ss_outputs['Gss']
            new_error = receipts_to_match - receipts_new
            print('Error in taxes:', new_error)
            print('New income tax:', d)
            return new_error

        # print 'Computing new income tax to match wealth tax'
        # d_guess= 0.5025 # initial guess
        # # income_tax_params, receipts_to_match, ss_params, iterative_params,\
        # #                   chi_params, baseline, baseline_dir = params
        # analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
        # etr_params[:,3] = d_guess
        # mtrx_params[:,3] = d_guess
        # mtry_params[:,3] = d_guess
        # income_tax_params = analytical_mtrs, etr_params, mtrx_params, mtry_params
        # ss_outputs = SS.run_SS(income_tax_params, ss_params, iterative_params,
        #                   chi_params, baseline, fix_transfers=fix_transfers,
        #                   baseline_dir=baseline_dir)
        # ss_dir = os.path.join("./OUTPUT_INCOME_REFORM/sigma2.0", "SS/SS_vars.pkl")
        # pickle.dump(ss_outputs, open(ss_dir, "wb"))
        # receipts_new = ss_outputs['T_Hss'] + ss_outputs['Gss']
        # error = receipts_to_match - receipts_new
        # new_error = error
        # print "ERROR: ", error
        # max_loop_iter = 1
        # output_list = np.zeros((max_loop_iter,3))
        # loop_iter = 0
        # bisect = 0
        # d_guess_old = d_guess
        # # while np.abs(new_error) > 1e-8 and loop_iter < max_loop_iter:
        # while loop_iter < max_loop_iter:
        #     # if new_error > 0 and new_error > 0 and bisect == 0:
        #     #     d_guess_old = d_guess
        #     #     d_guess+=0.001
        #     # elif new_error < 0 and new_error < 0 and bisect == 0:
        #     #     d_guess_old = d_guess
        #     #     d_guess-=0.001
        #     #     d_guess = max(0.0,d_guess) # constrain so not negative
        #     # else:
        #     #     bisect = 1
        #     #     print 'Entering bisection method'
        #     #     params = income_tax_params, ss_params, iterative_params,\
        #     #                       chi_params, baseline ,baseline_dir
        #     #     high = max(d_guess,d_guess_old)
        #     #     low = min(d_guess,d_guess_old)
        #     #     d_guess = bisect_method(solve_model, params, low, high)
        #     #     loop_iter = max_loop_iter
        #     d_guess_old = d_guess
        #     d_guess+=0.0005
        #
        #     error = new_error
        #     etr_params[:,3] = d_guess
        #     mtrx_params[:,3] = d_guess
        #     mtry_params[:,3] = d_guess
        #     income_tax_params = analytical_mtrs, etr_params, mtrx_params, mtry_params
        #     print 'now here$$$'
        #     ss_outputs = SS.run_SS(income_tax_params, ss_params, iterative_params,
        #                       chi_params, baseline, fix_transfers=fix_transfers,
        #                       baseline_dir=baseline_dir)
        #     ss_dir = os.path.join("./OUTPUT_INCOME_REFORM/sigma2.0", "SS/SS_vars.pkl")
        #     pickle.dump(ss_outputs, open(ss_dir, "wb"))
        #     receipts_new = ss_outputs['T_Hss'] + ss_outputs['Gss']
        #     new_error = (receipts_to_match - receipts_new)
        #     print "ERROR: ", new_error
        #     output_list[loop_iter,0]=new_error
        #     output_list[loop_iter,1]=d_guess
        #     output_list[loop_iter,2]=ss_outputs['Yss']-ss_outputs['Iss']-ss_outputs['Css']-ss_outputs['Gss']
        #     np.savetxt('inc_tax_out.csv',output_list, delimiter=",")
        #     pickle.dump(output_list, open("output_list.pkl", "wb"))
        #     print 'Error in taxes:', error
        #     print 'Old income tax:', d_guess_old
        #     print 'New income tax:', d_guess
        #     print 'iteration: ', loop_iter
        #     loop_iter += 1

        analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
        new_d_inc = 0.5025  # this is 0.453 if fix_transfers=False, 0.503 if True
        etr_params[:, 3] = new_d_inc
        mtrx_params[:, 3] = new_d_inc
        mtry_params[:, 3] = new_d_inc

        sim_params['etr_params'] = np.tile(
            np.reshape(etr_params, (run_params['S'], 1, etr_params.shape[1])),
            (1, run_params['BW'], 1))
        sim_params['mtrx_params'] = np.tile(
            np.reshape(mtrx_params,
                       (run_params['S'], 1, mtrx_params.shape[1])),
            (1, run_params['BW'], 1))
        sim_params['mtry_params'] = np.tile(
            np.reshape(mtry_params,
                       (run_params['S'], 1, mtry_params.shape[1])),
            (1, run_params['BW'], 1))
    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''
    income_tax_params, ss_params, iterative_params, chi_params = SS.create_steady_state_parameters(
        **sim_params)
    analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
    '''
    ****
    CALL CALIBRATION here if boolean flagged
    ****
    '''
    if calibrate_model:
        chi_params = calibrate.chi_estimate(income_tax_params,
                                            ss_params,
                                            iterative_params,
                                            chi_params,
                                            baseline_dir=baseline_dir)

    # ss_outputs = SS_alt.run_SS(income_tax_params, ss_params, iterative_params,
    #                   chi_params, baseline, baseline_dir=baseline_dir)
    print('Fix transfers = ', fix_transfers)
    ss_outputs = SS.run_SS(income_tax_params,
                           ss_params,
                           iterative_params,
                           chi_params,
                           baseline,
                           fix_transfers=fix_transfers,
                           baseline_dir=baseline_dir)

    model_moments = ogusa.calibrate.calc_moments(ss_outputs,
                                                 sim_params['omega_SS'],
                                                 sim_params['lambdas'],
                                                 sim_params['S'],
                                                 sim_params['J'])

    scf, data = ogusa.wealth.get_wealth_data()
    wealth_moments = ogusa.wealth.compute_wealth_moments(
        scf, sim_params['lambdas'], sim_params['J'])

    print('model moments: ', model_moments[:sim_params['J'] + 2])
    print('data moments: ', wealth_moments)
    '''
    ------------------------------------------------------------------------
        Pickle SS results and parameters
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        param_dir = os.path.join(baseline_dir, "run_parameters.pkl")
        pickle.dump(sim_params, open(param_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        param_dir = os.path.join(output_base, "run_parameters.pkl")
        pickle.dump(sim_params, open(param_dir, "wb"))
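
The two branches above write the same SS_vars.pkl and run_parameters.pkl, only to different directories. A minimal sketch (the helper name and the directory paths in the usage comment are assumptions) of how those pickles might be read back for post-processing:

import os
import pickle


def load_ss_results(output_dir):
    # Reload the steady-state results that the run script pickles to
    # <output_dir>/SS/SS_vars.pkl so they can be inspected or compared.
    ss_path = os.path.join(output_dir, "SS", "SS_vars.pkl")
    with open(ss_path, "rb") as f:
        return pickle.load(f)


# Example usage (paths are assumptions):
# base_ss = load_ss_results('./OUTPUT_BASELINE')
# reform_ss = load_ss_results('./OUTPUT_REFORM')
# print('Change in steady-state output: ', reform_ss['Yss'] - base_ss['Yss'])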
Esempio n. 32
0
def runner_SS(
    output_base,
    baseline_dir,
    baseline=False,
    analytical_mtrs=False,
    age_specific=False,
    reform={},
    user_params={},
    guid="",
    run_micro=True,
):

    from ogusa import parameters, demographics, income, utils
    from ogusa import txfunc

    tick = time.time()

    # Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [saved_moments_dir, ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(
            baseline=baseline,
            analytical_mtrs=analytical_mtrs,
            age_specific=age_specific,
            start_year=user_params["start_year"],
            reform=reform,
            guid=guid,
        )
    print("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params["analytical_mtrs"] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if "frisch" in user_params:
        print("updating frisch and associated")
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(user_params["frisch"], run_params["ltilde"])
        run_params["b_ellipse"] = b_ellipse
        run_params["upsilon"] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if "g_y_annual" in user_params:
        print("updating g_y_annual and associated")
        ending_age = run_params["ending_age"]
        starting_age = run_params["starting_age"]
        S = run_params["S"]
        g_y = (1 + user_params["g_y_annual"]) ** (float(ending_age - starting_age) / S) - 1
        run_params["g_y"] = g_y
        run_params.update(user_params)

    from ogusa import SS, TPI

    """
    ****
    CALL CALIBRATION here if boolean flagged

    ****
    """
    calibrate_model = False
    # if calibrate_model:
    #     chi_b, chi_n = calibrate.(income_tax_params, ss_params, iterative_params, chi_params, baseline,
    #                                  calibrate_model, output_dir=output_base, baseline_dir=baseline_dir)

    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = [
        "S",
        "J",
        "T",
        "BW",
        "lambdas",
        "starting_age",
        "ending_age",
        "beta",
        "sigma",
        "alpha",
        "nu",
        "Z",
        "delta",
        "E",
        "ltilde",
        "g_y",
        "maxiter",
        "mindist_SS",
        "mindist_TPI",
        "analytical_mtrs",
        "b_ellipse",
        "k_ellipse",
        "upsilon",
        "chi_b_guess",
        "chi_n_guess",
        "etr_params",
        "mtrx_params",
        "mtry_params",
        "tau_payroll",
        "tau_bq",
        "retire",
        "mean_income_data",
        "g_n_vector",
        "h_wealth",
        "p_wealth",
        "m_wealth",
        "omega",
        "g_n_ss",
        "omega_SS",
        "surv_rate",
        "imm_rates",
        "e",
        "rho",
        "omega_S_preTP",
    ]

    """
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    """

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params["output_dir"] = output_base
    sim_params["run_params"] = run_params

    income_tax_params, ss_params, iterative_params, chi_params = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_SS(
        income_tax_params, ss_params, iterative_params, chi_params, baseline, baseline_dir=baseline_dir
    )

    """
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    """
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
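
On Python 3, the try/except loop around os.makedirs in runner_SS can be collapsed with exist_ok=True; a minimal sketch under that assumption (the helper name is hypothetical):

import os


def make_output_dirs(output_base):
    # Build the same directory layout as the runner above, silently
    # skipping directories that already exist.
    for sub in ("Saved_moments", "SS", "TPI"):
        os.makedirs(os.path.join(output_base, sub), exist_ok=True)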
Esempio n. 33
0
def runner(output_base,
           baseline_dir,
           test=False,
           time_path=True,
           baseline=False,
           analytical_mtrs=False,
           age_specific=False,
           reform={},
           user_params={},
           guid='',
           run_micro=True,
           small_open=False,
           budget_balance=False,
           baseline_spending=False):

    #from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import parameters, demographics, income, utils
    from ogusa import txfunc

    tick = time.time()

    # Make sure options are internally consistent
    if baseline == True and baseline_spending == True:
        print(
            'Inconsistent options. Setting <baseline_spending> to False, leaving <baseline> True.'
        )
        baseline_spending = False
    if budget_balance == True and baseline_spending == True:
        print(
            'Inconsistent options. Setting <baseline_spending> to False, leaving <budget_balance> True.'
        )
        baseline_spending = False

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [saved_moments_dir, ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline,
                                     analytical_mtrs=analytical_mtrs,
                                     age_specific=age_specific,
                                     start_year=user_params['start_year'],
                                     reform=reform,
                                     guid=guid)
    print('In runner, baseline is ', baseline)
    run_params = ogusa.parameters.get_parameters(test=test,
                                                 baseline=baseline,
                                                 guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs
    run_params['small_open'] = small_open
    run_params['budget_balance'] = budget_balance

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print("updating frisch and associated")
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params['frisch'], run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)
    if 'debt_ratio_ss' in user_params:
        run_params['debt_ratio_ss'] = user_params['debt_ratio_ss']

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print("updating g_y_annual and associated")
        ending_age = run_params['ending_age']
        starting_age = run_params['starting_age']
        S = run_params['S']
        g_y = (1 + user_params['g_y_annual'])**(
            float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)

    # Modify transfer & spending ratios based on user input.
    if 'T_shifts' in user_params:
        if baseline_spending == False:
            print('updating ALPHA_T with T_shifts in first',
                  user_params['T_shifts'].size, 'periods.')
            T_shifts = np.concatenate((user_params['T_shifts'],
                                       np.zeros(run_params['ALPHA_T'].size -
                                                user_params['T_shifts'].size)),
                                      axis=0)
            run_params['ALPHA_T'] = run_params['ALPHA_T'] + T_shifts
    if 'G_shifts' in user_params:
        if baseline_spending == False:
            print('updating ALPHA_G with G_shifts in first',
                  user_params['G_shifts'].size, 'periods.')
            G_shifts = np.concatenate((user_params['G_shifts'],
                                       np.zeros(run_params['ALPHA_G'].size -
                                                user_params['G_shifts'].size)),
                                      axis=0)
            run_params['ALPHA_G'] = run_params['ALPHA_G'] + G_shifts

    from ogusa import SS, TPI

    calibrate_model = False
    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = [
        'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta',
        'sigma', 'alpha', 'gamma', 'epsilon', 'nu', 'Z', 'delta', 'E',
        'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI',
        'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon', 'small_open',
        'budget_balance', 'ss_firm_r', 'ss_hh_r', 'tpi_firm_r', 'tpi_hh_r',
        'tG1', 'tG2', 'alpha_T', 'alpha_G', 'ALPHA_T', 'ALPHA_G', 'rho_G',
        'debt_ratio_ss', 'tau_b', 'delta_tau', 'chi_b_guess', 'chi_n_guess',
        'etr_params', 'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq',
        'retire', 'mean_income_data', 'g_n_vector', 'h_wealth', 'p_wealth',
        'm_wealth', 'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'imm_rates',
        'e', 'rho', 'initial_debt', 'omega_S_preTP'
    ]
    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params
    income_tax_params, ss_parameters, iterative_params, chi_params, small_open_params = SS.create_steady_state_parameters(
        **sim_params)

    ss_outputs = SS.run_SS(income_tax_params,
                           ss_parameters,
                           iterative_params,
                           chi_params,
                           small_open_params,
                           baseline,
                           baseline_spending,
                           baseline_dir=baseline_dir)
    '''
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))

    if time_path:
        '''
        ------------------------------------------------------------------------
            Run the TPI simulation
        ------------------------------------------------------------------------
        '''

        sim_params['baseline'] = baseline
        sim_params['baseline_spending'] = baseline_spending
        sim_params['input_dir'] = output_base
        sim_params['baseline_dir'] = baseline_dir

        income_tax_params, tpi_params, iterative_params, small_open_params, initial_values, SS_values, fiscal_params, biz_tax_params = TPI.create_tpi_params(
            **sim_params)

        tpi_output, macro_output = TPI.run_TPI(
            income_tax_params,
            tpi_params,
            iterative_params,
            small_open_params,
            initial_values,
            SS_values,
            fiscal_params,
            biz_tax_params,
            output_dir=output_base,
            baseline_spending=baseline_spending)
        '''
        ------------------------------------------------------------------------
            Pickle TPI results
        ------------------------------------------------------------------------
        '''
        tpi_dir = os.path.join(output_base, "TPI")
        utils.mkdirs(tpi_dir)
        tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
        pickle.dump(tpi_output, open(tpi_vars, "wb"))

        tpi_dir = os.path.join(output_base, "TPI")
        utils.mkdirs(tpi_dir)
        tpi_vars = os.path.join(tpi_dir, "TPI_macro_vars.pkl")
        pickle.dump(macro_output, open(tpi_vars, "wb"))

        print("Time path iteration complete.")
    print("It took {0} seconds to get that part done.".format(time.time() -
                                                              tick))
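
Several of the runners here convert an annual growth rate to a per-model-period rate with the same expression. A small stand-alone sketch of that conversion (the example ages and S in the comment are illustrative assumptions, not values taken from the run scripts):

def model_period_growth(g_y_annual, starting_age, ending_age, S):
    # Compound the annual rate over the number of years one model period
    # represents, (ending_age - starting_age) / S.
    years_per_period = float(ending_age - starting_age) / S
    return (1 + g_y_annual) ** years_per_period - 1


# e.g. with starting_age=20, ending_age=100 and S=80, each model period is
# one year, so model_period_growth(0.03, 20, 100, 80) returns 0.03 (up to
# floating-point error).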
Esempio n. 34
0
def run_model(meta_param_dict, adjustment):
    '''
    Initializes classes from OG-USA that compute the model under
    different policies, then calls a function to collect the output
    objects.
    '''
    print('Meta_param_dict = ', meta_param_dict)
    print('adjustment dict = ', adjustment)

    meta_params = MetaParams()
    meta_params.adjust(meta_param_dict)
    if meta_params.data_source == "PUF":
        data = retrieve_puf(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        # set name of cached baseline file in case it is used below
        cached_pickle = 'TxFuncEst_baseline_PUF.pkl'
    else:
        data = "cps"
        # set name of cached baseline file in case it is used below
        cached_pickle = 'TxFuncEst_baseline_CPS.pkl'
    # Get TC params adjustments
    iit_mods = convert_policy_adjustment(
        adjustment["Tax-Calculator Parameters"])
    # Create output directory structure
    base_dir = os.path.join(CUR_DIR, BASELINE_DIR)
    reform_dir = os.path.join(CUR_DIR, REFORM_DIR)
    dirs = [base_dir, reform_dir]
    for _dir in dirs:
        utils.mkdirs(_dir)

    # Dask parameters
    client = Client()
    num_workers = 5
    # TODO: Swap to these parameters when able to specify tax function
    # and model workers separately
    # num_workers_txf = 5
    # num_workers_mod = 6

    # whether to estimate tax functions from microdata
    run_micro = True
    time_path = meta_param_dict['time_path'][0]['value']

    # filter out OG-USA params that will not change between baseline and
    # reform runs (these are the non-policy parameters)
    filtered_ogusa_params = {}
    constant_param_set = {
        'frisch', 'beta_annual', 'sigma', 'g_y_annual', 'gamma', 'epsilon',
        'Z', 'delta_annual', 'small_open', 'world_int_rate',
        'initial_foreign_debt_ratio', 'zeta_D', 'zeta_K', 'tG1', 'tG2',
        'rho_G', 'debt_ratio_ss', 'budget_balance'
    }
    filtered_ogusa_params = OrderedDict()
    for k, v in adjustment['OG-USA Parameters'].items():
        if k in constant_param_set:
            filtered_ogusa_params[k] = v

    # Solve baseline model
    start_year = meta_param_dict['year'][0]['value']
    if start_year == 2020:
        OGPATH = inspect.getfile(SS)
        OGDIR = os.path.dirname(OGPATH)
        tax_func_path = None  #os.path.join(OGDIR, 'data', 'tax_functions',
        #             cached_pickle)
        run_micro_baseline = False
    else:
        tax_func_path = None
        run_micro_baseline = True
    base_spec = {
        **{
            'start_year': start_year,
            'tax_func_type': 'DEP',
            'age_specific': False
        },
        **filtered_ogusa_params
    }
    base_params = Specifications(run_micro=False,
                                 output_base=base_dir,
                                 baseline_dir=base_dir,
                                 test=False,
                                 time_path=False,
                                 baseline=True,
                                 iit_reform={},
                                 guid='',
                                 data=data,
                                 client=client,
                                 num_workers=num_workers)
    base_params.update_specifications(base_spec)
    base_params.get_tax_function_parameters(client,
                                            run_micro_baseline,
                                            tax_func_path=tax_func_path)
    base_ss = SS.run_SS(base_params, client=client)
    utils.mkdirs(os.path.join(base_dir, "SS"))
    base_ss_dir = os.path.join(base_dir, "SS", "SS_vars.pkl")
    with open(base_ss_dir, "wb") as f:
        pickle.dump(base_ss, f)
    if time_path:
        base_tpi = TPI.run_TPI(base_params, client=client)
        tpi_dir = os.path.join(base_dir, "TPI", "TPI_vars.pkl")
        with open(tpi_dir, "wb") as f:
            pickle.dump(base_tpi, f)
    else:
        base_tpi = None

    # Solve reform model
    reform_spec = base_spec
    reform_spec.update(adjustment["OG-USA Parameters"])
    reform_params = Specifications(run_micro=False,
                                   output_base=reform_dir,
                                   baseline_dir=base_dir,
                                   test=False,
                                   time_path=time_path,
                                   baseline=False,
                                   iit_reform=iit_mods,
                                   guid='',
                                   data=data,
                                   client=client,
                                   num_workers=num_workers)
    reform_params.update_specifications(reform_spec)
    reform_params.get_tax_function_parameters(client, run_micro)
    reform_ss = SS.run_SS(reform_params, client=client)
    utils.mkdirs(os.path.join(reform_dir, "SS"))
    reform_ss_dir = os.path.join(reform_dir, "SS", "SS_vars.pkl")
    with open(reform_ss_dir, "wb") as f:
        pickle.dump(reform_ss, f)
    if time_path:
        reform_tpi = TPI.run_TPI(reform_params, client=client)
    else:
        reform_tpi = None

    comp_dict = comp_output(base_params, base_ss, reform_params, reform_ss,
                            time_path, base_tpi, reform_tpi)

    # Shut down client and make sure all of its references are
    # cleaned up.
    client.close()
    del client

    return comp_dict
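
run_model above starts a bare dask Client(); a hedged alternative sketch (the explicit cluster sizing and shutdown are assumptions, not part of the function above) that pins the local cluster to the num_workers used for the model runs:

from dask.distributed import Client, LocalCluster

# Start an explicit local cluster so the number of Dask workers matches the
# num_workers passed to the OG-USA runs, then shut both down when finished.
cluster = LocalCluster(n_workers=5, threads_per_worker=1)
client = Client(cluster)
# ... run baseline and reform as in run_model above ...
client.close()
cluster.close()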
Esempio n. 35
0
def runner(output_base,
           baseline_dir,
           test=False,
           time_path=True,
           baseline=False,
           constant_rates=True,
           tax_func_type='DEP',
           analytical_mtrs=False,
           age_specific=False,
           reform={},
           user_params={},
           guid='',
           run_micro=True,
           small_open=False,
           budget_balance=False,
           baseline_spending=False,
           data=None,
           client=None,
           num_workers=1):

    from ogusa import parameters, demographics, income, utils

    tick = time.time()

    start_year = user_params.get('start_year', DEFAULT_START_YEAR)
    if start_year > TC_LAST_YEAR:
        raise RuntimeError("Start year is beyond data extrapolation.")

    # Make sure options are internally consistent
    if baseline and baseline_spending:
        print("Inconsistent options. Setting <baseline_spending> to False, "
              "leaving <baseline> True.")
        baseline_spending = False
    if budget_balance and baseline_spending:
        print("Inconsistent options. Setting <baseline_spending> to False, "
              "leaving <budget_balance> True.")
        baseline_spending = False

    # Create output directory structure
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError as oe:
            pass

    print('In runner, baseline is ', baseline)
    if small_open and (not isinstance(small_open, dict)):
        raise ValueError(
            'small_open must be False/None or a dict with keys: {}'.format(
                SMALL_OPEN_KEYS))
    small_open = small_open or {}
    run_params = ogusa.parameters.get_parameters(
        output_base,
        reform=reform,
        test=test,
        baseline=baseline,
        guid=guid,
        run_micro=run_micro,
        constant_rates=constant_rates,
        analytical_mtrs=analytical_mtrs,
        tax_func_type=tax_func_type,
        age_specific=age_specific,
        start_year=start_year,
        data=data,
        client=client,
        num_workers=num_workers,
        **small_open)
    run_params['analytical_mtrs'] = analytical_mtrs
    run_params['small_open'] = bool(small_open)
    run_params['budget_balance'] = budget_balance
    run_params['world_int_rate'] = small_open.get('world_int_rate',
                                                  DEFAULT_WORLD_INT_RATE)

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print("updating frisch and associated")
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params['frisch'], run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)
    if 'debt_ratio_ss' in user_params:
        run_params['debt_ratio_ss'] = user_params['debt_ratio_ss']
    if 'tau_b' in user_params:
        run_params['tau_b'] = user_params['tau_b']

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print("updating g_y_annual and associated")
        ending_age = run_params['ending_age']
        starting_age = run_params['starting_age']
        S = run_params['S']
        g_y = ((1 + user_params['g_y_annual'])
               **(float(ending_age - starting_age) / S) - 1)
        run_params['g_y'] = g_y
        run_params.update(user_params)

    # Modify transfer & spending ratios based on user input.
    if 'T_shifts' in user_params:
        if not baseline_spending:
            print('updating ALPHA_T with T_shifts in first',
                  user_params['T_shifts'].size, 'periods.')
            T_shifts = np.concatenate((user_params['T_shifts'],
                                       np.zeros(run_params['ALPHA_T'].size -
                                                user_params['T_shifts'].size)),
                                      axis=0)
            run_params['ALPHA_T'] = run_params['ALPHA_T'] + T_shifts
    if 'G_shifts' in user_params:
        if not baseline_spending:
            print('updating ALPHA_G with G_shifts in first',
                  user_params['G_shifts'].size, 'periods.')
            G_shifts = np.concatenate((user_params['G_shifts'],
                                       np.zeros(run_params['ALPHA_G'].size -
                                                user_params['G_shifts'].size)),
                                      axis=0)
            run_params['ALPHA_G'] = run_params['ALPHA_G'] + G_shifts

    from ogusa import SS, TPI

    calibrate_model = False
    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = [
        'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta',
        'sigma', 'alpha', 'gamma', 'epsilon', 'nu', 'Z', 'delta', 'E',
        'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI',
        'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon', 'small_open',
        'budget_balance', 'ss_firm_r', 'ss_hh_r', 'tpi_firm_r', 'tpi_hh_r',
        'tG1', 'tG2', 'alpha_T', 'alpha_G', 'ALPHA_T', 'ALPHA_G', 'rho_G',
        'debt_ratio_ss', 'tau_b', 'delta_tau', 'chi_b_guess', 'chi_n_guess',
        'etr_params', 'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq',
        'retire', 'mean_income_data', 'g_n_vector', 'h_wealth', 'p_wealth',
        'm_wealth', 'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'imm_rates',
        'e', 'rho', 'initial_debt', 'omega_S_preTP'
    ]
    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params
    sim_params['tax_func_type'] = tax_func_type
    (income_tax_params, ss_parameters, iterative_params, chi_params,
     small_open_params) = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_SS(income_tax_params,
                           ss_parameters,
                           iterative_params,
                           chi_params,
                           small_open_params,
                           baseline,
                           baseline_spending,
                           baseline_dir=baseline_dir,
                           client=client,
                           num_workers=num_workers)
    '''
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    '''
    model_params = {}
    for key in param_names:
        model_params[key] = sim_params[key]
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        # Save pickle with parameter values for the run
        param_dir = os.path.join(baseline_dir, "model_params.pkl")
        pickle.dump(model_params, open(param_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        # Save pickle with parameter values for the run
        param_dir = os.path.join(output_base, "model_params.pkl")
        pickle.dump(model_params, open(param_dir, "wb"))

    if time_path:
        '''
        ------------------------------------------------------------------------
            Run the TPI simulation
        ------------------------------------------------------------------------
        '''

        sim_params['baseline'] = baseline
        sim_params['baseline_spending'] = baseline_spending
        sim_params['input_dir'] = output_base
        sim_params['baseline_dir'] = baseline_dir

        (income_tax_params, tpi_params,
         iterative_params, small_open_params, initial_values, SS_values,
         fiscal_params, biz_tax_params) =\
            TPI.create_tpi_params(**sim_params)

        tpi_output = TPI.run_TPI(income_tax_params,
                                 tpi_params,
                                 iterative_params,
                                 small_open_params,
                                 initial_values,
                                 SS_values,
                                 fiscal_params,
                                 biz_tax_params,
                                 output_dir=output_base,
                                 baseline_spending=baseline_spending,
                                 client=client,
                                 num_workers=num_workers)
        '''
        ------------------------------------------------------------------------
            Pickle TPI results
        ------------------------------------------------------------------------
        '''
        tpi_dir = os.path.join(output_base, "TPI")
        utils.mkdirs(tpi_dir)
        tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
        pickle.dump(tpi_output, open(tpi_vars, "wb"))

        print("Time path iteration complete.")
    print("It took {0} seconds to get that part done.".format(time.time() -
                                                              tick))
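
The T_shifts and G_shifts handling above pads a user-supplied shift vector with zeros before adding it to the baseline ALPHA path. A minimal stand-alone sketch of that padding (the helper name is hypothetical; it assumes the shift vector is no longer than the path):

import numpy as np


def pad_shifts(shifts, target_size):
    # Extend the user-supplied shifts with zeros so they line up with the
    # full ALPHA_T / ALPHA_G path before the two arrays are added.
    shifts = np.asarray(shifts, dtype=float)
    return np.concatenate((shifts, np.zeros(target_size - shifts.size)))


# e.g. pad_shifts([0.01, 0.01], 5) -> array([0.01, 0.01, 0.  , 0.  , 0.  ])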
Esempio n. 36
0
def runner(output_base,
           baseline_dir,
           baseline=False,
           analytical_mtrs=True,
           age_specific=False,
           reform={},
           user_params={},
           guid='',
           run_micro=True):

    #from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import parameters, wealth, labor, demog, income, utils
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ssinit_dir = os.path.join(output_base, "SSinit")
    tpiinit_dir = os.path.join(output_base, "TPIinit")
    dirs = [saved_moments_dir, ssinit_dir, tpiinit_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline,
                                     analytical_mtrs=analytical_mtrs,
                                     age_specific=age_specific,
                                     start_year=user_params['start_year'],
                                     reform=reform,
                                     guid=guid)
    print("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print("updating frisch and associated")
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params['frisch'], run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print("updating g_y_annual and associated")
        ending_age = run_params['ending_age']
        starting_age = run_params['starting_age']
        S = run_params['S']
        g_y = (1 + user_params['g_y_annual'])**(
            float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)

    from ogusa import SS, TPI
    # Generate Wealth data moments
    wealth.get_wealth_data(run_params['lambdas'],
                           run_params['J'],
                           run_params['flag_graphs'],
                           output_dir=output_base)

    # Generate labor data moments
    labor.labor_data_moments(run_params['flag_graphs'], output_dir=output_base)

    calibrate_model = False
    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)

    param_names = [
        'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta',
        'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter',
        'mindist_SS', 'mindist_TPI', 'analytical_mtrs', 'b_ellipse',
        'k_ellipse', 'upsilon', 'chi_b_guess', 'chi_n_guess', 'etr_params',
        'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq', 'retire',
        'mean_income_data', 'g_n_vector', 'h_wealth', 'p_wealth', 'm_wealth',
        'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho'
    ]
    '''
    ------------------------------------------------------------------------
        Run SS 
    ------------------------------------------------------------------------
    '''

    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params

    income_tax_params, ss_parameters, iterative_params, chi_params = SS.create_steady_state_parameters(
        **sim_params)

    ss_outputs = SS.run_SS(income_tax_params,
                           ss_parameters,
                           iterative_params,
                           chi_params,
                           baseline,
                           baseline_dir=baseline_dir)
    '''
    ------------------------------------------------------------------------
        Pickle SS results 
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/ss_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/ss_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    '''
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    '''

    sim_params['input_dir'] = output_base
    sim_params['baseline_dir'] = baseline_dir

    income_tax_params, tpi_params, iterative_params, initial_values, SS_values = TPI.create_tpi_params(
        **sim_params)

    # ss_outputs['income_tax_params'] = income_tax_params
    # ss_outputs['wealth_tax_params'] = wealth_tax_params
    # ss_outputs['ellipse_params'] = ellipse_params
    # ss_outputs['parameters'] = parameters
    # ss_outputs['N_tilde'] = N_tilde
    # ss_outputs['omega_stationary'] = omega_stationary
    # ss_outputs['K0'] = K0
    # ss_outputs['b_sinit'] = b_sinit
    # ss_outputs['b_splus1init'] = b_splus1init
    # ss_outputs['L0'] = L0
    # ss_outputs['Y0'] = Y0
    # ss_outputs['r0'] = r0
    # ss_outputs['BQ0'] = BQ0
    # ss_outputs['T_H_0'] = T_H_0
    # ss_outputs['factor_ss'] = factor
    # ss_outputs['tax0'] = tax0
    # ss_outputs['c0'] = c0
    # ss_outputs['initial_b'] = initial_b
    # ss_outputs['initial_n'] = initial_n
    # ss_outputs['tau_bq'] = tau_bq
    # ss_outputs['g_n_vector'] = g_n_vector
    # ss_outputs['output_dir'] = output_base

    # with open("ss_outputs.pkl", 'wb') as fp:
    #     pickle.dump(ss_outputs, fp)

    w_path, r_path, T_H_path, BQ_path, Y_path = TPI.run_TPI(
        income_tax_params,
        tpi_params,
        iterative_params,
        initial_values,
        SS_values,
        output_dir=output_base)

    print("getting to here....")
    TPI.TP_solutions(w_path, r_path, T_H_path, BQ_path, **ss_outputs)
    print("took {0} seconds to get that part done.".format(
        time.time() - tick))
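
This older TPI interface returns the aggregate price and fiscal paths directly. A hypothetical post-processing sketch (the helper and file names are assumptions) that plots the aggregate output path returned by TPI.run_TPI:

import os

import matplotlib.pyplot as plt


def plot_output_path(Y_path, output_dir="."):
    # Plot aggregate output over the transition path to eyeball convergence
    # to the new steady state, saving the figure alongside the other output.
    fig, ax = plt.subplots()
    ax.plot(Y_path)
    ax.set_xlabel("Model period")
    ax.set_ylabel("Aggregate output Y")
    fig.savefig(os.path.join(output_dir, "Y_path.png"))
    plt.close(fig)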