def runner(self):
    '''
    Method to run the model.
    '''
    # baseline compute
    self.base_ss_outputs = SS.run_SS(self.spec1)
    if self.spec1.time_path:
        self.base_tpi_output = TPI.run_TPI(self.spec1)
    # reform compute
    # a bit more complicated because it will need results from the baseline
    self.reform_ss_outputs = SS.run_SS(self.spec2)
    if self.spec2.time_path:
        self.reform_tpi_output = TPI.run_TPI(self.spec2)
def minstat(beta_guesses, *args):
    '''
    This function generates the weighted sum of squared differences
    between the model and data moments.

    Args:
        beta_guesses (array-like): a vector of length J with the betas
        args (tuple): length 4 tuple, variables needed for minimizer

    Returns:
        distance (scalar): weighted, squared deviation between data and
            model moments
    '''
    # unpack args tuple
    data_moments, W, p, client = args
    # Update beta in parameters object with beta guesses
    p.beta = beta_guesses
    # Solve model SS
    print('Baseline = ', p.baseline)
    ss_output = SS.run_SS(p, client=client)
    # Compute moments from model SS
    model_moments = calc_moments(ss_output, p)
    # distance with levels
    distance = np.dot(
        np.dot((np.array(model_moments) - np.array(data_moments)).T, W),
        np.array(model_moments) - np.array(data_moments))
    print('DATA and MODEL DISTANCE: ', distance)
    return distance
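# Illustrative sketch (made-up numbers, not model output): the criterion
# minimized in minstat is the quadratic form
# (m_model - m_data)' W (m_model - m_data).
import numpy as np

example_model_moments = np.array([0.32, 0.41, 0.38])
example_data_moments = np.array([0.30, 0.45, 0.40])
example_W = np.eye(3)  # identity weighting matrix
diff = example_model_moments - example_data_moments
example_distance = np.dot(np.dot(diff.T, example_W), diff)
# With an identity weighting matrix this is just the sum of squared deviations
assert np.isclose(example_distance, np.sum(diff ** 2))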
def find_moments(p, client):
    b_guess = np.ones((p.S, p.J)) * 0.07
    n_guess = np.ones((p.S, p.J)) * .4 * p.ltilde
    rguess = 0.08961277823002804
    T_Hguess = 0.12
    factorguess = 12.73047710050195
    BQguess = aggr.get_BQ(rguess, b_guess, None, p, 'SS', False)
    # 2nd value gives the number of valid labor moments to consider before
    # exiting SS_fsolve; use -1 to run all the way to the steady state
    exit_early = [0, -1]
    ss_params_baseline = (b_guess, n_guess, None, None, p, client, exit_early)
    guesses = [rguess] + list(BQguess) + [T_Hguess, factorguess]
    [solutions_fsolve, infodict, ier, message] =\
        opt.fsolve(SS.SS_fsolve, guesses, args=ss_params_baseline,
                   xtol=p.mindist_SS, full_output=True)
    rss = solutions_fsolve[0]
    BQss = solutions_fsolve[1:-2]
    T_Hss = solutions_fsolve[-2]
    factor_ss = solutions_fsolve[-1]
    Yss = T_Hss / p.alpha_T[-1]
    fsolve_flag = True
    try:
        output = SS.SS_solver(b_guess, n_guess, rss, BQss, T_Hss, factor_ss,
                              Yss, p, client, fsolve_flag)
    except RuntimeError:
        print('RuntimeError: Steady state aggregate resource constraint '
              'not satisfied')
        print('The error was caught, so find_moments will continue')
        return 1e10
    # Model moments: mean labor supply by age for the first 45 ages
    model_moments = np.array(output['nssmat'].mean(axis=1)[:45])
    return model_moments
def test_run_SS(input_path, expected_path): # Test SS.run_SS function. Provide inputs to function and # ensure that output returned matches what it has been before. input_tuple = utils.safe_read_pickle( os.path.join(CUR_PATH, 'test_io_data', input_path)) (income_tax_params, ss_params, iterative_params, chi_params, small_open_params, baseline, baseline_spending, baseline_dir) =\ input_tuple p = Specifications() (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon, Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n_ss, tau_payroll, tau_bq, p.rho, p.omega_SS, p.budget_balance, alpha_T, p.debt_ratio_ss, tau_b, delta_tau, lambdas, imm_rates, p.e, retire, p.mean_income_data, h_wealth, p_wealth, m_wealth, p.b_ellipse, p.upsilon) = ss_params p.Z = np.ones(p.T + p.S) * Z p.tau_bq = np.ones(p.T + p.S) * 0.0 p.tau_payroll = np.ones(p.T + p.S) * tau_payroll p.alpha_T = np.ones(p.T + p.S) * alpha_T p.tau_b = np.ones(p.T + p.S) * tau_b p.delta_tau = np.ones(p.T + p.S) * delta_tau p.h_wealth = np.ones(p.T + p.S) * h_wealth p.p_wealth = np.ones(p.T + p.S) * p_wealth p.m_wealth = np.ones(p.T + p.S) * m_wealth p.retire = (np.ones(p.T + p.S) * retire).astype(int) p.lambdas = lambdas.reshape(p.J, 1) p.imm_rates = imm_rates.reshape(1, p.S) p.tax_func_type = 'DEP' p.baseline = baseline p.baseline_spending = baseline_spending p.baseline_dir = baseline_dir p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\ income_tax_params p.etr_params = np.transpose( etr_params.reshape(p.S, 1, etr_params.shape[-1]), (1, 0, 2)) p.mtrx_params = np.transpose( mtrx_params.reshape(p.S, 1, mtrx_params.shape[-1]), (1, 0, 2)) p.mtry_params = np.transpose( mtry_params.reshape(p.S, 1, mtry_params.shape[-1]), (1, 0, 2)) p.maxiter, p.mindist_SS = iterative_params p.chi_b, p.chi_n = chi_params p.small_open, ss_firm_r, ss_hh_r = small_open_params p.ss_firm_r = np.ones(p.T + p.S) * ss_firm_r p.ss_hh_r = np.ones(p.T + p.S) * ss_hh_r p.num_workers = 1 test_dict = SS.run_SS(p, None) expected_dict = utils.safe_read_pickle( os.path.join(CUR_PATH, 'test_io_data', expected_path)) # delete values key-value pairs that are not in both dicts del expected_dict['bssmat'], expected_dict['chi_n'], expected_dict['chi_b'] del test_dict['etr_ss'], test_dict['mtrx_ss'], test_dict['mtry_ss'] test_dict['IITpayroll_revenue'] = (test_dict['total_revenue_ss'] - test_dict['business_revenue']) del test_dict['T_Pss'], test_dict['T_BQss'], test_dict['T_Wss'] del test_dict['resource_constraint_error'], test_dict['T_Css'] test_dict['revenue_ss'] = test_dict.pop('total_revenue_ss') for k, v in expected_dict.items(): assert (np.allclose(test_dict[k], v))
def test_SS_solver(baseline, param_updates, filename, dask_client):
    # Test SS.SS_solver function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    p = Specifications(baseline=baseline, client=dask_client,
                       num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.output_base = CUR_PATH
    p.get_tax_function_parameters(None, run_micro=False)
    b_guess = np.ones((p.S, p.J)) * 0.07
    n_guess = np.ones((p.S, p.J)) * .35 * p.ltilde
    if p.zeta_K[-1] == 1.0:
        rguess = p.world_int_rate[-1]
    else:
        rguess = 0.06483431412921253
    TRguess = 0.05738932081035772
    factorguess = 139355.1547340256
    BQguess = aggregates.get_BQ(rguess, b_guess, None, p, 'SS', False)
    Yguess = 0.6376591201150815

    test_dict = SS.SS_solver(b_guess, n_guess, rguess, BQguess, TRguess,
                             factorguess, Yguess, p, None, False)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))

    for k, v in expected_dict.items():
        print('Testing ', k)
        assert np.allclose(test_dict[k], v, atol=1e-07, equal_nan=True)
def test_run_SS(baseline, param_updates, filename, dask_client):
    # Test SS.run_SS function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    if baseline is False:
        tax_func_path_baseline = os.path.join(CUR_PATH,
                                              'TxFuncEst_baseline.pkl')
        tax_func_path = os.path.join(CUR_PATH, 'TxFuncEst_policy.pkl')
        execute.runner(constants.BASELINE_DIR, constants.BASELINE_DIR,
                       time_path=False, baseline=True,
                       og_spec=param_updates, run_micro=False,
                       tax_func_path=tax_func_path_baseline)
    else:
        tax_func_path = os.path.join(CUR_PATH, 'TxFuncEst_baseline.pkl')
    p = Specifications(baseline=baseline, client=dask_client,
                       num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.get_tax_function_parameters(None, run_micro=False,
                                  tax_func_path=tax_func_path)
    test_dict = SS.run_SS(p, None)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))

    for k, v in expected_dict.items():
        assert np.allclose(test_dict[k], v)
def test_SS_fsolve(guesses, args, expected):
    '''
    Test SS.SS_fsolve function.  Provide inputs to function and
    ensure that output returned matches what it has been before.
    '''
    test_list = SS.SS_fsolve(guesses, *args)
    assert np.allclose(np.array(test_list), np.array(expected), atol=1e-6)
def test_inner_loop(baseline, param_updates, filename, dask_client):
    # Test SS.inner_loop function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    p = Specifications(baseline=baseline, client=dask_client,
                       num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    p.output_base = CUR_PATH
    p.get_tax_function_parameters(None, run_micro=False)
    bssmat = np.ones((p.S, p.J)) * 0.07
    nssmat = np.ones((p.S, p.J)) * .4 * p.ltilde
    if p.zeta_K[-1] == 1.0:
        r = p.world_int_rate[-1]
    else:
        r = 0.05
    TR = 0.12
    Y = 1.3
    factor = 100000
    BQ = np.ones(p.J) * 0.00019646295986015257
    if p.budget_balance:
        outer_loop_vars = (bssmat, nssmat, r, BQ, TR, factor)
    else:
        outer_loop_vars = (bssmat, nssmat, r, BQ, Y, TR, factor)
    test_tuple = SS.inner_loop(outer_loop_vars, p, None)
    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))
    for i, v in enumerate(expected_tuple):
        assert np.allclose(test_tuple[i], v, atol=1e-05)
def minstat_init_calibrate(params, *args): a0, a1, a2, a3, a4 = params p, client, data_moments, W, ages = args chi_n = np.ones(p.S) chi_n[:p.S // 2 + 5] = chebyshev_func(ages, a0, a1, a2, a3, a4) slope = chi_n[p.S // 2 + 5 - 1] - chi_n[p.S // 2 + 5 - 2] chi_n[p.S // 2 + 5 - 1:] = (np.linspace(65, 100, 36) - 65) * slope + chi_n[p.S // 2 + 5 - 1] chi_n[chi_n < 0.5] = 0.5 p.chi_n = chi_n print("-----------------------------------------------------") print('PARAMS AT START' + str(params)) print("-----------------------------------------------------") b_guess = np.ones((p.S, p.J)) * 0.07 n_guess = np.ones((p.S, p.J)) * .4 * p.ltilde rguess = 0.09 T_Hguess = 0.12 factorguess = 7.7 #70000 # Modified BQguess = aggr.get_BQ(rguess, b_guess, None, p, 'SS', False) exit_early = [ 0, 2 ] # 2nd value gives number of valid labor moments to consider before exiting SS_fsolve ss_params_baseline = (b_guess, n_guess, None, None, p, client, exit_early) guesses = [rguess] + list(BQguess) + [T_Hguess, factorguess] [solutions_fsolve, infodict, ier, message] =\ opt.fsolve(SS.SS_fsolve, guesses, args=ss_params_baseline, xtol=p.mindist_SS, full_output=True) rss = solutions_fsolve[0] BQss = solutions_fsolve[1:-2] T_Hss = solutions_fsolve[-2] factor_ss = solutions_fsolve[-1] Yss = T_Hss / p.alpha_T[-1] fsolve_flag = True try: output = SS.SS_solver(b_guess, n_guess, rss, BQss, T_Hss, factor_ss, Yss, p, client, fsolve_flag) except: print( 'RuntimeError: Steady state aggregate resource constraint not satisfied' ) print( 'Luckily we caught the error, so minstat_init_calibrate will continue' ) return 1e10 model_moments = calc_moments(output, p.omega_SS, p.lambdas, p.S, p.J) print('Model moments:', model_moments) print("-----------------------------------------------------") distance = np.dot( np.dot((np.array(model_moments[:9]) - np.array(data_moments)).T, W), np.array(model_moments[:9]) - np.array(data_moments)) print('DATA and MODEL DISTANCE: ', distance) return distance
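# Hypothetical sketch of what a chebyshev_func like the one used in
# minstat_init_calibrate might look like: it is assumed to evaluate a
# degree-4 Chebyshev series in the age variable.  The actual implementation
# in the calibration code may differ; numpy's chebval is used here only to
# illustrate the idea.
import numpy as np
from numpy.polynomial import chebyshev as cheb


def chebyshev_func_sketch(x, a0, a1, a2, a3, a4):
    # Evaluate a0*T_0(x) + a1*T_1(x) + ... + a4*T_4(x) at the points x
    return cheb.chebval(x, [a0, a1, a2, a3, a4])


ages_example = np.linspace(21, 65, 45)
chi_n_fitted = chebyshev_func_sketch(ages_example, 1.0, 0.1, 0.05, 0.01, 0.001)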
def test_inner_loop(): # Test SS.inner_loop function. Provide inputs to function and # ensure that output returned matches what it has been before. input_tuple = utils.safe_read_pickle( os.path.join(CUR_PATH, 'test_io_data/inner_loop_inputs.pkl')) (outer_loop_vars_in, params, baseline, baseline_spending) = input_tuple ss_params, income_tax_params, chi_params, small_open_params = params (bssmat, nssmat, r, Y, T_H, factor) = outer_loop_vars_in p = Specifications() (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon, Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n_ss, tau_payroll, tau_bq, p.rho, p.omega_SS, p.budget_balance, alpha_T, p.debt_ratio_ss, tau_b, delta_tau, lambdas, imm_rates, p.e, retire, p.mean_income_data, h_wealth, p_wealth, m_wealth, p.b_ellipse, p.upsilon) = ss_params p.Z = np.ones(p.T + p.S) * Z p.tau_bq = np.ones(p.T + p.S) * 0.0 p.tau_payroll = np.ones(p.T + p.S) * tau_payroll p.alpha_T = np.ones(p.T + p.S) * alpha_T p.tau_b = np.ones(p.T + p.S) * tau_b p.delta_tau = np.ones(p.T + p.S) * delta_tau p.h_wealth = np.ones(p.T + p.S) * h_wealth p.p_wealth = np.ones(p.T + p.S) * p_wealth p.m_wealth = np.ones(p.T + p.S) * m_wealth p.retire = (np.ones(p.T + p.S) * retire).astype(int) p.lambdas = lambdas.reshape(p.J, 1) p.imm_rates = imm_rates.reshape(1, p.S) p.tax_func_type = 'DEP' p.baseline = baseline p.baseline_spending = baseline_spending p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\ income_tax_params p.etr_params = np.transpose(etr_params.reshape( p.S, 1, etr_params.shape[-1]), (1, 0, 2)) p.mtrx_params = np.transpose(mtrx_params.reshape( p.S, 1, mtrx_params.shape[-1]), (1, 0, 2)) p.mtry_params = np.transpose(mtry_params.reshape( p.S, 1, mtry_params.shape[-1]), (1, 0, 2)) p.chi_b, p.chi_n = chi_params p.small_open, firm_r, hh_r = small_open_params p.firm_r = np.ones(p.T + p.S) * firm_r p.hh_r = np.ones(p.T + p.S) * hh_r p.num_workers = 1 BQ = np.ones(p.J) * 0.00019646295986015257 outer_loop_vars = (bssmat, nssmat, r, BQ, Y, T_H, factor) (euler_errors, new_bmat, new_nmat, new_r, new_r_gov, new_r_hh, new_w, new_T_H, new_Y, new_factor, new_BQ, average_income_model) = SS.inner_loop(outer_loop_vars, p, None) test_tuple = (euler_errors, new_bmat, new_nmat, new_r, new_w, new_T_H, new_Y, new_factor, new_BQ, average_income_model) expected_tuple = utils.safe_read_pickle( os.path.join(CUR_PATH, 'test_io_data/inner_loop_outputs.pkl')) for i, v in enumerate(expected_tuple): assert(np.allclose(test_tuple[i], v, atol=1e-05))
def test_constant_demographics_TPI(): ''' This tests solves the model under the assumption of constant demographics, a balanced budget, and tax functions that do not vary over time. In this case, given how initial guesss for the time path are made, the time path should be solved for on the first iteration and the values all along the time path should equal their steady-state values. ''' output_base = "./OUTPUT" baseline_dir = "./OUTPUT" user_params = { 'constant_demographics': True, 'budget_balance': True, 'zero_taxes': True, 'maxiter': 2 } # Create output directory structure ss_dir = os.path.join(output_base, "SS") tpi_dir = os.path.join(output_base, "TPI") dirs = [ss_dir, tpi_dir] for _dir in dirs: try: print("making dir: ", _dir) os.makedirs(_dir) except OSError as oe: pass spec = Specifications(run_micro=False, output_base=output_base, baseline_dir=baseline_dir, test=False, time_path=True, baseline=True, reform={}, guid='') spec.update_specifications(user_params) print('path for tax functions: ', spec.output_base) spec.get_tax_function_parameters(None, False) # Run SS ss_outputs = SS.run_SS(spec, None) # save SS results utils.mkdirs(os.path.join(baseline_dir, "SS")) ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl") pickle.dump(ss_outputs, open(ss_dir, "wb")) # Save pickle with parameter values for the run param_dir = os.path.join(baseline_dir, "model_params.pkl") pickle.dump(spec, open(param_dir, "wb")) tpi_output = TPI.run_TPI(spec, None) print( 'Max diff btwn SS and TP bsplus1 = ', np.absolute(tpi_output['bmat_splus1'][:spec.T, :, :] - ss_outputs['bssmat_splus1']).max()) print('Max diff btwn SS and TP Y = ', np.absolute(tpi_output['Y'][:spec.T] - ss_outputs['Yss']).max()) assert (np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :], ss_outputs['bssmat_splus1']))
def test_run_TPI(baseline, param_updates, filename, tmp_path, dask_client): ''' Test TPI.run_TPI function. Provide inputs to function and ensure that output returned matches what it has been before. ''' baseline_dir = os.path.join(CUR_PATH, 'baseline') if baseline: output_base = baseline_dir else: output_base = os.path.join(CUR_PATH, 'reform') p = Specifications(baseline=baseline, baseline_dir=baseline_dir, output_base=output_base, test=True, client=dask_client, num_workers=NUM_WORKERS) p.update_specifications(param_updates) p.maxiter = 2 # this test runs through just two iterations p.get_tax_function_parameters(None, run_micro=False, tax_func_path=os.path.join( CUR_PATH, '..', 'data', 'tax_functions', 'TxFuncEst_baseline_CPS.pkl')) # Need to run SS first to get results SS.ENFORCE_SOLUTION_CHECKS = False ss_outputs = SS.run_SS(p, None) if p.baseline: utils.mkdirs(os.path.join(p.baseline_dir, "SS")) ss_dir = os.path.join(p.baseline_dir, "SS", "SS_vars.pkl") with open(ss_dir, "wb") as f: pickle.dump(ss_outputs, f) else: utils.mkdirs(os.path.join(p.output_base, "SS")) ss_dir = os.path.join(p.output_base, "SS", "SS_vars.pkl") with open(ss_dir, "wb") as f: pickle.dump(ss_outputs, f) TPI.ENFORCE_SOLUTION_CHECKS = False test_dict = TPI.run_TPI(p, None) expected_dict = utils.safe_read_pickle(filename) for k, v in expected_dict.items(): try: assert (np.allclose(test_dict[k][:p.T], v[:p.T], rtol=1e-04, atol=1e-04)) except ValueError: assert (np.allclose(test_dict[k][:p.T, :, :], v[:p.T, :, :], rtol=1e-04, atol=1e-04))
def compute_se(beta_hat, W, K, p, h=0.01, client=None):
    """
    Function to compute standard errors for the SMM estimator.

    Args:
        beta_hat (array-like): estimates of beta parameters
        W (Numpy array): weighting matrix
        K (int): number of moments
        p (OG-USA Specifications object): model parameters
        h (scalar): percentage to move parameters for numerical derivatives
        client (Dask Client object): Dask client

    Returns:
        beta_se (array-like): standard errors for beta estimates
        VCV_params (Numpy array): VCV matrix for parameter estimates
    """
    # compute numerical derivatives that will be needed for the SE's
    model_moments_low = np.zeros((p.J, K))
    model_moments_high = np.zeros((p.J, K))
    for i in range(len(beta_hat)):
        # compute moments with downward change in parameter i
        beta_low = np.copy(beta_hat)
        beta_low[i] = beta_hat[i] * (1 - h)
        p.beta = beta_low
        ss_output = SS.run_SS(p, client=client)
        model_moments_low[i, :] = calc_moments(ss_output, p)
        # compute moments with upward change in parameter i
        beta_high = np.copy(beta_hat)
        beta_high[i] = beta_hat[i] * (1 + h)
        p.beta = beta_high
        ss_output = SS.run_SS(p, client=client)
        model_moments_high[i, :] = calc_moments(ss_output, p)
    deriv_moments = ((model_moments_high - model_moments_low).T /
                     (2 * h * beta_hat))
    VCV_params = np.linalg.inv(
        np.dot(np.dot(deriv_moments.T, W), deriv_moments))
    beta_se = (np.diag(VCV_params)) ** (1 / 2)
    return beta_se, VCV_params
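# Minimal sketch (toy function, not model moments) of the central
# finite-difference step that compute_se applies to each beta: perturb the
# parameter up and down by a fraction h and difference the results.
import numpy as np


def toy_moment(beta):
    # stand-in for a model moment that depends on beta
    return beta ** 3


beta0 = 0.96
h = 0.01
m_high = toy_moment(beta0 * (1 + h))
m_low = toy_moment(beta0 * (1 - h))
deriv = (m_high - m_low) / (2 * h * beta0)
# The numerical derivative should be close to the analytical one, 3 * beta0^2
assert np.isclose(deriv, 3 * beta0 ** 2, rtol=1e-3)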
def test_constant_demographics_TPI(dask_client): ''' This tests solves the model under the assumption of constant demographics, a balanced budget, and tax functions that do not vary over time. In this case, given how initial guesss for the time path are made, the time path should be solved for on the first iteration and the values all along the time path should equal their steady-state values. ''' # Create output directory structure spec = Specifications(run_micro=False, output_base=OUTPUT_DIR, baseline_dir=OUTPUT_DIR, test=False, time_path=True, baseline=True, iit_reform={}, guid='', client=dask_client, num_workers=NUM_WORKERS) og_spec = { 'constant_demographics': True, 'budget_balance': True, 'zero_taxes': True, 'maxiter': 2, 'r_gov_shift': 0.0, 'zeta_D': [0.0, 0.0], 'zeta_K': [0.0, 0.0], 'debt_ratio_ss': 1.0, 'initial_foreign_debt_ratio': 0.0, 'start_year': 2019, 'cit_rate': [0.0], 'PIA_rate_bkt_1': 0.0, 'PIA_rate_bkt_2': 0.0, 'PIA_rate_bkt_3': 0.0, 'eta': (spec.omega_SS.reshape(spec.S, 1) * spec.lambdas.reshape(1, spec.J)) } spec.update_specifications(og_spec) spec.get_tax_function_parameters(None, False, tax_func_path=TAX_FUNC_PATH) # Run SS ss_outputs = SS.run_SS(spec, None) # save SS results utils.mkdirs(os.path.join(OUTPUT_DIR, "SS")) ss_dir = os.path.join(OUTPUT_DIR, "SS", "SS_vars.pkl") with open(ss_dir, "wb") as f: pickle.dump(ss_outputs, f) # Run TPI tpi_output = TPI.run_TPI(spec, None) assert (np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :], ss_outputs['bssmat_splus1']))
def test_constant_demographics_TPI(): ''' This tests solves the model under the assumption of constant demographics, a balanced budget, and tax functions that do not vary over time. In this case, given how initial guesss for the time path are made, the time path should be solved for on the first iteration and the values all along the time path should equal their steady-state values. ''' output_base = os.path.join(CUR_PATH, 'OUTPUT') baseline_dir = output_base # Create output directory structure ss_dir = os.path.join(output_base, "SS") tpi_dir = os.path.join(output_base, "TPI") dirs = [ss_dir, tpi_dir] for _dir in dirs: try: print("making dir: ", _dir) os.makedirs(_dir) except OSError: pass spec = Specifications(run_micro=False, output_base=output_base, baseline_dir=baseline_dir, test=False, time_path=True, baseline=True, iit_reform={}, guid='') og_spec = { 'constant_demographics': True, 'budget_balance': True, 'zero_taxes': True, 'maxiter': 2, 'eta': (spec.omega_SS.reshape(spec.S, 1) * spec.lambdas.reshape(1, spec.J)) } spec.update_specifications(og_spec) spec.get_tax_function_parameters(None, False) # Run SS ss_outputs = SS.run_SS(spec, None) # save SS results utils.mkdirs(os.path.join(baseline_dir, "SS")) ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl") pickle.dump(ss_outputs, open(ss_dir, "wb")) # Run TPI tpi_output = TPI.run_TPI(spec, None) assert (np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :], ss_outputs['bssmat_splus1']))
def test_constant_demographics_TPI_small_open(): ''' This tests solves the model under the assumption of constant demographics, a balanced budget, and tax functions that do not vary over time, as well as with a small open economy assumption. ''' # Create output directory structure spec = Specifications(run_micro=False, output_base=OUTPUT_DIR, baseline_dir=OUTPUT_DIR, test=False, time_path=True, baseline=True, iit_reform={}, guid='') og_spec = { 'constant_demographics': True, 'budget_balance': True, 'zero_taxes': True, 'maxiter': 2, 'r_gov_shift': 0.0, 'zeta_D': [0.0, 0.0], 'zeta_K': [1.0], 'debt_ratio_ss': 1.0, 'initial_foreign_debt_ratio': 0.0, 'start_year': 2019, 'cit_rate': [0.0], 'PIA_rate_bkt_1': 0.0, 'PIA_rate_bkt_2': 0.0, 'PIA_rate_bkt_3': 0.0, 'eta': (spec.omega_SS.reshape(spec.S, 1) * spec.lambdas.reshape(1, spec.J)) } spec.update_specifications(og_spec) spec.get_tax_function_parameters(None, False, tax_func_path=TAX_FUNC_PATH) # Run SS ss_outputs = SS.run_SS(spec, None) # save SS results utils.mkdirs(os.path.join(OUTPUT_DIR, "SS")) ss_dir = os.path.join(OUTPUT_DIR, "SS", "SS_vars.pkl") with open(ss_dir, "wb") as f: pickle.dump(ss_outputs, f) # Run TPI tpi_output = TPI.run_TPI(spec, None) assert (np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :], ss_outputs['bssmat_splus1']))
def test_inner_loop():
    # Test SS.inner_loop function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/inner_loop_inputs.pkl'))
    (outer_loop_vars, params, baseline, baseline_spending) = input_tuple
    ss_params, income_tax_params, chi_params, small_open_params = params
    income_tax_params = ('DEP', ) + income_tax_params
    params = (ss_params, income_tax_params, chi_params, small_open_params)
    test_tuple = SS.inner_loop(outer_loop_vars, params, baseline,
                               baseline_spending)
    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/inner_loop_outputs.pkl'))
    for i, v in enumerate(expected_tuple):
        assert np.allclose(test_tuple[i], v)
def test_SS_fsolve_reform():
    # Test SS.SS_fsolve_reform function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/SS_fsolve_reform_inputs.pkl'))
    guesses, params = input_tuple
    params = params + (None, 1)
    (bssmat, nssmat, chi_params, ss_params, income_tax_params,
     iterative_params, factor, small_open_params, client,
     num_workers) = params
    income_tax_params = ('DEP', ) + income_tax_params
    params = (bssmat, nssmat, chi_params, ss_params, income_tax_params,
              iterative_params, factor, small_open_params, client,
              num_workers)
    test_list = SS.SS_fsolve_reform(guesses, params)
    expected_list = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/SS_fsolve_reform_outputs.pkl'))
    assert np.allclose(np.array(test_list), np.array(expected_list))
def test_run_SS(input_path, expected_path):
    # Test SS.run_SS function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', input_path))
    (income_tax_params, ss_params, iterative_params, chi_params,
     small_open_params, baseline, baseline_spending, baseline_dir) =\
        input_tuple
    income_tax_params = ('DEP', ) + income_tax_params
    test_dict = SS.run_SS(income_tax_params, ss_params, iterative_params,
                          chi_params, small_open_params, baseline,
                          baseline_spending, baseline_dir)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', expected_path))
    for k, v in expected_dict.items():
        assert np.allclose(test_dict[k], v)
def test_SS_solver():
    # Test SS.SS_solver function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/SS_solver_inputs.pkl'))
    (b_guess_init, n_guess_init, rss, T_Hss, factor_ss, Yss, params,
     baseline, fsolve_flag, baseline_spending) = input_tuple
    (bssmat, nssmat, chi_params, ss_params, income_tax_params,
     iterative_params, small_open_params) = params
    income_tax_params = ('DEP', ) + income_tax_params
    params = (bssmat, nssmat, chi_params, ss_params, income_tax_params,
              iterative_params, small_open_params)
    test_dict = SS.SS_solver(b_guess_init, n_guess_init, rss, T_Hss,
                             factor_ss, Yss, params, baseline, fsolve_flag,
                             baseline_spending)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/SS_solver_outputs.pkl'))
    for k, v in expected_dict.items():
        assert np.allclose(test_dict[k], v)
def matcher(d_guess, params):
    income_tax_params, receipts_to_match, ss_params, iterative_params,\
        chi_params, baseline, baseline_dir = params
    analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
    etr_params[:, 3] = d_guess
    mtrx_params[:, 3] = d_guess
    mtry_params[:, 3] = d_guess
    income_tax_params = analytical_mtrs, etr_params, mtrx_params, mtry_params
    ss_outputs = SS.run_SS(income_tax_params, ss_params, iterative_params,
                           chi_params, baseline,
                           fix_transfers=fix_transfers,
                           baseline_dir=baseline_dir)
    receipts_new = ss_outputs['T_Hss'] + ss_outputs['Gss']
    error = abs(receipts_to_match - receipts_new)
    if d_guess <= 0:
        error = 1e14
    print 'Error in taxes:', error
    return error
def test_euler_equation_solver():
    # Test SS.euler_equation_solver function.  Provide inputs to function
    # and ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/euler_eqn_solver_inputs.pkl'))
    (guesses, params) = input_tuple
    (r, w, T_H, factor, j, J, S, beta, sigma, ltilde, g_y, g_n_ss,
     tau_payroll, retire, mean_income_data, h_wealth, p_wealth, m_wealth,
     b_ellipse, upsilon, j, chi_b, chi_n, tau_bq, rho, lambdas, omega_SS, e,
     analytical_mtrs, etr_params, mtrx_params, mtry_params) = params
    tax_func_type = 'DEP'
    params = (r, w, T_H, factor, j, J, S, beta, sigma, ltilde, g_y, g_n_ss,
              tau_payroll, retire, mean_income_data, h_wealth, p_wealth,
              m_wealth, b_ellipse, upsilon, j, chi_b, chi_n, tau_bq, rho,
              lambdas, omega_SS, e, tax_func_type, analytical_mtrs,
              etr_params, mtrx_params, mtry_params)
    test_list = SS.euler_equation_solver(guesses, params)
    expected_list = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/euler_eqn_solver_outputs.pkl'))
    assert np.allclose(np.array(test_list), np.array(expected_list))
def solve_model(params, d):
    income_tax_params, ss_params, iterative_params,\
        chi_params, baseline, baseline_dir = params
    analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
    etr_params[:, 3] = d
    mtrx_params[:, 3] = d
    mtry_params[:, 3] = d
    income_tax_params = analytical_mtrs, etr_params, mtrx_params, mtry_params
    ss_outputs = SS.run_SS(income_tax_params, ss_params, iterative_params,
                           chi_params, baseline,
                           fix_transfers=fix_transfers,
                           baseline_dir=baseline_dir)
    ss_dir = os.path.join("./OUTPUT_INCOME_REFORM/sigma2.0", "SS/SS_vars.pkl")
    pickle.dump(ss_outputs, open(ss_dir, "wb"))
    receipts_new = ss_outputs['T_Hss'] + ss_outputs['Gss']
    new_error = receipts_to_match - receipts_new
    print 'Error in taxes:', new_error
    print 'New income tax:', d
    return new_error
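# Minimal sketch (toy revenue function, not the model) of how an objective
# like matcher/solve_model can be driven by a root finder: search for the
# tax parameter d at which simulated receipts equal the target.
import scipy.optimize as opt


def toy_receipts_gap(d, receipts_to_match=0.5):
    # stand-in for "receipts under tax parameter d minus the target"
    simulated_receipts = 1.0 - (1.0 / (1.0 + d))  # increasing in d
    return simulated_receipts - receipts_to_match


d_star = opt.brentq(toy_receipts_gap, 1e-6, 10.0)
# At d_star the gap between simulated and target receipts is (nearly) zero
assert abs(toy_receipts_gap(d_star)) < 1e-8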
def test_create_steady_state_parameters():
    # Test that SS parameter creation returns the same objects given the
    # same inputs.
    input_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/create_params_inputs.pkl'))
    input_dict['tax_func_type'] = 'DEP'
    test_tuple = SS.create_steady_state_parameters(**input_dict)
    expected_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/create_params_outputs.pkl'))
    (income_tax_params, ss_params, iterative_params, chi_params,
     small_open_params) = expected_tuple
    income_tax_params = ('DEP', ) + income_tax_params
    expected_tuple = (income_tax_params, ss_params, iterative_params,
                      chi_params, small_open_params)
    for i, v in enumerate(expected_tuple):
        for i2, v2 in enumerate(v):
            try:
                assert all(test_tuple[i][i2] == v2)
            except ValueError:
                assert (test_tuple[i][i2] == v2).all()
            except TypeError:
                assert test_tuple[i][i2] == v2
def runner(output_base, input_dir, baseline=False, analytical_mtrs=True, age_specific=False, reform={}, user_params={}, guid='', run_micro=True): from ogusa import parameters, wealth, labor, demographics, income from ogusa import txfunc tick = time.time() #Create output directory structure saved_moments_dir = os.path.join(output_base, "Saved_moments") ssinit_dir = os.path.join(output_base, "SSinit") tpiinit_dir = os.path.join(output_base, "TPIinit") dirs = [saved_moments_dir, ssinit_dir, tpiinit_dir] for _dir in dirs: try: print "making dir: ", _dir os.makedirs(_dir) except OSError as oe: pass if run_micro: txfunc.get_tax_func_estimate(baseline=baseline, analytical_mtrs=analytical_mtrs, age_specific=age_specific, reform=reform, guid=guid) print ("in runner, baseline is ", baseline) run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid) run_params['analytical_mtrs'] = analytical_mtrs # Modify ogusa parameters based on user input if 'frisch' in user_params: print "updating fricsh and associated" b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(user_params['frisch'], run_params['ltilde']) run_params['b_ellipse'] = b_ellipse run_params['upsilon'] = upsilon run_params.update(user_params) # Modify ogusa parameters based on user input if 'g_y_annual' in user_params: print "updating g_y_annual and associated" g_y = (1 + user_params['g_y_annual'])**(float(ending_age - starting_age) / S) - 1 run_params['g_y'] = g_y run_params.update(user_params) globals().update(run_params) from ogusa import SS, TPI # Generate Wealth data moments wealth.get_wealth_data(lambdas, J, flag_graphs, output_dir=input_dir) # Generate labor data moments labor.labor_data_moments(flag_graphs, output_dir=input_dir) get_baseline = True calibrate_model = False # List of parameter names that will not be changing (unless we decide to # change them for a tax experiment) param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI', 'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon', 'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params', 'mtry_params','tau_payroll', 'tau_bq', 'calibrate_model', 'retire', 'mean_income_data', 'g_n_vector', 'h_wealth', 'p_wealth', 'm_wealth', 'get_baseline', 'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho'] ''' ------------------------------------------------------------------------ Run SS with minimization to fit chi_b and chi_n ------------------------------------------------------------------------ ''' # This is the simulation before getting the replacement rate values sim_params = {} glbs = globals() lcls = locals() for key in param_names: if key in glbs: sim_params[key] = glbs[key] else: sim_params[key] = lcls[key] sim_params['output_dir'] = input_dir sim_params['run_params'] = run_params income_tax_params, wealth_tax_params, ellipse_params, ss_parameters, iterative_params = SS.create_steady_state_parameters(**sim_params) ss_outputs = SS.run_steady_state(income_tax_params, ss_parameters, iterative_params, get_baseline, calibrate_model, output_dir=input_dir) ''' ------------------------------------------------------------------------ Run the baseline TPI simulation ------------------------------------------------------------------------ ''' ss_outputs['get_baseline'] = get_baseline sim_params['input_dir'] = input_dir income_tax_params, wealth_tax_params, ellipse_params, parameters, N_tilde, omega_stationary, K0, b_sinit, \ b_splus1init, L0, 
Y0, w0, r0, BQ0, T_H_0, tax0, c0, initial_b, initial_n = TPI.create_tpi_params(**sim_params) ss_outputs['income_tax_params'] = income_tax_params ss_outputs['wealth_tax_params'] = wealth_tax_params ss_outputs['ellipse_params'] = ellipse_params ss_outputs['parameters'] = parameters ss_outputs['N_tilde'] = N_tilde ss_outputs['omega_stationary'] = omega_stationary ss_outputs['K0'] = K0 ss_outputs['b_sinit'] = b_sinit ss_outputs['b_splus1init'] = b_splus1init ss_outputs['L0'] = L0 ss_outputs['Y0'] = Y0 ss_outputs['r0'] = r0 ss_outputs['BQ0'] = BQ0 ss_outputs['T_H_0'] = T_H_0 ss_outputs['tax0'] = tax0 ss_outputs['c0'] = c0 ss_outputs['initial_b'] = initial_b ss_outputs['initial_n'] = initial_n ss_outputs['tau_bq'] = tau_bq ss_outputs['g_n_vector'] = g_n_vector ss_outputs['output_dir'] = input_dir with open("ss_outputs.pkl", 'wb') as fp: pickle.dump(ss_outputs, fp) w_path, r_path, T_H_path, BQ_path, Y_path = TPI.run_time_path_iteration(**ss_outputs) print "getting to here...." TPI.TP_solutions(w_path, r_path, T_H_path, BQ_path, **ss_outputs) print "took {0} seconds to get that part done.".format(time.time() - tick)
def runner_SS(output_base, baseline_dir, baseline=False, analytical_mtrs=False, age_specific=False, reform={}, user_params={}, guid='', run_micro=True): from ogusa import parameters, demographics, income, utils from ogusa import txfunc tick = time.time() #Create output directory structure saved_moments_dir = os.path.join(output_base, "Saved_moments") ss_dir = os.path.join(output_base, "SS") tpi_dir = os.path.join(output_base, "TPI") dirs = [saved_moments_dir, ss_dir, tpi_dir] for _dir in dirs: try: print "making dir: ", _dir os.makedirs(_dir) except OSError as oe: pass if run_micro: txfunc.get_tax_func_estimate(baseline=baseline, analytical_mtrs=analytical_mtrs, age_specific=age_specific, start_year=user_params['start_year'], reform=reform, guid=guid) print ("in runner, baseline is ", baseline) run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid) run_params['analytical_mtrs'] = analytical_mtrs # Modify ogusa parameters based on user input if 'frisch' in user_params: print "updating fricsh and associated" b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(user_params['frisch'], run_params['ltilde']) run_params['b_ellipse'] = b_ellipse run_params['upsilon'] = upsilon run_params.update(user_params) # Modify ogusa parameters based on user input if 'g_y_annual' in user_params: print "updating g_y_annual and associated" ending_age = run_params['ending_age'] starting_age = run_params['starting_age'] S = run_params['S'] g_y = (1 + user_params['g_y_annual'])**(float(ending_age - starting_age) / S) - 1 run_params['g_y'] = g_y run_params.update(user_params) from ogusa import SS, TPI # List of parameter names that will not be changing (unless we decide to # change them for a tax experiment) param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI', 'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon', 'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params', 'mtry_params','tau_payroll', 'tau_bq', 'retire', 'mean_income_data', 'g_n_vector', 'h_wealth', 'p_wealth', 'm_wealth', 'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'imm_rates', 'e', 'rho', 'omega_S_preTP'] ''' ------------------------------------------------------------------------ Run SS ------------------------------------------------------------------------ ''' sim_params = {} for key in param_names: sim_params[key] = run_params[key] sim_params['output_dir'] = output_base sim_params['run_params'] = run_params income_tax_params, ss_params, iterative_params, chi_params= SS.create_steady_state_parameters(**sim_params) ''' **** CALL CALIBRATION here if boolean flagged **** ''' calibrate_model = False if calibrate_model: chi_params = calibrate.chi_estimate(income_tax_params, ss_params, iterative_params, chi_params, baseline_dir=baseline_dir) ss_outputs = SS.run_SS(income_tax_params, ss_params, iterative_params, chi_params, baseline, baseline_dir=baseline_dir) ''' ------------------------------------------------------------------------ Pickle SS results ------------------------------------------------------------------------ ''' if baseline: utils.mkdirs(os.path.join(baseline_dir, "SS")) ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl") pickle.dump(ss_outputs, open(ss_dir, "wb")) else: utils.mkdirs(os.path.join(output_base, "SS")) ss_dir = os.path.join(output_base, "SS/SS_vars.pkl") pickle.dump(ss_outputs, open(ss_dir, "wb"))
def runner(output_base, baseline_dir, test=False, time_path=True, baseline=False, analytical_mtrs=False, age_specific=False, reform={}, user_params={}, guid='', run_micro=True, small_open=False, budget_balance=False, baseline_spending=False): #from ogusa import parameters, wealth, labor, demographics, income from ogusa import parameters, demographics, income, utils from ogusa import txfunc tick = time.time() # Make sure options are internally consistent if baseline == True and baseline_spending == True: print( 'Inconsistent options. Setting <baseline_spending> to False, leaving <baseline> True.' ) baseline_spending = False if budget_balance == True and baseline_spending == True: print( 'Inconsistent options. Setting <baseline_spending> to False, leaving <budget_balance> True.' ) baseline_spending = False #Create output directory structure saved_moments_dir = os.path.join(output_base, "Saved_moments") ss_dir = os.path.join(output_base, "SS") tpi_dir = os.path.join(output_base, "TPI") dirs = [saved_moments_dir, ss_dir, tpi_dir] for _dir in dirs: try: print("making dir: ", _dir) os.makedirs(_dir) except OSError as oe: pass if run_micro: txfunc.get_tax_func_estimate(baseline=baseline, analytical_mtrs=analytical_mtrs, age_specific=age_specific, start_year=user_params['start_year'], reform=reform, guid=guid) print('In runner, baseline is ', baseline) run_params = ogusa.parameters.get_parameters(test=test, baseline=baseline, guid=guid) run_params['analytical_mtrs'] = analytical_mtrs run_params['small_open'] = small_open run_params['budget_balance'] = budget_balance # Modify ogusa parameters based on user input if 'frisch' in user_params: print("updating frisch and associated") b_ellipse, upsilon = ogusa.elliptical_u_est.estimation( user_params['frisch'], run_params['ltilde']) run_params['b_ellipse'] = b_ellipse run_params['upsilon'] = upsilon run_params.update(user_params) if 'debt_ratio_ss' in user_params: run_params['debt_ratio_ss'] = user_params['debt_ratio_ss'] # Modify ogusa parameters based on user input if 'g_y_annual' in user_params: print("updating g_y_annual and associated") ending_age = run_params['ending_age'] starting_age = run_params['starting_age'] S = run_params['S'] g_y = (1 + user_params['g_y_annual'])**( float(ending_age - starting_age) / S) - 1 run_params['g_y'] = g_y run_params.update(user_params) # Modify transfer & spending ratios based on user input. 
if 'T_shifts' in user_params: if baseline_spending == False: print('updating ALPHA_T with T_shifts in first', user_params['T_shifts'].size, 'periods.') T_shifts = np.concatenate((user_params['T_shifts'], np.zeros(run_params['ALPHA_T'].size - user_params['T_shifts'].size)), axis=0) run_params['ALPHA_T'] = run_params['ALPHA_T'] + T_shifts if 'G_shifts' in user_params: if baseline_spending == False: print('updating ALPHA_G with G_shifts in first', user_params['G_shifts'].size, 'periods.') G_shifts = np.concatenate((user_params['G_shifts'], np.zeros(run_params['ALPHA_G'].size - user_params['G_shifts'].size)), axis=0) run_params['ALPHA_G'] = run_params['ALPHA_G'] + G_shifts from ogusa import SS, TPI calibrate_model = False # List of parameter names that will not be changing (unless we decide to # change them for a tax experiment) param_names = [ 'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta', 'sigma', 'alpha', 'gamma', 'epsilon', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI', 'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon', 'small_open', 'budget_balance', 'ss_firm_r', 'ss_hh_r', 'tpi_firm_r', 'tpi_hh_r', 'tG1', 'tG2', 'alpha_T', 'alpha_G', 'ALPHA_T', 'ALPHA_G', 'rho_G', 'debt_ratio_ss', 'tau_b', 'delta_tau', 'chi_b_guess', 'chi_n_guess', 'etr_params', 'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq', 'retire', 'mean_income_data', 'g_n_vector', 'h_wealth', 'p_wealth', 'm_wealth', 'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'imm_rates', 'e', 'rho', 'initial_debt', 'omega_S_preTP' ] ''' ------------------------------------------------------------------------ Run SS ------------------------------------------------------------------------ ''' sim_params = {} for key in param_names: sim_params[key] = run_params[key] sim_params['output_dir'] = output_base sim_params['run_params'] = run_params income_tax_params, ss_parameters, iterative_params, chi_params, small_open_params = SS.create_steady_state_parameters( **sim_params) ss_outputs = SS.run_SS(income_tax_params, ss_parameters, iterative_params, chi_params, small_open_params, baseline, baseline_spending, baseline_dir=baseline_dir) ''' ------------------------------------------------------------------------ Pickle SS results ------------------------------------------------------------------------ ''' if baseline: utils.mkdirs(os.path.join(baseline_dir, "SS")) ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl") pickle.dump(ss_outputs, open(ss_dir, "wb")) else: utils.mkdirs(os.path.join(output_base, "SS")) ss_dir = os.path.join(output_base, "SS/SS_vars.pkl") pickle.dump(ss_outputs, open(ss_dir, "wb")) if time_path: ''' ------------------------------------------------------------------------ Run the TPI simulation ------------------------------------------------------------------------ ''' sim_params['baseline'] = baseline sim_params['baseline_spending'] = baseline_spending sim_params['input_dir'] = output_base sim_params['baseline_dir'] = baseline_dir income_tax_params, tpi_params, iterative_params, small_open_params, initial_values, SS_values, fiscal_params, biz_tax_params = TPI.create_tpi_params( **sim_params) tpi_output, macro_output = TPI.run_TPI( income_tax_params, tpi_params, iterative_params, small_open_params, initial_values, SS_values, fiscal_params, biz_tax_params, output_dir=output_base, baseline_spending=baseline_spending) ''' ------------------------------------------------------------------------ Pickle TPI results 
------------------------------------------------------------------------ ''' tpi_dir = os.path.join(output_base, "TPI") utils.mkdirs(tpi_dir) tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl") pickle.dump(tpi_output, open(tpi_vars, "wb")) tpi_dir = os.path.join(output_base, "TPI") utils.mkdirs(tpi_dir) tpi_vars = os.path.join(tpi_dir, "TPI_macro_vars.pkl") pickle.dump(macro_output, open(tpi_vars, "wb")) print("Time path iteration complete.") print("It took {0} seconds to get that part done.".format(time.time() - tick))
def test_euler_equation_solver(): # Test SS.inner_loop function. Provide inputs to function and # ensure that output returned matches what it has been before. input_tuple = utils.safe_read_pickle( os.path.join(CUR_PATH, 'test_io_data/euler_eqn_solver_inputs.pkl')) (guesses, params) = input_tuple p = Specifications() (r, w, T_H, factor, j, p.J, p.S, p.beta, p.sigma, p.ltilde, p.g_y, p.g_n_ss, tau_payroll, retire, p.mean_income_data, h_wealth, p_wealth, m_wealth, p.b_ellipse, p.upsilon, j, p.chi_b, p.chi_n, tau_bq, p.rho, lambdas, p.omega_SS, p.e, p.analytical_mtrs, etr_params, mtrx_params, mtry_params) = params p.tau_bq = np.ones(p.T + p.S) * 0.0 p.tau_payroll = np.ones(p.T + p.S) * tau_payroll p.h_wealth = np.ones(p.T + p.S) * h_wealth p.p_wealth = np.ones(p.T + p.S) * p_wealth p.m_wealth = np.ones(p.T + p.S) * m_wealth p.retire = (np.ones(p.T + p.S) * retire).astype(int) p.etr_params = np.transpose(etr_params.reshape( p.S, 1, etr_params.shape[-1]), (1, 0, 2)) p.mtrx_params = np.transpose(mtrx_params.reshape( p.S, 1, mtrx_params.shape[-1]), (1, 0, 2)) p.mtry_params = np.transpose(mtry_params.reshape( p.S, 1, mtry_params.shape[-1]), (1, 0, 2)) p.tax_func_type = 'DEP' p.lambdas = lambdas.reshape(p.J, 1) b_splus1 = np.array(guesses[:p.S]).reshape(p.S, 1) + 0.005 BQ = aggregates.get_BQ(r, b_splus1, j, p, 'SS', False) bq = household.get_bq(BQ, j, p, 'SS') args = (r, w, bq, T_H, factor, j, p) test_list = SS.euler_equation_solver(guesses, *args) expected_list = np.array([ -3.62741663e+00, -6.30068841e+00, -6.76592886e+00, -6.97731223e+00, -7.05777777e+00, -6.57305440e+00, -7.11553046e+00, -7.30569622e+00, -7.45808107e+00, -7.89984062e+00, -8.11466111e+00, -8.28230086e+00, -8.79253862e+00, -8.86994311e+00, -9.31299476e+00, -9.80834199e+00, -9.97333771e+00, -1.08349979e+01, -1.13199826e+01, -1.22890930e+01, -1.31550471e+01, -1.42753713e+01, -1.55721098e+01, -1.73811490e+01, -1.88856303e+01, -2.09570569e+01, -2.30559500e+01, -2.52127149e+01, -2.76119605e+01, -3.03141128e+01, -3.30900203e+01, -3.62799730e+01, -3.91169706e+01, -4.24246421e+01, -4.55740527e+01, -4.92914871e+01, -5.30682805e+01, -5.70043846e+01, -6.06075991e+01, -6.45251018e+01, -6.86128365e+01, -7.35896515e+01, -7.92634608e+01, -8.34733231e+01, -9.29802390e+01, -1.01179788e+02, -1.10437881e+02, -1.20569527e+02, -1.31569973e+02, -1.43633399e+02, -1.57534056e+02, -1.73244610e+02, -1.90066728e+02, -2.07980863e+02, -2.27589046e+02, -2.50241670e+02, -2.76314755e+02, -3.04930986e+02, -3.36196973e+02, -3.70907934e+02, -4.10966644e+02, -4.56684022e+02, -5.06945218e+02, -5.61838645e+02, -6.22617808e+02, -6.90840503e+02, -7.67825713e+02, -8.54436805e+02, -9.51106365e+02, -1.05780305e+03, -1.17435473e+03, -1.30045062e+03, -1.43571221e+03, -1.57971603e+03, -1.73204264e+03, -1.88430524e+03, -2.03403679e+03, -2.17861987e+03, -2.31532884e+03, -8.00654731e+03, -5.21487172e-02, -2.80234170e-01, 4.93894552e-01, 3.11884938e-01, 6.55799607e-01, 5.62182419e-01, 3.86074983e-01, 3.43741491e-01, 4.22461089e-01, 3.63707951e-01, 4.93150010e-01, 4.72813688e-01, 4.07390308e-01, 4.94974186e-01, 4.69900128e-01, 4.37562389e-01, 5.67370182e-01, 4.88965362e-01, 6.40728461e-01, 6.14619979e-01, 4.97173823e-01, 6.19549666e-01, 6.51193557e-01, 4.48906118e-01, 7.93091492e-01, 6.51249363e-01, 6.56307713e-01, 1.12948552e+00, 9.50018058e-01, 6.79613030e-01, 9.51359123e-01, 6.31059147e-01, 7.97896887e-01, 8.44620817e-01, 7.43683837e-01, 1.56693187e+00, 2.75630011e-01, 5.32956891e-01, 1.57110727e+00, 1.22674610e+00, 4.63932928e-01, 1.47225464e+00, 1.16948107e+00, 
1.07965795e+00, -3.20557791e-01, -1.17064127e+00, -7.84880649e-01, -7.60851182e-01, -1.61415945e+00, -8.30363975e-01, -1.68459409e+00, -1.49260581e+00, -1.84257084e+00, -1.72143079e+00, -1.43131579e+00, -1.63719219e+00, -1.43874851e+00, -1.57207905e+00, -1.72909159e+00, -1.98778122e+00, -1.80843826e+00, -2.12828312e+00, -2.24768762e+00, -2.36961877e+00, -2.49117258e+00, -2.59914065e+00, -2.82309085e+00, -2.93613362e+00, -3.34446991e+00, -3.45445086e+00, -3.74962140e+00, -3.78113417e+00, -4.55643800e+00, -4.86929016e+00, -5.08657898e+00, -5.22054177e+00, -5.54606515e+00, -5.78478304e+00, -5.93652041e+00, -6.11519786e+00]) assert(np.allclose(np.array(test_list), np.array(expected_list)))
def test_sstpi(): import tempfile import pickle import numpy as np import numpy as np import cPickle as pickle import os import ogusa ogusa.parameters.DATASET = "REAL" from ogusa.utils import comp_array from ogusa.utils import comp_scalar from ogusa.utils import dict_compare from ogusa.utils import pickle_file_compare import ogusa.SS import ogusa.TPI from ogusa import parameters, wealth, labor, demographics, income, SS, TPI globals().update(ogusa.parameters.get_parameters()) # Generate Wealth data moments output_dir = TEST_OUTPUT input_dir = "./OUTPUT" wealth.get_wealth_data(lambdas, J, flag_graphs, output_dir) # Generate labor data moments labor.labor_data_moments(flag_graphs, output_dir=output_dir) get_baseline = True calibrate_model = False # List of parameter names that will not be changing (unless we decide to # change them for a tax experiment) param_names = [ "S", "J", "T", "lambdas", "starting_age", "ending_age", "beta", "sigma", "alpha", "nu", "Z", "delta", "E", "ltilde", "g_y", "maxiter", "mindist_SS", "mindist_TPI", "b_ellipse", "k_ellipse", "upsilon", "a_tax_income", "chi_b_guess", "chi_n_guess", "b_tax_income", "c_tax_income", "d_tax_income", "tau_payroll", "tau_bq", "calibrate_model", "retire", "mean_income_data", "g_n_vector", "h_wealth", "p_wealth", "m_wealth", "get_baseline", "omega", "g_n_ss", "omega_SS", "surv_rate", "e", "rho", ] """ ------------------------------------------------------------------------ Run SS with minimization to fit chi_b and chi_n ------------------------------------------------------------------------ """ # This is the simulation before getting the replacement rate values sim_params = {} for key in param_names: try: sim_params[key] = locals()[key] except KeyError: sim_params[key] = globals()[key] sim_params["output_dir"] = output_dir sim_params["input_dir"] = input_dir income_tax_params, wealth_tax_params, ellipse_params, ss_parameters, iterative_params = SS.create_steady_state_parameters( **sim_params ) ss_outputs = SS.run_steady_state( ss_parameters, iterative_params, get_baseline, calibrate_model, output_dir=output_dir ) """ ------------------------------------------------------------------------ Run the baseline TPI simulation ------------------------------------------------------------------------ """ ss_outputs["get_baseline"] = get_baseline income_tax_params, wealth_tax_params, ellipse_params, parameters, N_tilde, omega_stationary, K0, b_sinit, b_splus1init, L0, Y0, w0, r0, BQ0, T_H_0, tax0, c0, initial_b, initial_n = TPI.create_tpi_params( **sim_params ) ss_outputs["output_dir"] = output_dir ss_outputs["income_tax_params"] = income_tax_params ss_outputs["wealth_tax_params"] = wealth_tax_params ss_outputs["ellipse_params"] = ellipse_params ss_outputs["parameters"] = parameters ss_outputs["N_tilde"] = N_tilde ss_outputs["omega_stationary"] = omega_stationary ss_outputs["K0"] = K0 ss_outputs["b_sinit"] = b_sinit ss_outputs["b_splus1init"] = b_splus1init ss_outputs["L0"] = L0 ss_outputs["Y0"] = Y0 ss_outputs["r0"] = r0 ss_outputs["BQ0"] = BQ0 ss_outputs["T_H_0"] = T_H_0 ss_outputs["tax0"] = tax0 ss_outputs["c0"] = c0 ss_outputs["initial_b"] = initial_b ss_outputs["initial_n"] = initial_n ss_outputs["tau_bq"] = tau_bq ss_outputs["g_n_vector"] = g_n_vector TPI.run_time_path_iteration(**ss_outputs) # Platform specific exceptions: if sys.platform == "darwin": exceptions = {"tax_path": 0.08, "c_path": 0.02, "b_mat": 0.0017, "solutions": 0.005} else: exceptions = {} # compare results to test data for old, new in zip(oldfiles, newfiles): print 
"trying a pair" print old, new assert pickle_file_compare(old, new, exceptions=exceptions, relative=True) print "next pair"
def runner(output_base, baseline_dir, test=False, time_path=True, baseline=False, analytical_mtrs=False, age_specific=False, reform={}, user_params={}, guid='', run_micro=True, small_open=False, budget_balance=False, baseline_spending=False): #from ogusa import parameters, wealth, labor, demographics, income from ogusa import parameters, demographics, income, utils from ogusa import txfunc tick = time.time() # Make sure options are internally consistent if baseline==True and baseline_spending==True: print 'Inconsistent options. Setting <baseline_spending> to False, leaving <baseline> True.' baseline_spending = False if budget_balance==True and baseline_spending==True: print 'Inconsistent options. Setting <baseline_spending> to False, leaving <budget_balance> True.' baseline_spending = False #Create output directory structure saved_moments_dir = os.path.join(output_base, "Saved_moments") ss_dir = os.path.join(output_base, "SS") tpi_dir = os.path.join(output_base, "TPI") dirs = [saved_moments_dir, ss_dir, tpi_dir] for _dir in dirs: try: print "making dir: ", _dir os.makedirs(_dir) except OSError as oe: pass if run_micro: txfunc.get_tax_func_estimate(baseline=baseline, analytical_mtrs=analytical_mtrs, age_specific=age_specific, start_year=user_params['start_year'], reform=reform, guid=guid) print 'In runner, baseline is ', baseline run_params = ogusa.parameters.get_parameters(test=test, baseline=baseline, guid=guid) run_params['analytical_mtrs'] = analytical_mtrs run_params['small_open'] = small_open run_params['budget_balance'] = budget_balance # Modify ogusa parameters based on user input if 'frisch' in user_params: print "updating frisch and associated" b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(user_params['frisch'], run_params['ltilde']) run_params['b_ellipse'] = b_ellipse run_params['upsilon'] = upsilon run_params.update(user_params) if 'debt_ratio_ss' in user_params: run_params['debt_ratio_ss']=user_params['debt_ratio_ss'] # Modify ogusa parameters based on user input if 'g_y_annual' in user_params: print "updating g_y_annual and associated" ending_age = run_params['ending_age'] starting_age = run_params['starting_age'] S = run_params['S'] g_y = (1 + user_params['g_y_annual'])**(float(ending_age - starting_age) / S) - 1 run_params['g_y'] = g_y run_params.update(user_params) # Modify transfer & spending ratios based on user input. if 'T_shifts' in user_params: if baseline_spending==False: print 'updating ALPHA_T with T_shifts in first', user_params['T_shifts'].size, 'periods.' T_shifts = np.concatenate((user_params['T_shifts'], np.zeros(run_params['ALPHA_T'].size - user_params['T_shifts'].size)), axis=0) run_params['ALPHA_T'] = run_params['ALPHA_T'] + T_shifts if 'G_shifts' in user_params: if baseline_spending==False: print 'updating ALPHA_G with G_shifts in first', user_params['G_shifts'].size, 'periods.' 
G_shifts = np.concatenate((user_params['G_shifts'], np.zeros(run_params['ALPHA_G'].size - user_params['G_shifts'].size)), axis=0) run_params['ALPHA_G'] = run_params['ALPHA_G'] + G_shifts from ogusa import SS, TPI calibrate_model = False # List of parameter names that will not be changing (unless we decide to # change them for a tax experiment) param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta', 'sigma', 'alpha', 'gamma', 'epsilon', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI', 'analytical_mtrs', 'b_ellipse', 'k_ellipse', 'upsilon', 'small_open', 'budget_balance', 'ss_firm_r', 'ss_hh_r', 'tpi_firm_r', 'tpi_hh_r', 'tG1', 'tG2', 'alpha_T', 'alpha_G', 'ALPHA_T', 'ALPHA_G', 'rho_G', 'debt_ratio_ss', 'tau_b', 'delta_tau', 'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params', 'mtry_params','tau_payroll', 'tau_bq', 'retire', 'mean_income_data', 'g_n_vector', 'h_wealth', 'p_wealth', 'm_wealth', 'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'imm_rates','e', 'rho', 'initial_debt','omega_S_preTP'] ''' ------------------------------------------------------------------------ Run SS ------------------------------------------------------------------------ ''' sim_params = {} for key in param_names: sim_params[key] = run_params[key] sim_params['output_dir'] = output_base sim_params['run_params'] = run_params income_tax_params, ss_parameters, iterative_params, chi_params, small_open_params = SS.create_steady_state_parameters(**sim_params) ss_outputs = SS.run_SS(income_tax_params, ss_parameters, iterative_params, chi_params, small_open_params, baseline, baseline_spending, baseline_dir=baseline_dir) ''' ------------------------------------------------------------------------ Pickle SS results ------------------------------------------------------------------------ ''' if baseline: utils.mkdirs(os.path.join(baseline_dir, "SS")) ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl") pickle.dump(ss_outputs, open(ss_dir, "wb")) else: utils.mkdirs(os.path.join(output_base, "SS")) ss_dir = os.path.join(output_base, "SS/SS_vars.pkl") pickle.dump(ss_outputs, open(ss_dir, "wb")) if time_path: ''' ------------------------------------------------------------------------ Run the TPI simulation ------------------------------------------------------------------------ ''' sim_params['baseline'] = baseline sim_params['baseline_spending'] = baseline_spending sim_params['input_dir'] = output_base sim_params['baseline_dir'] = baseline_dir income_tax_params, tpi_params, iterative_params, small_open_params, initial_values, SS_values, fiscal_params, biz_tax_params = TPI.create_tpi_params(**sim_params) tpi_output, macro_output = TPI.run_TPI(income_tax_params, tpi_params, iterative_params, small_open_params, initial_values, SS_values, fiscal_params, biz_tax_params, output_dir=output_base, baseline_spending=baseline_spending) ''' ------------------------------------------------------------------------ Pickle TPI results ------------------------------------------------------------------------ ''' tpi_dir = os.path.join(output_base, "TPI") utils.mkdirs(tpi_dir) tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl") pickle.dump(tpi_output, open(tpi_vars, "wb")) tpi_dir = os.path.join(output_base, "TPI") utils.mkdirs(tpi_dir) tpi_vars = os.path.join(tpi_dir, "TPI_macro_vars.pkl") pickle.dump(macro_output, open(tpi_vars, "wb")) print "Time path iteration complete." print "It took {0} seconds to get that part done.".format(time.time() - tick)
def runner_SS(output_base, baseline_dir, baseline=False, analytical_mtrs=True,
              age_specific=False, reform={}, user_params={}, guid='',
              run_micro=True):

    from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ssinit_dir = os.path.join(output_base, "SSinit")
    tpiinit_dir = os.path.join(output_base, "TPIinit")
    dirs = [saved_moments_dir, ssinit_dir, tpiinit_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline,
                                     analytical_mtrs=analytical_mtrs,
                                     age_specific=age_specific,
                                     start_year=user_params['start_year'],
                                     reform=reform, guid=guid)
    print ("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating frisch and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params['frisch'], run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print "updating g_y_annual and associated"
        # Pull the age range and number of periods from run_params so the
        # growth-rate conversion below has the values it needs
        ending_age = run_params['ending_age']
        starting_age = run_params['starting_age']
        S = run_params['S']
        g_y = (1 + user_params['g_y_annual'])**(
            float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)

    globals().update(run_params)
    from ogusa import SS, TPI

    # Generate Wealth data moments
    wealth.get_wealth_data(lambdas, J, flag_graphs, output_dir=output_base)

    # Generate labor data moments
    labor.labor_data_moments(flag_graphs, output_dir=output_base)

    get_baseline = True
    calibrate_model = True

    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)
    param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age',
                   'ending_age', 'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta',
                   'E', 'ltilde', 'g_y', 'maxiter', 'mindist_SS',
                   'mindist_TPI', 'analytical_mtrs', 'b_ellipse', 'k_ellipse',
                   'upsilon', 'chi_b_guess', 'chi_n_guess', 'etr_params',
                   'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq',
                   'calibrate_model', 'retire', 'mean_income_data',
                   'g_n_vector', 'h_wealth', 'p_wealth', 'm_wealth',
                   'get_baseline', 'omega', 'g_n_ss', 'omega_SS', 'surv_rate',
                   'e', 'rho']

    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''
    sim_params = {}
    glbs = globals()
    lcls = locals()
    for key in param_names:
        if key in glbs:
            sim_params[key] = glbs[key]
        else:
            sim_params[key] = lcls[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params
    income_tax_params, wealth_tax_params, ellipse_params, ss_parameters, iterative_params = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_steady_state(income_tax_params, ss_parameters,
                                     iterative_params, baseline,
                                     calibrate_model, output_dir=output_base,
                                     baseline_dir=baseline_dir)
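# For reference: the g_y_annual handling above converts an annual productivity
# growth rate into a growth rate per model period, where one model period spans
# (ending_age - starting_age) / S years.  A small sketch with made-up numbers
# (not calibrated values):

ending_age, starting_age, S = 100, 20, 40   # hypothetical model dimensions
g_y_annual = 0.03                           # hypothetical annual growth rate

years_per_period = float(ending_age - starting_age) / S   # = 2.0 here
g_y = (1 + g_y_annual) ** years_per_period - 1
print(g_y)   # about 0.0609: two years of 3% annual growth per model period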
def runner(output_base, baseline_dir, test=False, time_path=True,
           baseline=True, reform={}, user_params={}, guid='',
           run_micro=True, data=None, client=None, num_workers=1):

    tick = time.time()
    # Create output directory structure
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print("making dir: ", _dir)
            os.makedirs(_dir)
        except OSError:
            pass

    print('In runner, baseline is ', baseline)

    # Get parameter class
    # Note - set run_micro false when initially load class
    # Update later with call to spec.get_tax_function_parameters()
    spec = Specifications(run_micro=False, output_base=output_base,
                          baseline_dir=baseline_dir, test=test,
                          time_path=time_path, baseline=baseline,
                          reform=reform, guid=guid, data=data,
                          client=client, num_workers=num_workers)
    spec.update_specifications(user_params)
    print('path for tax functions: ', spec.output_base)
    spec.get_tax_function_parameters(client, run_micro)

    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''
    ss_outputs = SS.run_SS(spec, client=client)

    '''
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        # Save pickle with parameter values for the run
        param_dir = os.path.join(baseline_dir, "model_params.pkl")
        pickle.dump(spec, open(param_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
        # Save pickle with parameter values for the run
        param_dir = os.path.join(output_base, "model_params.pkl")
        pickle.dump(spec, open(param_dir, "wb"))

    if time_path:
        '''
        ------------------------------------------------------------------------
            Run the TPI simulation
        ------------------------------------------------------------------------
        '''
        tpi_output = TPI.run_TPI(spec, client=client)

        '''
        ------------------------------------------------------------------------
            Pickle TPI results
        ------------------------------------------------------------------------
        '''
        tpi_dir = os.path.join(output_base, "TPI")
        utils.mkdirs(tpi_dir)
        tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
        pickle.dump(tpi_output, open(tpi_vars, "wb"))

        print("Time path iteration complete.")
    print("It took {0} seconds to get that part done.".format(
        time.time() - tick))
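# A minimal sketch of how this runner might be invoked for a baseline run and
# then a reform run.  The output directory names and the reform dictionary are
# hypothetical, and the import path for runner is an assumption (it depends on
# where the function lives in the package); only keyword arguments that appear
# in the signature above are used.  run_micro=False skips the tax-function
# estimation step, which would otherwise require microdata.

import os
from ogusa.execute import runner   # assumed module path

BASE_DIR = os.path.join(os.getcwd(), 'OUTPUT_BASELINE')   # hypothetical
REFORM_DIR = os.path.join(os.getcwd(), 'OUTPUT_REFORM')   # hypothetical
reform = {}   # a Tax-Calculator-style reform dictionary would go here

# Baseline run: results are pickled under BASE_DIR/SS and BASE_DIR/TPI
runner(output_base=BASE_DIR, baseline_dir=BASE_DIR, baseline=True,
       reform={}, run_micro=False, time_path=True)

# Reform run: reads baseline results from BASE_DIR, writes to REFORM_DIR
runner(output_base=REFORM_DIR, baseline_dir=BASE_DIR, baseline=False,
       reform=reform, run_micro=False, time_path=True)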
                   'get_baseline', 'omega', 'g_n_ss', 'omega_SS',
                   'surv_rate', 'e', 'rho']

    '''
    ------------------------------------------------------------------------
        Run SS with minimization to fit chi_b and chi_n
    ------------------------------------------------------------------------
    '''

    # This is the simulation before getting the replacement rate values
    sim_params = {}
    for key in param_names:
        sim_params[key] = globals()[key]

    #pickle.dump(dictionary, open("OUTPUT/Saved_moments/params_given.pkl", "w"))
    #call(['python', 'SS.py'])

    income_tax_params, wealth_tax_params, ellipse_params, ss_parameters, iterative_params = SS.create_steady_state_parameters(
        **sim_params)

    ss_outputs = SS.run_steady_state(ss_parameters, iterative_params,
                                     get_baseline, calibrate_model)

    '''
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    '''

    ss_outputs['get_baseline'] = get_baseline
    income_tax_params, wealth_tax_params, ellipse_params, parameters, N_tilde, omega_stationary, K0, b_sinit, \
        b_splus1init, L0, Y0, w0, r0, BQ0, T_H_0, tax0, c0, initial_b, initial_n = TPI.create_tpi_params(**sim_params)
    ss_outputs['income_tax_params'] = income_tax_params
    ss_outputs['wealth_tax_params'] = wealth_tax_params
    ss_outputs['ellipse_params'] = ellipse_params
def test_run_SS(input_path, expected_path):
    # Test SS.run_SS function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', input_path))
    (income_tax_params, ss_params, iterative_params, chi_params,
     small_open_params, baseline, baseline_spending, baseline_dir) =\
        input_tuple
    p = Specifications()
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n_ss, tau_payroll, tau_bq,
     p.rho, p.omega_SS, p.budget_balance, alpha_T, p.debt_ratio_ss,
     tau_b, delta_tau, lambdas, imm_rates, p.e, retire,
     p.mean_income_data, h_wealth, p_wealth, m_wealth, p.b_ellipse,
     p.upsilon) = ss_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.alpha_T = np.ones(p.T + p.S) * alpha_T
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.lambdas = lambdas.reshape(p.J, 1)
    p.imm_rates = imm_rates.reshape(1, p.S)
    p.tax_func_type = 'DEP'
    p.baseline = baseline
    p.baseline_spending = baseline_spending
    p.baseline_dir = baseline_dir
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(etr_params.reshape(
        p.S, 1, etr_params.shape[-1]), (1, 0, 2))
    p.mtrx_params = np.transpose(mtrx_params.reshape(
        p.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
    p.mtry_params = np.transpose(mtry_params.reshape(
        p.S, 1, mtry_params.shape[-1]), (1, 0, 2))
    p.maxiter, p.mindist_SS = iterative_params
    p.chi_b, p.chi_n = chi_params
    p.small_open, firm_r, hh_r = small_open_params
    p.firm_r = np.ones(p.T + p.S) * firm_r
    p.hh_r = np.ones(p.T + p.S) * hh_r
    p.num_workers = 1
    test_dict = SS.run_SS(p, None)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', expected_path))

    # delete values key-value pairs that are not in both dicts
    del expected_dict['bssmat'], expected_dict['chi_n'], expected_dict['chi_b']
    del test_dict['etr_ss'], test_dict['mtrx_ss'], test_dict['mtry_ss']
    test_dict['IITpayroll_revenue'] = (test_dict['total_revenue_ss'] -
                                       test_dict['business_revenue'])
    del test_dict['T_Pss'], test_dict['T_BQss'], test_dict['T_Wss']
    del test_dict['resource_constraint_error'], test_dict['T_Css']
    del test_dict['r_gov_ss'], test_dict['r_hh_ss']
    test_dict['revenue_ss'] = test_dict.pop('total_revenue_ss')

    for k, v in expected_dict.items():
        assert(np.allclose(test_dict[k], v))
def runner(output_base, baseline_dir, baseline=False, analytical_mtrs=True,
           age_specific=False, reform={}, user_params={}, guid='',
           run_micro=True):

    #from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import parameters, wealth, labor, demog, income, utils
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ssinit_dir = os.path.join(output_base, "SSinit")
    tpiinit_dir = os.path.join(output_base, "TPIinit")
    dirs = [saved_moments_dir, ssinit_dir, tpiinit_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline,
                                     analytical_mtrs=analytical_mtrs,
                                     age_specific=age_specific,
                                     start_year=user_params['start_year'],
                                     reform=reform, guid=guid)
    print ("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating frisch and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params['frisch'], run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print "updating g_y_annual and associated"
        # Pull the age range and number of periods from run_params before
        # converting the annual growth rate to a model-period rate
        ending_age = run_params['ending_age']
        starting_age = run_params['starting_age']
        S = run_params['S']
        g_y = (1 + user_params['g_y_annual'])**(
            float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)

    from ogusa import SS, TPI

    # Generate Wealth data moments
    wealth.get_wealth_data(run_params['lambdas'], run_params['J'],
                           run_params['flag_graphs'], output_dir=output_base)

    # Generate labor data moments
    labor.labor_data_moments(run_params['flag_graphs'],
                             output_dir=output_base)

    calibrate_model = False

    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)
    param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age',
                   'ending_age', 'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta',
                   'E', 'ltilde', 'g_y', 'maxiter', 'mindist_SS',
                   'mindist_TPI', 'analytical_mtrs', 'b_ellipse', 'k_ellipse',
                   'upsilon', 'chi_b_guess', 'chi_n_guess', 'etr_params',
                   'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq',
                   'retire', 'mean_income_data', 'g_n_vector', 'h_wealth',
                   'p_wealth', 'm_wealth', 'omega', 'g_n_ss', 'omega_SS',
                   'surv_rate', 'e', 'rho']

    '''
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    '''
    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params['output_dir'] = output_base
    sim_params['run_params'] = run_params
    income_tax_params, ss_parameters, iterative_params, chi_params = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_SS(income_tax_params, ss_parameters, iterative_params,
                           chi_params, baseline, baseline_dir=baseline_dir)

    '''
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    '''
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/ss_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/ss_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))

    '''
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    '''
    sim_params['input_dir'] = output_base
    sim_params['baseline_dir'] = baseline_dir
    income_tax_params, tpi_params, iterative_params, initial_values, SS_values = TPI.create_tpi_params(**sim_params)

    # ss_outputs['income_tax_params'] = income_tax_params
    # ss_outputs['wealth_tax_params'] = wealth_tax_params
    # ss_outputs['ellipse_params'] = ellipse_params
    # ss_outputs['parameters'] = parameters
    # ss_outputs['N_tilde'] = N_tilde
    # ss_outputs['omega_stationary'] = omega_stationary
    # ss_outputs['K0'] = K0
    # ss_outputs['b_sinit'] = b_sinit
    # ss_outputs['b_splus1init'] = b_splus1init
    # ss_outputs['L0'] = L0
    # ss_outputs['Y0'] = Y0
    # ss_outputs['r0'] = r0
    # ss_outputs['BQ0'] = BQ0
    # ss_outputs['T_H_0'] = T_H_0
    # ss_outputs['factor_ss'] = factor
    # ss_outputs['tax0'] = tax0
    # ss_outputs['c0'] = c0
    # ss_outputs['initial_b'] = initial_b
    # ss_outputs['initial_n'] = initial_n
    # ss_outputs['tau_bq'] = tau_bq
    # ss_outputs['g_n_vector'] = g_n_vector
    # ss_outputs['output_dir'] = output_base

    # with open("ss_outputs.pkl", 'wb') as fp:
    #     pickle.dump(ss_outputs, fp)

    w_path, r_path, T_H_path, BQ_path, Y_path = TPI.run_TPI(
        income_tax_params, tpi_params, iterative_params, initial_values,
        SS_values, output_dir=output_base)

    print "getting to here...."

    TPI.TP_solutions(w_path, r_path, T_H_path, BQ_path, **ss_outputs)
    print "took {0} seconds to get that part done.".format(time.time() - tick)
                   'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho']

    '''
    ------------------------------------------------------------------------
        Run SS with minimization to fit chi_b and chi_n
    ------------------------------------------------------------------------
    '''

    # This is the simulation before getting the replacement rate values
    sim_params = {}
    for key in param_names:
        sim_params[key] = globals()[key]

    #pickle.dump(dictionary, open("OUTPUT/Saved_moments/params_given.pkl", "w"))
    #call(['python', 'SS.py'])

    income_tax_params, wealth_tax_params, ellipse_params, ss_parameters, iterative_params = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_steady_state(income_tax_params, ss_parameters,
                                     iterative_params, get_baseline,
                                     calibrate_model)

    '''
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    '''

    ss_outputs['get_baseline'] = get_baseline
    income_tax_params, wealth_tax_params, ellipse_params, parameters, N_tilde, omega_stationary, K0, b_sinit, \
        b_splus1init, L0, Y0, w0, r0, BQ0, T_H_0, tax0, c0, initial_b, initial_n = TPI.create_tpi_params(**sim_params)
    ss_outputs['income_tax_params'] = income_tax_params
def test_SS_solver():
    # Test SS.SS_solver function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/SS_solver_inputs.pkl'))
    (b_guess_init, n_guess_init, rss, T_Hss, factor_ss, Yss, params,
     baseline, fsolve_flag, baseline_spending) = input_tuple
    (bssmat, nssmat, chi_params, ss_params, income_tax_params,
     iterative_params, small_open_params) = params
    p = Specifications()
    (p.J, p.S, p.T, p.BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon,
     Z, p.delta, p.ltilde, p.nu, p.g_y, p.g_n_ss, tau_payroll, tau_bq,
     p.rho, p.omega_SS, p.budget_balance, alpha_T, p.debt_ratio_ss,
     tau_b, delta_tau, lambdas, imm_rates, p.e, retire,
     p.mean_income_data, h_wealth, p_wealth, m_wealth, p.b_ellipse,
     p.upsilon) = ss_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.alpha_T = np.ones(p.T + p.S) * alpha_T
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.lambdas = lambdas.reshape(p.J, 1)
    p.imm_rates = imm_rates.reshape(1, p.S)
    p.tax_func_type = 'DEP'
    p.baseline = baseline
    p.baseline_spending = baseline_spending
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(etr_params.reshape(
        p.S, 1, etr_params.shape[-1]), (1, 0, 2))
    p.mtrx_params = np.transpose(mtrx_params.reshape(
        p.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
    p.mtry_params = np.transpose(mtry_params.reshape(
        p.S, 1, mtry_params.shape[-1]), (1, 0, 2))
    p.maxiter, p.mindist_SS = iterative_params
    p.chi_b, p.chi_n = chi_params
    p.small_open, firm_r, hh_r = small_open_params
    p.firm_r = np.ones(p.T + p.S) * firm_r
    p.hh_r = np.ones(p.T + p.S) * hh_r
    p.num_workers = 1

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/SS_solver_outputs.pkl'))
    BQss = expected_dict['BQss']
    test_dict = SS.SS_solver(b_guess_init, n_guess_init, rss, BQss, T_Hss,
                             factor_ss, Yss, p, None, fsolve_flag)

    # delete values key-value pairs that are not in both dicts
    del expected_dict['bssmat'], expected_dict['chi_n'], expected_dict['chi_b']
    del expected_dict['Iss_total']
    del test_dict['etr_ss'], test_dict['mtrx_ss'], test_dict['mtry_ss']
    test_dict['IITpayroll_revenue'] = (test_dict['total_revenue_ss'] -
                                       test_dict['business_revenue'])
    del test_dict['T_Pss'], test_dict['T_BQss'], test_dict['T_Wss']
    del test_dict['K_d_ss'], test_dict['K_f_ss'], test_dict['D_d_ss']
    del test_dict['D_f_ss'], test_dict['I_d_ss']
    del test_dict['debt_service_f'], test_dict['new_borrowing_f']
    del test_dict['bqssmat'], test_dict['T_Css'], test_dict['Iss_total']
    test_dict['revenue_ss'] = test_dict.pop('total_revenue_ss')

    for k, v in expected_dict.items():
        print('Testing ', k)
        assert(np.allclose(test_dict[k], v))
def runner_SS(
    output_base,
    baseline_dir,
    baseline=False,
    analytical_mtrs=False,
    age_specific=False,
    reform={},
    user_params={},
    guid="",
    run_micro=True,
):
    from ogusa import parameters, demographics, income, utils
    from ogusa import txfunc

    tick = time.time()

    # Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ss_dir = os.path.join(output_base, "SS")
    tpi_dir = os.path.join(output_base, "TPI")
    dirs = [saved_moments_dir, ss_dir, tpi_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(
            baseline=baseline,
            analytical_mtrs=analytical_mtrs,
            age_specific=age_specific,
            start_year=user_params["start_year"],
            reform=reform,
            guid=guid,
        )
    print ("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params["analytical_mtrs"] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if "frisch" in user_params:
        print "updating frisch and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params["frisch"], run_params["ltilde"])
        run_params["b_ellipse"] = b_ellipse
        run_params["upsilon"] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if "g_y_annual" in user_params:
        print "updating g_y_annual and associated"
        ending_age = run_params["ending_age"]
        starting_age = run_params["starting_age"]
        S = run_params["S"]
        g_y = (1 + user_params["g_y_annual"]) ** (
            float(ending_age - starting_age) / S) - 1
        run_params["g_y"] = g_y
        run_params.update(user_params)

    from ogusa import SS, TPI

    """
    ****
    CALL CALIBRATION here if boolean flagged
    ****
    """
    calibrate_model = False
    # if calibrate_model:
    #     chi_b, chi_n = calibrate.(income_tax_params, ss_params, iterative_params, chi_params, baseline,
    #                               calibrate_model, output_dir=output_base, baseline_dir=baseline_dir)

    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)
    param_names = [
        "S", "J", "T", "BW", "lambdas", "starting_age", "ending_age",
        "beta", "sigma", "alpha", "nu", "Z", "delta", "E", "ltilde",
        "g_y", "maxiter", "mindist_SS", "mindist_TPI", "analytical_mtrs",
        "b_ellipse", "k_ellipse", "upsilon", "chi_b_guess", "chi_n_guess",
        "etr_params", "mtrx_params", "mtry_params", "tau_payroll", "tau_bq",
        "retire", "mean_income_data", "g_n_vector", "h_wealth", "p_wealth",
        "m_wealth", "omega", "g_n_ss", "omega_SS", "surv_rate", "imm_rates",
        "e", "rho", "omega_S_preTP",
    ]

    """
    ------------------------------------------------------------------------
        Run SS
    ------------------------------------------------------------------------
    """
    sim_params = {}
    for key in param_names:
        sim_params[key] = run_params[key]

    sim_params["output_dir"] = output_base
    sim_params["run_params"] = run_params
    income_tax_params, ss_params, iterative_params, chi_params = SS.create_steady_state_parameters(**sim_params)

    ss_outputs = SS.run_SS(
        income_tax_params, ss_params, iterative_params, chi_params, baseline,
        baseline_dir=baseline_dir
    )

    """
    ------------------------------------------------------------------------
        Pickle SS results
    ------------------------------------------------------------------------
    """
    if baseline:
        utils.mkdirs(os.path.join(baseline_dir, "SS"))
        ss_dir = os.path.join(baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(output_base, "SS"))
        ss_dir = os.path.join(output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
def runner(output_base, input_dir, baseline=False, analytical_mtrs=True,
           reform={}, user_params={}, guid='', run_micro=True):
    from ogusa import parameters, wealth, labor, demographics, income
    from ogusa import txfunc

    tick = time.time()

    #Create output directory structure
    saved_moments_dir = os.path.join(output_base, "Saved_moments")
    ssinit_dir = os.path.join(output_base, "SSinit")
    tpiinit_dir = os.path.join(output_base, "TPIinit")
    dirs = [saved_moments_dir, ssinit_dir, tpiinit_dir]
    for _dir in dirs:
        try:
            print "making dir: ", _dir
            os.makedirs(_dir)
        except OSError as oe:
            pass

    if run_micro:
        txfunc.get_tax_func_estimate(baseline=baseline,
                                     analytical_mtrs=analytical_mtrs,
                                     reform=reform, guid=guid)
    print("in runner, baseline is ", baseline)
    run_params = ogusa.parameters.get_parameters(baseline=baseline, guid=guid)
    run_params['analytical_mtrs'] = analytical_mtrs

    # Modify ogusa parameters based on user input
    if 'frisch' in user_params:
        print "updating frisch and associated"
        b_ellipse, upsilon = ogusa.elliptical_u_est.estimation(
            user_params['frisch'], run_params['ltilde'])
        run_params['b_ellipse'] = b_ellipse
        run_params['upsilon'] = upsilon
        run_params.update(user_params)

    # Modify ogusa parameters based on user input
    if 'g_y_annual' in user_params:
        print "updating g_y_annual and associated"
        # Pull the age range and number of periods from run_params before
        # converting the annual growth rate to a model-period rate
        ending_age = run_params['ending_age']
        starting_age = run_params['starting_age']
        S = run_params['S']
        g_y = (1 + user_params['g_y_annual'])**(
            float(ending_age - starting_age) / S) - 1
        run_params['g_y'] = g_y
        run_params.update(user_params)

    globals().update(run_params)
    from ogusa import SS, TPI

    # Generate Wealth data moments
    wealth.get_wealth_data(lambdas, J, flag_graphs, output_dir=input_dir)

    # Generate labor data moments
    labor.labor_data_moments(flag_graphs, output_dir=input_dir)

    get_baseline = True
    calibrate_model = False

    # List of parameter names that will not be changing (unless we decide to
    # change them for a tax experiment)
    param_names = [
        'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta',
        'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter',
        'mindist_SS', 'mindist_TPI', 'analytical_mtrs', 'b_ellipse',
        'k_ellipse', 'upsilon', 'chi_b_guess', 'chi_n_guess', 'etr_params',
        'mtrx_params', 'mtry_params', 'tau_payroll', 'tau_bq',
        'calibrate_model', 'retire', 'mean_income_data', 'g_n_vector',
        'h_wealth', 'p_wealth', 'm_wealth', 'get_baseline', 'omega',
        'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho'
    ]

    '''
    ------------------------------------------------------------------------
        Run SS with minimization to fit chi_b and chi_n
    ------------------------------------------------------------------------
    '''

    # This is the simulation before getting the replacement rate values
    sim_params = {}
    glbs = globals()
    lcls = locals()
    for key in param_names:
        if key in glbs:
            sim_params[key] = glbs[key]
        else:
            sim_params[key] = lcls[key]

    sim_params['output_dir'] = input_dir
    sim_params['run_params'] = run_params
    income_tax_params, wealth_tax_params, ellipse_params, ss_parameters, iterative_params = SS.create_steady_state_parameters(
        **sim_params)

    ss_outputs = SS.run_steady_state(income_tax_params, ss_parameters,
                                     iterative_params, get_baseline,
                                     calibrate_model, output_dir=input_dir)

    '''
    ------------------------------------------------------------------------
        Run the baseline TPI simulation
    ------------------------------------------------------------------------
    '''
    ss_outputs['get_baseline'] = get_baseline
    sim_params['input_dir'] = input_dir
    income_tax_params, wealth_tax_params, ellipse_params, parameters, N_tilde, omega_stationary, K0, b_sinit, \
        b_splus1init, L0, Y0, w0, r0, BQ0, T_H_0, tax0, c0, initial_b, initial_n = TPI.create_tpi_params(**sim_params)

    ss_outputs['income_tax_params'] = income_tax_params
    ss_outputs['wealth_tax_params'] = wealth_tax_params
    ss_outputs['ellipse_params'] = ellipse_params
    ss_outputs['parameters'] = parameters
    ss_outputs['N_tilde'] = N_tilde
    ss_outputs['omega_stationary'] = omega_stationary
    ss_outputs['K0'] = K0
    ss_outputs['b_sinit'] = b_sinit
    ss_outputs['b_splus1init'] = b_splus1init
    ss_outputs['L0'] = L0
    ss_outputs['Y0'] = Y0
    ss_outputs['r0'] = r0
    ss_outputs['BQ0'] = BQ0
    ss_outputs['T_H_0'] = T_H_0
    ss_outputs['tax0'] = tax0
    ss_outputs['c0'] = c0
    ss_outputs['initial_b'] = initial_b
    ss_outputs['initial_n'] = initial_n
    ss_outputs['tau_bq'] = tau_bq
    ss_outputs['g_n_vector'] = g_n_vector
    ss_outputs['output_dir'] = input_dir

    with open("ss_outputs.pkl", 'wb') as fp:
        pickle.dump(ss_outputs, fp)

    w_path, r_path, T_H_path, BQ_path, Y_path = TPI.run_time_path_iteration(
        **ss_outputs)

    print "getting to here...."

    TPI.TP_solutions(w_path, r_path, T_H_path, BQ_path, **ss_outputs)
    print "took {0} seconds to get that part done.".format(time.time() - tick)
def test_run_TPI():
    # Test TPI.run_TPI function.  Provide inputs to function and
    # ensure that output returned matches what it has been before.
    input_tuple = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/run_TPI_inputs.pkl'))
    (income_tax_params, tpi_params, iterative_params, small_open_params,
     initial_values, SS_values, fiscal_params, biz_tax_params,
     output_dir, baseline_spending) = input_tuple
    tpi_params = tpi_params + [True]
    initial_values = initial_values + (0.0,)
    p = Specifications()
    (J, S, T, BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon, Z,
     p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params
    new_param_values = {
        'J': J,
        'S': S,
        'T': T
    }
    # update parameters instance with new values for test
    p.update_specifications(new_param_values, raise_errors=False)
    (J, S, T, BW, p.beta, p.sigma, p.alpha, p.gamma, p.epsilon, Z,
     p.delta, p.ltilde, p.nu, p.g_y, p.g_n, tau_b, delta_tau,
     tau_payroll, tau_bq, p.rho, p.omega, N_tilde, lambdas,
     p.imm_rates, p.e, retire, p.mean_income_data, factor, h_wealth,
     p_wealth, m_wealth, p.b_ellipse, p.upsilon, p.chi_b, p.chi_n,
     theta, p.baseline) = tpi_params
    p.Z = np.ones(p.T + p.S) * Z
    p.tau_bq = np.ones(p.T + p.S) * 0.0
    p.tau_payroll = np.ones(p.T + p.S) * tau_payroll
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.h_wealth = np.ones(p.T + p.S) * h_wealth
    p.p_wealth = np.ones(p.T + p.S) * p_wealth
    p.m_wealth = np.ones(p.T + p.S) * m_wealth
    p.retire = (np.ones(p.T + p.S) * retire).astype(int)
    p.small_open, ss_firm_r, ss_hh_r = small_open_params
    p.ss_firm_r = np.ones(p.T + p.S) * ss_firm_r
    p.ss_hh_r = np.ones(p.T + p.S) * ss_hh_r
    p.maxiter, p.mindist_SS, p.mindist_TPI = iterative_params
    (p.budget_balance, alpha_T, alpha_G, p.tG1, p.tG2, p.rho_G,
     p.debt_ratio_ss) = fiscal_params
    p.alpha_T = np.concatenate((alpha_T, np.ones(40) * alpha_T[-1]))
    p.alpha_G = np.concatenate((alpha_G, np.ones(40) * alpha_G[-1]))
    (tau_b, delta_tau) = biz_tax_params
    p.tau_b = np.ones(p.T + p.S) * tau_b
    p.delta_tau = np.ones(p.T + p.S) * delta_tau
    p.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
        income_tax_params
    p.etr_params = np.transpose(etr_params, (1, 0, 2))[:p.T, :, :]
    p.mtrx_params = np.transpose(mtrx_params, (1, 0, 2))[:p.T, :, :]
    p.mtry_params = np.transpose(mtry_params, (1, 0, 2))[:p.T, :, :]
    p.lambdas = lambdas.reshape(p.J, 1)
    p.output = output_dir
    p.baseline_spending = baseline_spending
    p.num_workers = 1
    (K0, b_sinit, b_splus1init, factor, initial_b, initial_n,
     p.omega_S_preTP, initial_debt, D0) = initial_values

    # Need to run SS first to get results
    ss_outputs = SS.run_SS(p, None)

    if p.baseline:
        utils.mkdirs(os.path.join(p.baseline_dir, "SS"))
        ss_dir = os.path.join(p.baseline_dir, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))
    else:
        utils.mkdirs(os.path.join(p.output_base, "SS"))
        ss_dir = os.path.join(p.output_base, "SS/SS_vars.pkl")
        pickle.dump(ss_outputs, open(ss_dir, "wb"))

    test_dict = TPI.run_TPI(p, None)

    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data/run_TPI_outputs.pkl'))

    # delete values key-value pairs that are not in both dicts
    del test_dict['etr_path'], test_dict['mtrx_path'], test_dict['mtry_path']
    del test_dict['bmat_s']
    test_dict['b_mat'] = test_dict.pop('bmat_splus1')
    test_dict['REVENUE'] = test_dict.pop('total_revenue')
    test_dict['IITpayroll_revenue'] = (test_dict['REVENUE'][:160] -
                                       test_dict['business_revenue'])
    del test_dict['T_P'], test_dict['T_BQ'], test_dict['T_W']
    del test_dict['resource_constraint_error'], test_dict['T_C']
    del test_dict['r_gov'], test_dict['r_hh']

    for k, v in expected_dict.items():
        try:
            assert(np.allclose(test_dict[k], v, rtol=1e-04, atol=1e-04))
        except ValueError:
            assert(np.allclose(test_dict[k], v[:p.T, :, :], rtol=1e-04,
                               atol=1e-04))
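# For reference: the comparison loop above uses np.allclose with loose
# relative/absolute tolerances and, when the array shapes are not compatible
# (np.allclose raises a ValueError in that case), falls back to comparing only
# the first T periods of the expected path.  A tiny self-contained sketch of
# that behavior with made-up arrays:

import numpy as np

expected = np.zeros((320, 80, 7))   # e.g. a path over T + S periods
actual = np.zeros((160, 80, 7))     # e.g. only the first T periods
T = 160

try:
    assert np.allclose(actual, expected, rtol=1e-04, atol=1e-04)
except ValueError:
    # shapes differ, so truncate the expected path to the first T periods
    assert np.allclose(actual, expected[:T, :, :], rtol=1e-04, atol=1e-04)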