def test_run_SS(baseline, param_updates, filename, dask_client):
    '''
    Regression test of SS.run_SS.

    Solves the steady state for the given specification and compares
    every item of the result against a cached pickle of prior output.

    Args:
        baseline (bool): whether this run is a baseline (True) or a
            reform (False); reforms require solving the baseline first
        param_updates (dict): parameter updates applied to the
            Specifications object
        filename (str): name of the expected-results pickle under
            test_io_data
        dask_client (distributed.Client): pytest fixture providing a
            Dask client for parallel solution
    '''
    # Enforce solution checks so a bad solve fails loudly in tests.
    SS.ENFORCE_SOLUTION_CHECKS = True
    # If running a reform, need to solve the baseline first so the
    # reform solve can read baseline values from disk.
    if baseline is False:
        p_base = Specifications(
            output_base=constants.BASELINE_DIR,
            baseline_dir=constants.BASELINE_DIR,
            baseline=True, num_workers=NUM_WORKERS)
        p_base.update_specifications(param_updates)
        if p_base.use_zeta:
            # The zeta bequest-distribution case needs gentler initial
            # guesses for the SS solver to converge.
            p_base.update_specifications({
                'initial_guess_r_SS': 0.10,
                'initial_guess_TR_SS': 0.02
            })
        p_base.baseline_spending = False
        base_ss_outputs = SS.run_SS(p_base, client=dask_client)
        # Cache baseline SS results where the reform run expects them.
        utils.mkdirs(os.path.join(constants.BASELINE_DIR, "SS"))
        ss_dir = os.path.join(constants.BASELINE_DIR, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(base_ss_outputs, f)
    # Now run the specification under test.
    p = Specifications(baseline=baseline, num_workers=NUM_WORKERS)
    p.update_specifications(param_updates)
    test_dict = SS.run_SS(p, client=dask_client)
    expected_dict = utils.safe_read_pickle(
        os.path.join(CUR_PATH, 'test_io_data', filename))
    # Compare each solution object element-wise to the cached results.
    for k, v in expected_dict.items():
        print('Checking item = ', k)
        assert (np.allclose(test_dict[k], v, atol=1e-06))
def test_makedirs(tmp_path):
    '''
    Smoke test of utils.mkdirs(): calling it on a path must leave that
    directory existing on disk.
    '''
    target = tmp_path
    utils.mkdirs(target)
    assert os.path.exists(target)
def test_run_TPI(baseline, param_updates, filename, tmp_path, dask_client):
    '''
    Regression test of TPI.run_TPI.

    Solves the SS (needed as the TPI terminal condition), caches it to
    disk where run_TPI expects it, runs two TPI iterations, and
    compares the transition path against a cached pickle of prior
    output.

    Args:
        baseline (bool): whether this run is a baseline or a reform
        param_updates (dict): parameter updates applied on top of
            TEST_PARAM_DICT
        filename (str): path to the expected-results pickle
        tmp_path (pathlib.Path): pytest tmp_path fixture (unused here)
        dask_client (distributed.Client): Dask client fixture
    '''
    baseline_dir = os.path.join(CUR_PATH, 'baseline')
    if baseline:
        output_base = baseline_dir
    else:
        output_base = os.path.join(CUR_PATH, 'reform')
    p = Specifications(baseline=baseline, baseline_dir=baseline_dir,
                       output_base=output_base, num_workers=NUM_WORKERS)
    test_params = TEST_PARAM_DICT.copy()
    test_params.update(param_updates)
    p.update_specifications(test_params)
    p.maxiter = 2  # this test runs through just two iterations
    # Need to run SS first to get results
    SS.ENFORCE_SOLUTION_CHECKS = False
    ss_outputs = SS.run_SS(p, client=dask_client)
    # run_TPI reads SS results from disk: baselines from baseline_dir,
    # reforms from output_base.
    if p.baseline:
        utils.mkdirs(os.path.join(p.baseline_dir, "SS"))
        ss_dir = os.path.join(p.baseline_dir, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)
    else:
        utils.mkdirs(os.path.join(p.output_base, "SS"))
        ss_dir = os.path.join(p.output_base, "SS", "SS_vars.pkl")
        with open(ss_dir, "wb") as f:
            pickle.dump(ss_outputs, f)
    TPI.ENFORCE_SOLUTION_CHECKS = False
    test_dict = TPI.run_TPI(p, client=dask_client)
    expected_dict = utils.safe_read_pickle(filename)
    # First pass: print max abs deviations for easier debugging.
    # ValueError arises when the object is 3-D and needs full slicing.
    for k, v in expected_dict.items():
        print('Max diff in ', k, ' = ')
        try:
            print(np.absolute(test_dict[k][:p.T] - v[:p.T]).max())
        except ValueError:
            print(np.absolute(
                test_dict[k][:p.T, :, :] - v[:p.T, :, :]).max())
    # Second pass: actual assertions against cached results.
    for k, v in expected_dict.items():
        try:
            assert (np.allclose(test_dict[k][:p.T], v[:p.T],
                                rtol=1e-04, atol=1e-04))
        except ValueError:
            assert (np.allclose(test_dict[k][:p.T, :, :],
                                v[:p.T, :, :], rtol=1e-04, atol=1e-04))
def test_constant_demographics_TPI(dask_client): ''' This tests solves the model under the assumption of constant demographics, a balanced budget, and tax functions that do not vary over time. In this case, given how initial guesses for the time path are made, the time path should be solved for on the first iteration and the values all along the time path should equal their steady-state values. ''' # Create output directory structure spec = Specifications(output_base=CUR_PATH, baseline_dir=OUTPUT_DIR, baseline=True, num_workers=NUM_WORKERS) og_spec = { 'constant_demographics': True, 'budget_balance': True, 'zero_taxes': True, 'maxiter': 2, 'r_gov_shift': 0.0, 'zeta_D': [0.0, 0.0], 'zeta_K': [0.0, 0.0], 'debt_ratio_ss': 1.0, 'initial_foreign_debt_ratio': 0.0, 'start_year': 2019, 'cit_rate': [0.0], 'PIA_rate_bkt_1': 0.0, 'PIA_rate_bkt_2': 0.0, 'PIA_rate_bkt_3': 0.0, 'eta': (spec.omega_SS.reshape(spec.S, 1) * spec.lambdas.reshape(1, spec.J)) } spec.update_specifications(og_spec) spec.etr_params = np.zeros( (spec.T + spec.S, spec.S, spec.etr_params.shape[2])) spec.mtrx_params = np.zeros( (spec.T + spec.S, spec.S, spec.mtrx_params.shape[2])) spec.mtry_params = np.zeros( (spec.T + spec.S, spec.S, spec.mtry_params.shape[2])) # Run SS ss_outputs = SS.run_SS(spec, client=dask_client) # save SS results utils.mkdirs(os.path.join(OUTPUT_DIR, "SS")) ss_dir = os.path.join(OUTPUT_DIR, "SS", "SS_vars.pkl") with open(ss_dir, "wb") as f: pickle.dump(ss_outputs, f) # Run TPI tpi_output = TPI.run_TPI(spec, client=dask_client) assert (np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :], ss_outputs['bssmat_splus1']))
def run_model(meta_param_dict, adjustment):
    """
    Initializes classes from OG-USA that compute the model under
    different policies.  Then calls function get output objects.

    Args:
        meta_param_dict (dict): meta parameters (data source, start
            year, whether to solve the time path)
        adjustment (dict): policy adjustments, with keys
            "Tax-Calculator Parameters" and "OG-USA Parameters"

    Returns:
        comp_dict (dict): model comparison output from comp_output()
    """
    print("Meta_param_dict = ", meta_param_dict)
    print("adjustment dict = ", adjustment)

    meta_params = MetaParams()
    meta_params.adjust(meta_param_dict)
    if meta_params.data_source == "PUF":
        data = retrieve_puf(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        # set name of cached baseline file in case use below
        cached_pickle = "TxFuncEst_baseline_PUF.pkl"
    else:
        data = "cps"
        # set name of cached baseline file in case use below
        cached_pickle = "TxFuncEst_baseline_CPS.pkl"
    # Get TC params adjustments
    iit_mods = convert_policy_adjustment(
        adjustment["Tax-Calculator Parameters"]
    )
    # Create output directory structure
    base_dir = os.path.join(CUR_DIR, BASELINE_DIR)
    reform_dir = os.path.join(CUR_DIR, REFORM_DIR)
    for _dir in (base_dir, reform_dir):
        utils.mkdirs(_dir)

    # Dask parameters
    client = Client()
    num_workers = 5
    # TODO: Swap to these parameters when able to specify tax function
    # and model workers separately
    # num_workers_txf = 5
    # num_workers_mod = 6

    # whether to estimate tax functions from microdata
    run_micro = True
    time_path = meta_param_dict["time_path"][0]["value"]

    # Filter out OG-USA params that will not change between baseline and
    # reform runs (these are the non-policy parameters).
    constant_param_set = {
        "frisch",
        "beta_annual",
        "sigma",
        "g_y_annual",
        "gamma",
        "epsilon",
        "Z",
        "delta_annual",
        "small_open",
        "world_int_rate",
        "initial_debt_ratio",
        "initial_foreign_debt_ratio",
        "zeta_D",
        "zeta_K",
        "tG1",
        "tG2",
        "rho_G",
        "debt_ratio_ss",
        "budget_balance",
    }
    filtered_ogusa_params = OrderedDict()
    for k, v in adjustment["OG-USA Parameters"].items():
        if k in constant_param_set:
            filtered_ogusa_params[k] = v

    # Solve baseline model
    start_year = meta_param_dict["year"][0]["value"]
    if start_year == 2020:
        OGPATH = inspect.getfile(SS)
        OGDIR = os.path.dirname(OGPATH)
        # Cached tax-function parameters are currently disabled; the
        # commented path shows where they would be read from.
        tax_func_path = None
        # tax_func_path = os.path.join(OGDIR, 'data', 'tax_functions',
        #                              cached_pickle)
        run_micro_baseline = True
    else:
        tax_func_path = None
        run_micro_baseline = True
    base_spec = {
        **{
            "start_year": start_year,
            "tax_func_type": "DEP",
            "age_specific": False,
        },
        **filtered_ogusa_params,
    }
    base_params = Specifications(
        output_base=base_dir,
        baseline_dir=base_dir,
        baseline=True,
        num_workers=num_workers,
    )
    # Load the shipped default parameters once; used for both the
    # baseline and the reform Specifications objects.  Using a context
    # manager closes the file handle (the original json.load(open(...))
    # leaked it).
    default_params_path = os.path.join(
        "..", "..", "ogusa", "ogusa_default_parameters.json"
    )
    with open(default_params_path) as param_file:
        default_params = json.load(param_file)
    base_params.update_specifications(default_params)
    base_params.update_specifications(base_spec)
    BW = TC_LAST_YEAR - start_year + 1
    base_params.BW = BW
    # Will need to figure out how to handle default tax functions here
    # For now, estimating tax functions even for baseline
    c_base = Calibration(
        base_params,
        iit_reform={},
        estimate_tax_functions=True,
        data=data,
        client=client,
    )
    # update tax function parameters in Specifications Object
    d_base = c_base.get_dict()
    # additional parameters to change
    updated_txfunc_params = {
        "etr_params": d_base["etr_params"],
        "mtrx_params": d_base["mtrx_params"],
        "mtry_params": d_base["mtry_params"],
        "mean_income_data": d_base["mean_income_data"],
        "frac_tax_payroll": d_base["frac_tax_payroll"],
    }
    base_params.update_specifications(updated_txfunc_params)
    base_ss = SS.run_SS(base_params, client=client)
    utils.mkdirs(os.path.join(base_dir, "SS"))
    base_ss_dir = os.path.join(base_dir, "SS", "SS_vars.pkl")
    with open(base_ss_dir, "wb") as f:
        pickle.dump(base_ss, f)
    if time_path:
        base_tpi = TPI.run_TPI(base_params, client=client)
        # ensure the TPI directory exists before writing (the original
        # assumed run_TPI had created it)
        utils.mkdirs(os.path.join(base_dir, "TPI"))
        tpi_dir = os.path.join(base_dir, "TPI", "TPI_vars.pkl")
        with open(tpi_dir, "wb") as f:
            pickle.dump(base_tpi, f)
    else:
        base_tpi = None

    # Solve reform model
    # BUG FIX: copy the dict -- the original aliased base_spec, so
    # updating reform_spec silently mutated the baseline specification.
    reform_spec = base_spec.copy()
    reform_spec.update(adjustment["OG-USA Parameters"])
    reform_params = Specifications(
        output_base=reform_dir,
        baseline_dir=base_dir,
        baseline=False,
        num_workers=num_workers,
    )
    reform_params.update_specifications(default_params)
    reform_params.update_specifications(reform_spec)
    reform_params.BW = BW
    c_reform = Calibration(
        reform_params,
        iit_reform=iit_mods,
        estimate_tax_functions=True,
        data=data,
        client=client,
    )
    # update tax function parameters in Specifications Object
    d_reform = c_reform.get_dict()
    # additional parameters to change
    updated_txfunc_params = {
        "etr_params": d_reform["etr_params"],
        "mtrx_params": d_reform["mtrx_params"],
        "mtry_params": d_reform["mtry_params"],
        "mean_income_data": d_reform["mean_income_data"],
        "frac_tax_payroll": d_reform["frac_tax_payroll"],
    }
    reform_params.update_specifications(updated_txfunc_params)
    reform_ss = SS.run_SS(reform_params, client=client)
    utils.mkdirs(os.path.join(reform_dir, "SS"))
    reform_ss_dir = os.path.join(reform_dir, "SS", "SS_vars.pkl")
    with open(reform_ss_dir, "wb") as f:
        pickle.dump(reform_ss, f)
    if time_path:
        reform_tpi = TPI.run_TPI(reform_params, client=client)
    else:
        reform_tpi = None

    comp_dict = comp_output(
        base_params,
        base_ss,
        reform_params,
        reform_ss,
        time_path,
        base_tpi,
        reform_tpi,
    )

    # Shut down client and make sure all of its references are
    # cleaned up.
    client.close()
    del client

    return comp_dict
def get_data(
    baseline=False,
    start_year=DEFAULT_START_YEAR,
    reform={},
    data=None,
    path=CUR_PATH,
    client=None,
    num_workers=1,
):
    """
    This function creates dataframes of micro data with marginal tax
    rates and information to compute effective tax rates from the
    Tax-Calculator output.  The resulting dictionary of dataframes is
    returned and saved to disk in a pickle file.

    Args:
        baseline (boolean): True if baseline tax policy
        start_year (int): first year of budget window
        reform (dictionary): IIT policy reform parameters, empty dict
            if baseline (never mutated here, so the shared mutable
            default is safe)
        data (DataFrame or str): DataFrame or path to datafile for
            Records object
        path (str): path to save microdata files to
        client (Dask Client object): client for Dask multiprocessing
        num_workers (int): number of workers to use for Dask
            multiprocessing

    Returns:
        micro_data_dict (dict): dict of Pandas Dataframe, one for each
            year from start_year to the maximum year Tax-Calculator
            can analyze
        taxcalc_version (str): version of Tax-Calculator used
    """
    # Compute MTRs and taxes for each year, but not beyond TC_LAST_YEAR
    lazy_values = []
    for year in range(start_year, TC_LAST_YEAR + 1):
        lazy_values.append(
            delayed(taxcalc_advance)(baseline, start_year, reform, data, year)
        )
    if client:  # pragma: no cover
        futures = client.compute(lazy_values, num_workers=num_workers)
        results = client.gather(futures)
    else:
        # BUG FIX: the original had a duplicated assignment
        # (``results = results = compute(...)``)
        results = compute(
            *lazy_values,
            scheduler=dask.multiprocessing.get,
            num_workers=num_workers,
        )

    # dictionary of data frames to return
    micro_data_dict = {}
    for i, result in enumerate(results):
        year = start_year + i
        micro_data_dict[str(year)] = DataFrame(result)

    if baseline:
        pkl_path = os.path.join(path, "micro_data_baseline.pkl")
    else:
        pkl_path = os.path.join(path, "micro_data_policy.pkl")
    utils.mkdirs(path)
    with open(pkl_path, "wb") as f:
        pickle.dump(micro_data_dict, f)

    # Do some garbage collection
    del results

    # Pull Tax-Calc version for reference
    taxcalc_version = pkg_resources.get_distribution("taxcalc").version

    return micro_data_dict, taxcalc_version
def get_tax_function_parameters(
    self,
    p,
    iit_reform={},
    guid="",
    data="",
    client=None,
    num_workers=1,
    run_micro=False,
    tax_func_path=None,
):
    """
    Reads pickle file of tax function parameters or estimates the
    parameters from microsimulation model output.

    Args:
        p (Specifications object): model parameters
        iit_reform (dict): IIT reform parameters passed through to the
            microsimulation
        guid (str): id appended to cached tax parameter file names
        data (DataFrame or str): data source for the microsimulation
        client (Dask client object): client
        num_workers (int): number of Dask workers
        run_micro (bool): whether to estimate parameters from
            microsimulation model
        tax_func_path (string): path where find or save tax function
            parameter estimates

    Returns:
        tax_param_dict (dict): etr/mtrx/mtry parameter arrays plus
            metadata (taxcalc version, mean income, payroll fraction)
    """
    # set paths if none given
    if tax_func_path is None:
        if p.baseline:
            pckl = "TxFuncEst_baseline{}.pkl".format(guid)
            tax_func_path = os.path.join(p.output_base, pckl)
            print("Using baseline tax parameters from ", tax_func_path)
        else:
            pckl = "TxFuncEst_policy{}.pkl".format(guid)
            tax_func_path = os.path.join(p.output_base, pckl)
            print(
                "Using reform policy tax parameters from ", tax_func_path
            )
    # create directory for tax function pickles to be saved to
    mkdirs(os.path.split(tax_func_path)[0])
    # If run_micro is false, check to see if parameters file exists
    # and if it is consistent with Specifications instance
    if not run_micro:
        dict_params, run_micro = self.read_tax_func_estimate(tax_func_path)
    if run_micro:
        micro_data, taxcalc_version = get_micro_data.get_data(
            baseline=p.baseline,
            start_year=p.start_year,
            reform=iit_reform,
            data=data,
            path=p.output_base,
            client=client,
            num_workers=num_workers,
        )
        # budget window is the number of years of micro data returned
        p.BW = len(micro_data)
        dict_params = txfunc.tax_func_estimate(  # pragma: no cover
            micro_data,
            p.BW,
            p.S,
            p.starting_age,
            p.ending_age,
            start_year=p.start_year,
            baseline=p.baseline,
            analytical_mtrs=p.analytical_mtrs,
            tax_func_type=p.tax_func_type,
            age_specific=p.age_specific,
            reform=iit_reform,
            data=data,
            client=client,
            num_workers=num_workers,
            tax_func_path=tax_func_path,
        )
    # NOTE(review): if cached parameters are used (run_micro stays
    # False), ``taxcalc_version`` is never bound, so building
    # tax_param_dict below would raise NameError — confirm whether the
    # cached path is ever exercised end-to-end.
    mean_income_data = dict_params["tfunc_avginc"][0]
    # Extend the payroll-tax fraction beyond the budget window by
    # repeating its last estimated value out to T + S periods.
    frac_tax_payroll = np.append(
        dict_params["tfunc_frac_tax_payroll"],
        np.ones(p.T + p.S - p.BW)
        * dict_params["tfunc_frac_tax_payroll"][-1],
    )
    # Reorder indices of tax function and tile for all years after
    # budget window ends
    num_etr_params = dict_params["tfunc_etr_params_S"].shape[2]
    num_mtrx_params = dict_params["tfunc_mtrx_params_S"].shape[2]
    num_mtry_params = dict_params["tfunc_mtry_params_S"].shape[2]
    # First check to see if tax parameters that are used were
    # estimated with a budget window and ages that are as long as
    # the those implied based on the start year and model age.
    # N.B. the tax parameters dictionary does not save the years
    # that correspond to the parameter estimates, so the start year
    # used there may name match what is used in a run that reads in
    # some cached tax function parameters. Likewise for age.
    params_list = ["etr", "mtrx", "mtry"]
    BW_in_tax_params = dict_params["tfunc_etr_params_S"].shape[1]
    S_in_tax_params = dict_params["tfunc_etr_params_S"].shape[0]
    if p.BW != BW_in_tax_params:
        print(
            "Warning: There is a discrepency between the start"
            + " year of the model and that of the tax functions!!"
        )
        # After printing warning, make it work by tiling the last
        # estimated year's parameters out to the model's budget window.
        if p.BW > BW_in_tax_params:
            for item in params_list:
                # NOTE(review): num_etr_params is used in the reshape
                # for all three of etr/mtrx/mtry — this only works if
                # the three parameter counts are equal; confirm.
                dict_params["tfunc_" + item + "_params_S"] = np.concatenate(
                    (
                        dict_params["tfunc_" + item + "_params_S"],
                        np.tile(
                            dict_params["tfunc_" + item + "_params_S"][
                                :, -1, :
                            ].reshape(S_in_tax_params, 1, num_etr_params),
                            (1, p.BW - BW_in_tax_params, 1),
                        ),
                    ),
                    axis=1,
                )
                dict_params["tfunc_avg_" + item] = np.append(
                    dict_params["tfunc_avg_" + item],
                    np.tile(
                        dict_params["tfunc_avg_" + item][-1],
                        (p.BW - BW_in_tax_params),
                    ),
                )
    if p.S != S_in_tax_params:
        print(
            "Warning: There is a discrepency between the ages"
            + " used in the model and those in the tax functions!!"
        )
        # After printing warning, make it work by tiling the oldest
        # estimated age's parameters out to the model's age range.
        if p.S > S_in_tax_params:
            for item in params_list:
                dict_params["tfunc_" + item + "_params_S"] = np.concatenate(
                    (
                        dict_params["tfunc_" + item + "_params_S"],
                        np.tile(
                            dict_params["tfunc_" + item + "_params_S"][
                                -1, :, :
                            ].reshape(1, p.BW, num_etr_params),
                            (p.S - S_in_tax_params, 1, 1),
                        ),
                    ),
                    axis=0,
                )
    # Reorder from (S, BW, params) to (T, S, params), repeating the
    # final budget-window year for all model periods after BW.
    etr_params = np.empty((p.T, p.S, num_etr_params))
    mtrx_params = np.empty((p.T, p.S, num_mtrx_params))
    mtry_params = np.empty((p.T, p.S, num_mtry_params))
    etr_params[: p.BW, :, :] = np.transpose(
        dict_params["tfunc_etr_params_S"][: p.S, : p.BW, :], axes=[1, 0, 2]
    )
    etr_params[p.BW :, :, :] = np.tile(
        np.transpose(
            dict_params["tfunc_etr_params_S"][: p.S, -1, :].reshape(
                p.S, 1, num_etr_params
            ),
            axes=[1, 0, 2],
        ),
        (p.T - p.BW, 1, 1),
    )
    mtrx_params[: p.BW, :, :] = np.transpose(
        dict_params["tfunc_mtrx_params_S"][: p.S, : p.BW, :],
        axes=[1, 0, 2],
    )
    mtrx_params[p.BW :, :, :] = np.transpose(
        dict_params["tfunc_mtrx_params_S"][: p.S, -1, :].reshape(
            p.S, 1, num_mtrx_params
        ),
        axes=[1, 0, 2],
    )
    mtry_params[: p.BW, :, :] = np.transpose(
        dict_params["tfunc_mtry_params_S"][: p.S, : p.BW, :],
        axes=[1, 0, 2],
    )
    mtry_params[p.BW :, :, :] = np.transpose(
        dict_params["tfunc_mtry_params_S"][: p.S, -1, :].reshape(
            p.S, 1, num_mtry_params
        ),
        axes=[1, 0, 2],
    )
    if p.constant_rates:
        print("Using constant rates!")
        # Make all ETRs equal the average
        etr_params = np.zeros(etr_params.shape)
        # set shift to average rate (index 10 is the shift parameter
        # of the tax functional form)
        etr_params[: p.BW, :, 10] = np.tile(
            dict_params["tfunc_avg_etr"].reshape(p.BW, 1), (1, p.S)
        )
        etr_params[p.BW :, :, 10] = dict_params["tfunc_avg_etr"][-1]
        # # Make all MTRx equal the average
        mtrx_params = np.zeros(mtrx_params.shape)
        # set shift to average rate
        mtrx_params[: p.BW, :, 10] = np.tile(
            dict_params["tfunc_avg_mtrx"].reshape(p.BW, 1), (1, p.S)
        )
        mtrx_params[p.BW :, :, 10] = dict_params["tfunc_avg_mtrx"][-1]
        # # Make all MTRy equal the average
        mtry_params = np.zeros(mtry_params.shape)
        # set shift to average rate
        mtry_params[: p.BW, :, 10] = np.tile(
            dict_params["tfunc_avg_mtry"].reshape(p.BW, 1), (1, p.S)
        )
        mtry_params[p.BW :, :, 10] = dict_params["tfunc_avg_mtry"][-1]
    if p.zero_taxes:
        print("Zero taxes!")
        etr_params = np.zeros(etr_params.shape)
        mtrx_params = np.zeros(mtrx_params.shape)
        mtry_params = np.zeros(mtry_params.shape)
    tax_param_dict = {
        "etr_params": etr_params,
        "mtrx_params": mtrx_params,
        "mtry_params": mtry_params,
        "taxcalc_version": taxcalc_version,
        "mean_income_data": mean_income_data,
        "frac_tax_payroll": frac_tax_payroll,
    }

    return tax_param_dict
def plot_all(base_output_path, reform_output_path, save_path):
    '''
    Function to plot all default output plots.

    Args:
        base_output_path (str): path to baseline results
        reform_output_path (str): path to reform results
        save_path (str): path to save plots to

    Returns:
        None: All output figures saved to disk.
    '''
    # Make directory in case it doesn't exist
    utils.mkdirs(save_path)
    # Read in data
    # Read in TPI output and parameters
    base_tpi = utils.safe_read_pickle(
        os.path.join(base_output_path, 'TPI', 'TPI_vars.pkl'))
    base_ss = utils.safe_read_pickle(
        os.path.join(base_output_path, 'SS', 'SS_vars.pkl'))
    base_params = utils.safe_read_pickle(
        os.path.join(base_output_path, 'model_params.pkl'))
    reform_tpi = utils.safe_read_pickle(
        os.path.join(reform_output_path, 'TPI', 'TPI_vars.pkl'))
    reform_ss = utils.safe_read_pickle(
        os.path.join(reform_output_path, 'SS', 'SS_vars.pkl'))
    reform_params = utils.safe_read_pickle(
        os.path.join(reform_output_path, 'model_params.pkl'))
    # Percentage changes in macro vars (Y, K, L, C)
    plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
                    reform_params=reform_params,
                    var_list=['Y', 'K', 'L', 'C'], plot_type='pct_diff',
                    num_years_to_plot=150,
                    start_year=base_params.start_year,
                    vertical_line_years=[
                        base_params.start_year + base_params.tG1,
                        base_params.start_year + base_params.tG2
                    ],
                    plot_title='Percentage Changes in Macro Aggregates',
                    path=os.path.join(save_path,
                                      'MacroAgg_PctChange.png'))
    # Percentage change in fiscal vars (D, G, TR, Rev)
    plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
                    reform_params=reform_params,
                    var_list=['D', 'G', 'TR', 'total_tax_revenue'],
                    plot_type='pct_diff', num_years_to_plot=150,
                    start_year=base_params.start_year,
                    vertical_line_years=[
                        base_params.start_year + base_params.tG1,
                        base_params.start_year + base_params.tG2
                    ],
                    plot_title='Percentage Changes in Fiscal Variables',
                    path=os.path.join(save_path, 'Fiscal_PctChange.png'))
    # r and w in baseline and reform -- vertical lines at tG1, tG2
    plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
                    reform_params=reform_params,
                    var_list=['r'], plot_type='levels',
                    num_years_to_plot=150,
                    start_year=base_params.start_year,
                    vertical_line_years=[
                        base_params.start_year + base_params.tG1,
                        base_params.start_year + base_params.tG2
                    ],
                    plot_title='Real Interest Rates Under Baseline and Reform',
                    path=os.path.join(save_path, 'InterestRates.png'))
    plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
                    reform_params=reform_params,
                    var_list=['w'], plot_type='levels',
                    num_years_to_plot=150,
                    start_year=base_params.start_year,
                    vertical_line_years=[
                        base_params.start_year + base_params.tG1,
                        base_params.start_year + base_params.tG2
                    ],
                    plot_title='Wage Rates Under Baseline and Reform',
                    path=os.path.join(save_path, 'WageRates.png'))
    # Debt-GDP in base and reform-- vertical lines at tG1, tG2
    plot_gdp_ratio(base_tpi, base_params, reform_tpi, reform_params,
                   var_list=['D'], num_years_to_plot=150,
                   start_year=base_params.start_year,
                   vertical_line_years=[
                       base_params.start_year + base_params.tG1,
                       base_params.start_year + base_params.tG2
                   ],
                   plot_title='Debt-to-GDP',
                   path=os.path.join(save_path, 'DebtGDPratio.png'))
    # Tax revenue to GDP in base and reform-- vertical lines at tG1, tG2
    plot_gdp_ratio(base_tpi, base_params, reform_tpi, reform_params,
                   var_list=['total_tax_revenue'], num_years_to_plot=150,
                   start_year=base_params.start_year,
                   vertical_line_years=[
                       base_params.start_year + base_params.tG1,
                       base_params.start_year + base_params.tG2
                   ],
                   plot_title='Tax Revenue to GDP',
                   path=os.path.join(save_path, 'RevenueGDPratio.png'))
    # Pct change in c, n, b, y, etr, mtrx, mtry by ability group over 10 years
    var_list = [
        'c_path', 'n_mat', 'bmat_splus1', 'etr_path', 'mtrx_path',
        'mtry_path', 'y_before_tax_mat'
    ]
    # title_list/path_list are indexed in parallel with var_list here
    # and reused (by position) for the shorter SS var_list below.
    title_list = [
        'consumption', 'labor supply', 'savings', 'effective tax rates',
        'marginal tax rates on labor income',
        'marginal tax rates on capital income', 'before tax income'
    ]
    path_list = ['Cons', 'Labor', 'Save', 'ETR', 'MTRx', 'MTRy', 'Income']
    for i, v in enumerate(var_list):
        ability_bar(base_tpi, base_params, reform_tpi, reform_params,
                    var=v, num_years=10,
                    start_year=base_params.start_year,
                    plot_title='Percentage changes in ' + title_list[i],
                    path=os.path.join(save_path, 'PctChange_' +
                                      path_list[i] + '.png'))
    # lifetime profiles, base vs reform, SS for c, n, b, y - not by j
    var_list = [
        'cssmat', 'nssmat', 'bssmat_splus1', 'etr_ss', 'mtrx_ss',
        'mtry_ss'
    ]
    for i, v in enumerate(var_list):
        ss_profiles(base_ss, base_params, reform_ss, reform_params,
                    by_j=False, var=v,
                    plot_title='Lifecycle Profile of ' + title_list[i],
                    path=os.path.join(
                        save_path, 'SSLifecycleProfile_' + path_list[i] +
                        '.png'))
    # lifetime profiles, c, n , b, y by j, separately for base and reform
    for i, v in enumerate(var_list):
        ss_profiles(base_ss, base_params, by_j=True, var=v,
                    plot_title='Lifecycle Profile of ' + title_list[i],
                    path=os.path.join(
                        save_path, 'SSLifecycleProfile_' + path_list[i] +
                        '_Baseline.png'))
        ss_profiles(reform_ss, reform_params, by_j=True, var=v,
                    plot_title='Lifecycle Profile of ' + title_list[i],
                    path=os.path.join(
                        save_path, 'SSLifecycleProfile_' + path_list[i] +
                        '_Reform.png'))
def runner(p, time_path=True, client=None):
    '''
    This function runs the OG-Core model, solving for the steady-state
    and (optionally) the time path equilibrium.

    Args:
        p (Specifications object): model parameters
        time_path (bool): whether to solve for the time path equilibrium
        client (Dask client object): client

    Returns:
        None
    '''
    tick = time.time()
    # Create output directory structure.
    # os.makedirs(..., exist_ok=True) replaces the previous
    # try/except OSError, which also swallowed permission errors.
    ss_dir = os.path.join(p.output_base, "SS")
    tpi_dir = os.path.join(p.output_base, "TPI")
    for _dir in (ss_dir, tpi_dir):
        print("making dir: ", _dir)
        os.makedirs(_dir, exist_ok=True)
    print('In runner, baseline is ', p.baseline)

    '''
    ------------------------------------------------------------------------
    Run SS
    ------------------------------------------------------------------------
    '''
    ss_outputs = SS.run_SS(p, client=client)

    '''
    ------------------------------------------------------------------------
    Pickle SS results
    ------------------------------------------------------------------------
    '''
    # SS directory was already created above; no need to re-create it.
    ss_vars_path = os.path.join(ss_dir, "SS_vars.pkl")
    with open(ss_vars_path, "wb") as f:
        pickle.dump(ss_outputs, f)
    print('JUST SAVED SS output to ', ss_vars_path)
    # Save pickle with parameter values for the run
    param_dir = os.path.join(p.output_base, "model_params.pkl")
    with open(param_dir, "wb") as f:
        cloudpickle.dump((p), f)

    if time_path:
        '''
        ------------------------------------------------------------------------
        Run the TPI simulation
        ------------------------------------------------------------------------
        '''
        tpi_output = TPI.run_TPI(p, client=client)

        '''
        ------------------------------------------------------------------------
        Pickle TPI results
        ------------------------------------------------------------------------
        '''
        tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
        with open(tpi_vars, "wb") as f:
            pickle.dump(tpi_output, f)

        print("Time path iteration complete.")
    print("It took {0} seconds to get that part done.".format(
        time.time() - tick))
def _plot_log_ability_lines(ages, abil_pcts, emat, J):
    '''
    Build the 2D plot of log ability profiles by lifetime-income group
    (one line per group, labeled with its percentile range) and return
    the axes.  Shared by the save-to-disk and return-axes code paths.
    '''
    ax = plt.subplot(111)
    linestyles = np.array(["-", "--", "-.", ":", ])
    markers = np.array(["x", "v", "o", "d", ">", "|"])
    pct_lb = 0
    for j in range(J):
        this_label = (
            str(int(np.rint(pct_lb))) + " - " +
            str(int(np.rint(pct_lb + 100 * abil_pcts[j]))) + "%")
        pct_lb += 100 * abil_pcts[j]
        # First four groups get line styles; later groups get markers.
        if j <= 3:
            ax.plot(ages, np.log(emat[:, j]), label=this_label,
                    linestyle=linestyles[j], color='black')
        else:
            ax.plot(ages, np.log(emat[:, j]), label=this_label,
                    marker=markers[j - 4], color='black')
    ax.axvline(x=80, color='black', linestyle='--')
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    ax.set_xlabel(r'age-$s$')
    ax.set_ylabel(r'log ability $log(e_{j,s})$')
    return ax


def plot_income_data(ages, abil_midp, abil_pcts, emat, output_dir=None,
                     filesuffix=""):
    '''
    This function graphs ability matrix in 3D, 2D, log, and nolog

    Args:
        ages (Numpy array) ages represented in sample, length S
        abil_midp (Numpy array): midpoints of income percentile bins
            in each ability group
        abil_pcts (Numpy array): percent of population in each
            lifetime income group, length J
        emat (Numpy array): effective labor units by age and lifetime
            income group, size SxJ
        output_dir (str or None): directory to save plots to; if None,
            no files are written and the 2D log-ability axes object is
            returned instead
        filesuffix (str): suffix to be added to plot files

    Returns:
        matplotlib axes or None: axes of the 2D log plot when
            output_dir is None and J <= 10; otherwise None
    '''
    J = abil_midp.shape[0]
    abil_mesh, age_mesh = np.meshgrid(abil_midp, ages)
    # NOTE(review): matplotlib.cm.get_cmap is deprecated in
    # Matplotlib >= 3.7; switch to matplotlib.colormaps['summer'] once
    # the minimum supported Matplotlib allows it.
    cmap1 = matplotlib.cm.get_cmap('summer')
    if output_dir:
        # Make sure that directory is created
        utils.mkdirs(output_dir)
        if J == 1:
            # Plot of 2D, J=1 in levels
            plt.figure()
            plt.plot(ages, emat)
            filename = "ability_2D_lev" + filesuffix
            fullpath = os.path.join(output_dir, filename)
            plt.savefig(fullpath)
            plt.close()
            # Plot of 2D, J=1 in logs
            plt.figure()
            plt.plot(ages, np.log(emat))
            filename = "ability_2D_log" + filesuffix
            fullpath = os.path.join(output_dir, filename)
            plt.savefig(fullpath)
            plt.close()
        else:
            # Plot of 3D, J>1 in levels
            fig10 = plt.figure()
            # BUG FIX: Figure.gca() no longer accepts keyword arguments
            # (removed in Matplotlib 3.6); use add_subplot instead.
            ax10 = fig10.add_subplot(projection='3d')
            ax10.plot_surface(age_mesh, abil_mesh, emat, rstride=8,
                              cstride=1, cmap=cmap1)
            ax10.set_xlabel(r'age-$s$')
            ax10.set_ylabel(r'ability type -$j$')
            ax10.set_zlabel(r'ability $e_{j,s}$')
            filename = "ability_3D_lev" + filesuffix
            fullpath = os.path.join(output_dir, filename)
            plt.savefig(fullpath)
            plt.close()
            # Plot of 3D, J>1 in logs
            fig11 = plt.figure()
            ax11 = fig11.add_subplot(projection='3d')
            ax11.plot_surface(age_mesh, abil_mesh, np.log(emat),
                              rstride=8, cstride=1, cmap=cmap1)
            ax11.set_xlabel(r'age-$s$')
            ax11.set_ylabel(r'ability type -$j$')
            ax11.set_zlabel(r'log ability $log(e_{j,s})$')
            filename = "ability_3D_log" + filesuffix
            fullpath = os.path.join(output_dir, filename)
            plt.savefig(fullpath)
            plt.close()
            if J <= 10:  # Restricted because of line and marker types
                # Plot of 2D lines from 3D version in logs
                _plot_log_ability_lines(ages, abil_pcts, emat, J)
                filename = "ability_2D_log" + filesuffix
                fullpath = os.path.join(output_dir, filename)
                plt.savefig(fullpath)
                plt.close()
    else:
        if J <= 10:  # Restricted because of line and marker types
            # Plot of 2D lines from 3D version in logs
            return _plot_log_ability_lines(ages, abil_pcts, emat, J)
def run_TPI(p, client=None):
    '''
    Solve for transition path equilibrium of OG-Core.

    Args:
        p (OG-Core Specifications object): model parameters
        client (Dask client object): client

    Returns:
        output (dictionary): dictionary with transition path solution
            results

    Raises:
        RuntimeError: if ENFORCE_SOLUTION_CHECKS is set and the
            transition path fails to converge (TPIdist), violates the
            resource constraint (RC_error), or has Euler errors above
            tolerance
    '''
    # unpack tuples of parameters
    initial_values, ss_vars, theta, baseline_values = get_initial_SS_values(p)
    (B0, b_sinit, b_splus1init, factor, initial_b, initial_n) =\
        initial_values
    (TRbaseline, Gbaseline, D0_baseline) = baseline_values

    # Create time path of UBI household benefits and aggregate UBI outlays
    ubi = p.ubi_nom_array / factor
    UBI = aggr.get_L(ubi[:p.T], p, 'TPI')

    print('Government spending breakpoints are tG1: ', p.tG1,
          '; and tG2:', p.tG2)

    # Initialize guesses at time paths
    # Make array of initial guesses for labor supply and savings
    guesses_b = utils.get_initial_path(
        initial_b, ss_vars['bssmat_splus1'], p, 'ratio')
    guesses_n = utils.get_initial_path(
        initial_n, ss_vars['nssmat'], p, 'ratio')
    b_mat = guesses_b
    n_mat = guesses_n
    ind = np.arange(p.S)

    # Get path for aggregate savings and labor supply
    L_init = np.ones((p.T + p.S,)) * ss_vars['Lss']
    B_init = np.ones((p.T + p.S,)) * ss_vars['Bss']
    L_init[:p.T] = aggr.get_L(n_mat[:p.T], p, 'TPI')
    B_init[1:p.T] = aggr.get_B(b_mat[:p.T], p, 'TPI', False)[:p.T - 1]
    B_init[0] = B0
    K_init = B_init * ss_vars['Kss'] / ss_vars['Bss']
    K = K_init
    K_d = K_init * ss_vars['K_d_ss'] / ss_vars['Kss']
    K_f = K_init * ss_vars['K_f_ss'] / ss_vars['Kss']
    L = L_init
    B = B_init
    Y = np.zeros_like(K)
    Y[:p.T] = firm.get_Y(K[:p.T], L[:p.T], p, 'TPI')
    Y[p.T:] = ss_vars['Yss']
    r = np.zeros_like(Y)
    r[:p.T] = firm.get_r(Y[:p.T], K[:p.T], p, 'TPI')
    r[p.T:] = ss_vars['rss']
    # For case where economy is small open econ
    r[p.zeta_K == 1] = p.world_int_rate[p.zeta_K == 1]
    # Compute other interest rates
    r_gov = fiscal.get_r_gov(r, p)
    r_p = aggr.get_r_p(r, r_gov, K, ss_vars['Dss'])

    # compute w
    w = np.zeros_like(r)
    w[:p.T] = firm.get_w_from_r(r[:p.T], p, 'TPI')
    w[p.T:] = ss_vars['wss']

    # initial guesses at fiscal vars
    if p.budget_balance:
        if np.abs(ss_vars['TR_ss']) < 1e-13:
            TR_ss2 = 0.0  # sometimes SS is very small but not zero,
            # even if taxes are zero, this get's rid of the
            # approximation error, which affects the pct changes below
        else:
            TR_ss2 = ss_vars['TR_ss']
        TR = np.ones(p.T + p.S) * TR_ss2
        total_tax_revenue = TR - ss_vars['agg_pension_outlays']
        G = np.zeros(p.T + p.S)
        D = np.zeros(p.T + p.S)
        D_d = np.zeros(p.T + p.S)
        D_f = np.zeros(p.T + p.S)
    else:
        if p.baseline_spending:
            TR = TRbaseline
            G = Gbaseline
            G[p.T:] = ss_vars['Gss']
        else:
            TR = p.alpha_T * Y
            G = np.ones(p.T + p.S) * ss_vars['Gss']
        D = np.ones(p.T + p.S) * ss_vars['Dss']
        D_d = D * ss_vars['D_d_ss'] / ss_vars['Dss']
        D_f = D * ss_vars['D_f_ss'] / ss_vars['Dss']
        total_tax_revenue = np.ones(p.T + p.S) * ss_vars['total_tax_revenue']

    # Initialize bequests
    BQ0 = aggr.get_BQ(r_p[0], initial_b, None, p, 'SS', True)
    if not p.use_zeta:
        BQ = np.zeros((p.T + p.S, p.J))
        for j in range(p.J):
            BQ[:, j] = (
                list(np.linspace(BQ0[j], ss_vars['BQss'][j], p.T)) +
                [ss_vars['BQss'][j]] * p.S)
        BQ = np.array(BQ)
    else:
        BQ = (list(np.linspace(BQ0, ss_vars['BQss'], p.T)) +
              [ss_vars['BQss']] * p.S)
        BQ = np.array(BQ)

    TPIiter = 0
    TPIdist = 10
    euler_errors = np.zeros((p.T, 2 * p.S, p.J))
    TPIdist_vec = np.zeros(p.maxiter)

    # TPI loop
    while (TPIiter < p.maxiter) and (TPIdist >= p.mindist_TPI):
        r_gov[:p.T] = fiscal.get_r_gov(r[:p.T], p)
        if not p.budget_balance:
            K[:p.T] = firm.get_K_from_Y(Y[:p.T], r[:p.T], p, 'TPI')

        r_p[:p.T] = aggr.get_r_p(r[:p.T], r_gov[:p.T], K[:p.T], D[:p.T])

        outer_loop_vars = (r, w, r_p, BQ, TR, theta)

        euler_errors = np.zeros((p.T, 2 * p.S, p.J))
        lazy_values = []
        for j in range(p.J):
            guesses = (guesses_b[:, :, j], guesses_n[:, :, j])
            lazy_values.append(
                delayed(inner_loop)(guesses, outer_loop_vars,
                                    initial_values, ubi, j, ind, p))
        if client:
            futures = client.compute(lazy_values,
                                     num_workers=p.num_workers)
            results = client.gather(futures)
        else:
            # BUG FIX: was `results = results = compute(...)` (duplicated
            # assignment target)
            # NOTE(review): dask.multiprocessing.get is deprecated in
            # newer dask releases in favor of scheduler='processes' --
            # confirm against the pinned dask version before changing.
            results = compute(*lazy_values,
                              scheduler=dask.multiprocessing.get,
                              num_workers=p.num_workers)

        for j, result in enumerate(results):
            euler_errors[:, :, j], b_mat[:, :, j], n_mat[:, :, j] = result

        # bmat_s is savings held at start of period s (shifted b_mat);
        # bmat_splus1 is savings chosen for next period
        bmat_s = np.zeros((p.T, p.S, p.J))
        bmat_s[0, 1:, :] = initial_b[:-1, :]
        bmat_s[1:, 1:, :] = b_mat[:p.T - 1, :-1, :]
        bmat_splus1 = np.zeros((p.T, p.S, p.J))
        bmat_splus1[:, :, :] = b_mat[:p.T, :, :]

        etr_params_4D = np.tile(
            p.etr_params[:p.T, :, :].reshape(
                p.T, p.S, 1, p.etr_params.shape[2]),
            (1, 1, p.J, 1))
        bqmat = household.get_bq(BQ, None, p, 'TPI')
        trmat = household.get_tr(TR, None, p, 'TPI')
        tax_mat = tax.net_taxes(
            r_p[:p.T], w[:p.T], bmat_s, n_mat[:p.T, :, :],
            bqmat[:p.T, :, :], factor, trmat[:p.T, :, :], ubi[:p.T, :, :],
            theta, 0, None, False, 'TPI', p.e, etr_params_4D, p)
        r_p_path = utils.to_timepath_shape(r_p)
        wpath = utils.to_timepath_shape(w)
        c_mat = household.get_cons(
            r_p_path[:p.T, :, :], wpath[:p.T, :, :], bmat_s, bmat_splus1,
            n_mat[:p.T, :, :], bqmat[:p.T, :, :], tax_mat, p.e,
            p.tau_c[:p.T, :, :], p)
        y_before_tax_mat = household.get_y(
            r_p_path[:p.T, :, :], wpath[:p.T, :, :], bmat_s[:p.T, :, :],
            n_mat[:p.T, :, :], p)

        (total_tax_rev, iit_payroll_tax_revenue, agg_pension_outlays,
         UBI_outlays, bequest_tax_revenue, wealth_tax_revenue,
         cons_tax_revenue, business_tax_revenue, payroll_tax_revenue,
         iit_revenue) = aggr.revenue(
            r_p[:p.T], w[:p.T], bmat_s, n_mat[:p.T, :, :],
            bqmat[:p.T, :, :], c_mat[:p.T, :, :], Y[:p.T], L[:p.T],
            K[:p.T], factor, ubi[:p.T, :, :], theta, etr_params_4D,
            p, 'TPI')
        total_tax_revenue[:p.T] = total_tax_rev
        dg_fixed_values = (Y, total_tax_revenue, agg_pension_outlays,
                           UBI_outlays, TR, Gbaseline, D0_baseline)
        (Dnew, G[:p.T], D_d[:p.T], D_f[:p.T], new_borrowing,
         debt_service, new_borrowing_f) =\
            fiscal.D_G_path(r_gov, dg_fixed_values, p)
        L[:p.T] = aggr.get_L(n_mat[:p.T], p, 'TPI')
        B[1:p.T] = aggr.get_B(bmat_splus1[:p.T], p, 'TPI',
                              False)[:p.T - 1]
        K_demand_open = firm.get_K(
            L[:p.T], p.world_int_rate[:p.T], p, 'TPI')
        K[:p.T], K_d[:p.T], K_f[:p.T] = aggr.get_K_splits(
            B[:p.T], K_demand_open, D_d[:p.T], p.zeta_K[:p.T])
        Ynew = firm.get_Y(K[:p.T], L[:p.T], p, 'TPI')
        rnew = r.copy()
        rnew[:p.T] = firm.get_r(Ynew[:p.T], K[:p.T], p, 'TPI')
        # For case where economy is small open econ
        # BUG FIX: the override must be applied to the newly computed
        # rate path rnew (previously applied to r, so r_gov_new, r_p_new
        # and wnew below were computed from an un-overridden rnew)
        rnew[p.zeta_K == 1] = p.world_int_rate[p.zeta_K == 1]
        r_gov_new = fiscal.get_r_gov(rnew, p)
        r_p_new = aggr.get_r_p(rnew[:p.T], r_gov_new[:p.T], K[:p.T],
                               Dnew[:p.T])
        # compute w
        wnew = firm.get_w_from_r(rnew[:p.T], p, 'TPI')

        b_mat_shift = np.append(np.reshape(initial_b, (1, p.S, p.J)),
                                b_mat[:p.T - 1, :, :], axis=0)
        BQnew = aggr.get_BQ(r_p_new[:p.T], b_mat_shift, None, p, 'TPI',
                            False)
        bqmat_new = household.get_bq(BQnew, None, p, 'TPI')
        (total_tax_rev, iit_payroll_tax_revenue, agg_pension_outlays,
         UBI_outlays, bequest_tax_revenue, wealth_tax_revenue,
         cons_tax_revenue, business_tax_revenue, payroll_tax_revenue,
         iit_revenue) = aggr.revenue(
            r_p_new[:p.T], wnew[:p.T], bmat_s, n_mat[:p.T, :, :],
            bqmat_new[:p.T, :, :], c_mat[:p.T, :, :], Ynew[:p.T],
            L[:p.T], K[:p.T], factor, ubi[:p.T, :, :], theta,
            etr_params_4D, p, 'TPI')
        total_tax_revenue[:p.T] = total_tax_rev
        TR_new = fiscal.get_TR(
            Ynew[:p.T], TR[:p.T], G[:p.T], total_tax_revenue[:p.T],
            agg_pension_outlays[:p.T], UBI_outlays[:p.T], p, 'TPI')

        # update vars for next iteration
        w[:p.T] = wnew[:p.T]
        r[:p.T] = utils.convex_combo(rnew[:p.T], r[:p.T], p.nu)
        BQ[:p.T] = utils.convex_combo(BQnew[:p.T], BQ[:p.T], p.nu)
        D[:p.T] = Dnew[:p.T]
        Y[:p.T] = utils.convex_combo(Ynew[:p.T], Y[:p.T], p.nu)
        if not p.baseline_spending:
            TR[:p.T] = utils.convex_combo(TR_new[:p.T], TR[:p.T], p.nu)
        guesses_b = utils.convex_combo(b_mat, guesses_b, p.nu)
        guesses_n = utils.convex_combo(n_mat, guesses_n, p.nu)
        print('r diff: ', (rnew[:p.T] - r[:p.T]).max(),
              (rnew[:p.T] - r[:p.T]).min())
        print('BQ diff: ', (BQnew[:p.T] - BQ[:p.T]).max(),
              (BQnew[:p.T] - BQ[:p.T]).min())
        print('TR diff: ', (TR_new[:p.T] - TR[:p.T]).max(),
              (TR_new[:p.T] - TR[:p.T]).min())
        print('Y diff: ', (Ynew[:p.T] - Y[:p.T]).max(),
              (Ynew[:p.T] - Y[:p.T]).min())

        # Convergence measured on pct differences of r, BQ, and TR
        # (or Y when spending is fixed at the baseline path)
        if not p.baseline_spending:
            # NOTE(review): TR.all() != 0 tests that *every* TR entry is
            # nonzero, not that TR is nonzero somewhere -- confirm intent
            if TR.all() != 0:
                TPIdist = np.array(
                    list(utils.pct_diff_func(rnew[:p.T], r[:p.T])) +
                    list(utils.pct_diff_func(BQnew[:p.T],
                                             BQ[:p.T]).flatten()) +
                    list(utils.pct_diff_func(TR_new[:p.T],
                                             TR[:p.T]))).max()
            else:
                TPIdist = np.array(
                    list(utils.pct_diff_func(rnew[:p.T], r[:p.T])) +
                    list(utils.pct_diff_func(BQnew[:p.T],
                                             BQ[:p.T]).flatten()) +
                    list(np.abs(TR[:p.T]))).max()
        else:
            TPIdist = np.array(
                list(utils.pct_diff_func(rnew[:p.T], r[:p.T])) +
                list(utils.pct_diff_func(BQnew[:p.T],
                                         BQ[:p.T]).flatten()) +
                list(utils.pct_diff_func(Ynew[:p.T], Y[:p.T]))).max()

        TPIdist_vec[TPIiter] = TPIdist
        # After T=10, if cycling occurs, drop the value of nu
        # wait til after T=10 or so, because sometimes there is a jump up
        # in the first couple iterations
        # if TPIiter > 10:
        #     if TPIdist_vec[TPIiter] - TPIdist_vec[TPIiter - 1] > 0:
        #         nu /= 2
        #         print 'New Value of nu:', nu
        TPIiter += 1
        print('Iteration:', TPIiter)
        print('\tDistance:', TPIdist)

    # Compute effective and marginal tax rates for all agents
    mtrx_params_4D = np.tile(
        p.mtrx_params[:p.T, :, :].reshape(
            p.T, p.S, 1, p.mtrx_params.shape[2]),
        (1, 1, p.J, 1))
    mtry_params_4D = np.tile(
        p.mtry_params[:p.T, :, :].reshape(
            p.T, p.S, 1, p.mtry_params.shape[2]),
        (1, 1, p.J, 1))

    e_3D = np.tile(p.e.reshape(1, p.S, p.J), (p.T, 1, 1))
    mtry_path = tax.MTR_income(
        r_p_path[:p.T], wpath[:p.T], bmat_s[:p.T, :, :],
        n_mat[:p.T, :, :], factor, True, e_3D, etr_params_4D,
        mtry_params_4D, p)
    mtrx_path = tax.MTR_income(
        r_p_path[:p.T], wpath[:p.T], bmat_s[:p.T, :, :],
        n_mat[:p.T, :, :], factor, False, e_3D, etr_params_4D,
        mtrx_params_4D, p)
    etr_path = tax.ETR_income(
        r_p_path[:p.T], wpath[:p.T], bmat_s[:p.T, :, :],
        n_mat[:p.T, :, :], factor, e_3D, etr_params_4D, p)

    C = aggr.get_C(c_mat, p, 'TPI')
    # Note that implicity in this computation is that immigrants'
    # wealth is all in the form of private capital
    I_d = aggr.get_I(bmat_splus1[:p.T], K_d[1:p.T + 1], K_d[:p.T], p,
                     'TPI')
    I = aggr.get_I(bmat_splus1[:p.T], K[1:p.T + 1], K[:p.T], p, 'TPI')
    # solve resource constraint
    # foreign debt service costs
    debt_service_f = fiscal.get_debt_service_f(r_p, D_f)
    RC_error = aggr.resource_constraint(
        Y[:p.T - 1], C[:p.T - 1], G[:p.T - 1], I_d[:p.T - 1],
        K_f[:p.T - 1], new_borrowing_f[:p.T - 1],
        debt_service_f[:p.T - 1], r_p[:p.T - 1], p)
    # Compute total investment (not just domestic)
    I_total = aggr.get_I(None, K[1:p.T + 1], K[:p.T], p, 'total_tpi')

    # Compute resource constraint error
    rce_max = np.amax(np.abs(RC_error))
    print('Max absolute value resource constraint error:', rce_max)

    print('Checking time path for violations of constraints.')
    for t in range(p.T):
        household.constraint_checker_TPI(
            b_mat[t], n_mat[t], c_mat[t], t, p.ltilde)

    eul_savings = euler_errors[:, :p.S, :].max(1).max(1)
    eul_laborleisure = euler_errors[:, p.S:, :].max(1).max(1)
    print('Max Euler error, savings: ', eul_savings)
    print('Max Euler error labor supply: ', eul_laborleisure)

    '''
    ------------------------------------------------------------------------
    Save variables/values so they can be used in other modules
    ------------------------------------------------------------------------
    '''
    output = {
        'Y': Y[:p.T], 'B': B, 'K': K, 'K_f': K_f, 'K_d': K_d,
        'L': L, 'C': C, 'I': I, 'I_total': I_total, 'I_d': I_d,
        'BQ': BQ, 'total_tax_revenue': total_tax_revenue,
        'business_tax_revenue': business_tax_revenue,
        'iit_payroll_tax_revenue': iit_payroll_tax_revenue,
        'iit_revenue': iit_revenue,
        'payroll_tax_revenue': payroll_tax_revenue, 'TR': TR,
        'agg_pension_outlays': agg_pension_outlays,
        'bequest_tax_revenue': bequest_tax_revenue,
        'wealth_tax_revenue': wealth_tax_revenue,
        'cons_tax_revenue': cons_tax_revenue, 'G': G, 'D': D,
        'D_f': D_f, 'D_d': D_d, 'r': r, 'r_gov': r_gov,
        'r_p': r_p, 'w': w, 'bmat_splus1': bmat_splus1,
        'bmat_s': bmat_s[:p.T, :, :], 'n_mat': n_mat[:p.T, :, :],
        'c_path': c_mat, 'bq_path': bqmat, 'tr_path': trmat,
        'y_before_tax_mat': y_before_tax_mat,
        'tax_path': tax_mat, 'eul_savings': eul_savings,
        'eul_laborleisure': eul_laborleisure,
        'resource_constraint_error': RC_error,
        'new_borrowing_f': new_borrowing_f,
        'debt_service_f': debt_service_f,
        'etr_path': etr_path, 'mtrx_path': mtrx_path,
        'mtry_path': mtry_path, 'ubi_path': ubi, 'UBI_path': UBI}

    tpi_dir = os.path.join(p.output_base, "TPI")
    utils.mkdirs(tpi_dir)
    tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
    with open(tpi_vars, "wb") as f:
        pickle.dump(output, f)

    # BUG FIX: was `np.any(G) < 0`, which compares a bool to 0 and is
    # always False, so this warning could never print
    if np.any(G < 0):
        print('Government spending is negative along transition path' +
              ' to satisfy budget')

    if (((TPIiter >= p.maxiter) or
         (np.absolute(TPIdist) > p.mindist_TPI)) and
            ENFORCE_SOLUTION_CHECKS):
        raise RuntimeError('Transition path equlibrium not found' +
                           ' (TPIdist)')

    if ((np.any(np.absolute(RC_error) >= p.mindist_TPI * 10)) and
            ENFORCE_SOLUTION_CHECKS):
        raise RuntimeError('Transition path equlibrium not found ' +
                           '(RC_error)')

    if ((np.any(np.absolute(eul_savings) >= p.mindist_TPI) or
         (np.any(np.absolute(eul_laborleisure) > p.mindist_TPI))) and
            ENFORCE_SOLUTION_CHECKS):
        raise RuntimeError('Transition path equlibrium not found ' +
                           '(eulers)')

    return output