def shortDescription(self):
    """Return a short multi-line description of this scenario.

    The first line is the bracketed Description; each following line is an
    aligned `name = value` entry for the core calibration parameters.
    """
    from paramGeneratorModule import ParamGenerator

    T_model = ParamGenerator.timing(self)['T_model']

    # (format-spec, label, value) for each reported field, in display order
    fields = (
        ('%u',    'T_model',     T_model),
        ('%u',    'IsLowReturn', self.IsLowReturn),
        ('%7.8f', 'Beta',        self.beta),
        ('%7.8f', 'Gamma',       self.gamma),
        ('%7.8f', 'Sigma',       self.sigma),
        ('%e',    'Model$',      self.modelunit_dollar),
    )

    desc = '[ %s ]' % self.Description
    for fmt, label, value in fields:
        desc += ('\n \t%-25s= ' + fmt) % (label, value)
    return desc
def doGELoop():
    """Run a fixed-point iteration for a simple general-equilibrium loop.

    Builds the baseline current-policy scenario, then alternates between
    solving the firm's problem and updating the wage / discount-rate guesses
    until excess labor and excess share demand fall below tolerance.
    Prints the elapsed wall-clock time when converged.
    """
    # Build baseline scenario
    scenario = Scenario(ModelTester.test_params).currentPolicy()

    # Get production parameters, remap some parameters -- this is temporary
    paramsProduction = ParamGenerator.production(scenario)
    paramsProduction['capitalShare'] = paramsProduction['alpha']
    paramsProduction['laborShare'] = 1 - paramsProduction['alpha']

    Initial = {'capital0': 10}
    wage = 1.5 * np.ones(100)
    discountRate = 0.04 * np.ones(100)
    T = len(wage)

    # Specify the tolerance level
    tolerance = {}
    tolerance['wage'] = 1e-5
    tolerance['discountRate'] = 1e-5

    excessLabor = np.ones(100)
    excessShare = np.ones(100)

    # Start the iterative method
    iteration = 0
    # BUG FIX: the original stored the start time in `t`, but the update loop
    # below reused `t` as its index, so the final elapsed-time printout was
    # meaningless (time.time() minus a loop index).
    startTime = time.time()
    while (max(excessLabor[:]) > tolerance['wage']
           or max(excessShare[1:T]) > tolerance['discountRate']):
        iteration = iteration + 1
        Market = {}
        Market['wage'] = wage
        Market['discountRate'] = discountRate

        # Initialize firm
        theFirm = DynamicFirmGE(Initial, Market, paramsProduction)

        laborSupply = theFirm.getLS()
        laborDemand = theFirm.getLD()
        excessLabor = abs(laborSupply - laborDemand)
        shareDemand = theFirm.getShare()
        excessShare = abs(shareDemand - 1)
        value = theFirm.getValue()

        # Update guesses: damped adjustment toward market clearing
        wage = (1 / (1 + ((laborSupply - laborDemand) / laborDemand) * 0.1)) * Market['wage']
        for j in range(T - 1):
            discountRate[j] = (1 / (1 + ((1 - value[j + 1]) / abs(value[j + 1])) * 0.1)) * Market['discountRate'][j]
        discountRate[T - 1] = discountRate[T - 2]

    print(time.time() - startTime)
def report_baseline_moments():
    """Write baseline calibration moments to BaselineMoments.txt.

    For each (labor elasticity, savings elasticity) pair on a 0.25-step grid,
    inverts the calibration targets into deep parameters, solves the steady
    state, and appends the calibrator's moment report to the output file.
    """
    outputfilename = os.path.join(PathFinder.getSourceDir(),
                                  'BaselineMoments.txt')
    # IMPROVED: use a context manager so the report file is closed even if a
    # solve or report step raises part-way through (original leaked the handle).
    with open(outputfilename, 'w+') as f:
        f.write('-------------BASELINE MOMENTS-------------')
        f.write('%s \r\n' % str(datetime.datetime.now()))

        # load the matrix and get inverter function
        (_, f_invert) = ParamGenerator.invert()

        for labelas in np.arange(0.25, 1.0, 0.25):
            for savelas in np.arange(0.25, 1.0, 0.25):
                target = {'labelas': labelas, 'savelas': savelas}
                f.write(
                    '\r\nBASELINE labor elas = %0.2f savings elas = %0.2f \r\n'
                    % (labelas, savelas))
                inverse = f_invert(target)
                scenario = Scenario({
                    'economy': 'steady',
                    'beta': inverse['beta'],
                    'gamma': inverse['gamma'],
                    'sigma': inverse['sigma'],
                    'modelunit_dollar': inverse['modelunit_dollar'],
                    'bequest_phi_1': 0
                })
                save_dir = ModelSolver.solve(scenario)

                # Extend the calibrator's target table with the elasticities
                targets = ModelCalibrator.moment_targets
                targets = np.vstack(
                    (targets, ['labelas', labelas, 'Labor elasticity']))
                targets = np.vstack(
                    (targets, ['savelas', savelas, 'Savings elasticity']))
                outstr = ModelCalibrator.report_moments(save_dir, targets)
                f.write('%s \r\n' % outstr)
                f.write('-------------------------------------\r\n')
        f.write(' ==== DONE ===== \r\n')
def adjust_grid():
    """Check calibration-grid coverage and widen the grid where it binds.

    For each (labelas, savelas) target pair, measures the relative distance of
    the inverted (beta, gamma, sigma) point from each grid edge.  Points within
    `epsilon` of an edge are flagged (plotted in red/yellow rather than green),
    and the corresponding grid bound is expanded by 10%.

    Returns:
        (grid_beta, grid_gamma, grid_sigma): the adjusted [low, high] bounds.
    """
    epsilon = 1e-4
    (_, f_invert) = ParamGenerator.invert()
    green = [0, 180 / 256, 0]
    # One RGB row per (labelas, savelas) combination, default green.
    # BUG FIX: the original tiled green into a (3, 16) array but then indexed
    # rows as cv[i, :]; the intended shape (see the zeros(16,3) note) is (16, 3).
    cv = np.tile(np.reshape(green, (1, 3)), [16, 1])  # zeros(16,3)
    grid_beta = [0.950, 1.100]
    grid_gamma = [0.150, 0.900]
    grid_sigma = [1.200, 9.000]
    delta_beta = np.zeros((16, 2))
    delta_gamma = np.zeros((16, 2))
    delta_sigma = np.zeros((16, 2))
    labelasv = np.zeros((16, 1))
    savelasv = np.zeros((16, 1))

    # BUG FIX: renamed `iter` -> `idx` (was shadowing the builtin).
    idx = 0
    for labelas in np.arange(0.25, 1, 0.25):
        for savelas in np.arange(0.25, 1, 0.25):
            target = {'labelas': labelas, 'savelas': savelas}
            inverse = f_invert(target)

            # Relative distance of the inverted point from each grid edge
            delta_beta[idx, :] = np.hstack(
                ((inverse['beta'] - grid_beta[0]) / inverse['beta'],
                 (grid_beta[1] - inverse['beta']) / inverse['beta']))
            delta_gamma[idx, :] = np.hstack(
                ((inverse['gamma'] - grid_gamma[0]) / inverse['gamma'],
                 (grid_gamma[1] - inverse['gamma']) / inverse['gamma']))
            delta_sigma[idx, :] = np.hstack(
                ((inverse['sigma'] - grid_sigma[0]) / inverse['sigma'],
                 (grid_sigma[1] - inverse['sigma']) / inverse['sigma']))
            delta = min(
                np.minimum(
                    np.minimum(delta_beta[idx, :], delta_gamma[idx, :]),
                    delta_sigma[idx, :]))
            if delta <= epsilon:
                # BUG FIX: `[1, delta, 0] * 200 / 256` is Python-list
                # arithmetic (list replication, then a TypeError); the intent
                # is elementwise scaling of the RGB triple.
                cv[idx, :] = np.array([1, delta, 0]) * 200 / 256
            labelasv[idx, 0] = labelas
            savelasv[idx, 0] = savelas
            idx = idx + 1

    # Plot flagged vs. covered target points
    plt.figure()
    plt.scatter(labelasv, savelasv, s=40, c=cv, marker='o')
    plt.xlabel('labor elasticity', fontsize=13)
    plt.xticks(ticks=np.arange(0, 1.00, 0.25))
    plt.ylabel('savings elasticity', fontsize=13)
    plt.yticks(ticks=np.arange(0, 1.00, 0.25))
    # BUG FIX: the `b=` keyword was deprecated and removed from
    # matplotlib's grid(); pass the visibility flag positionally.
    plt.grid(True)

    # Adjust grids: widen any bound that some target point nearly hit
    if min(delta_beta[:, 0]) <= epsilon:
        grid_beta[0] = 0.9 * grid_beta[0]
    if min(delta_beta[:, 1]) <= epsilon:
        grid_beta[1] = 1.1 * grid_beta[1]
    if min(delta_gamma[:, 0]) <= epsilon:
        grid_gamma[0] = 0.9 * grid_gamma[0]
    if min(delta_gamma[:, 1]) <= epsilon:
        grid_gamma[1] = 1.1 * grid_gamma[1]
    if min(delta_sigma[:, 0]) <= epsilon:
        # sigma must stay above 1 for the utility function to be well-defined
        grid_sigma[0] = max(1.01, 0.9 * grid_sigma[0])
    if min(delta_sigma[:, 1]) <= epsilon:
        grid_sigma[1] = 1.1 * grid_sigma[1]

    return (grid_beta, grid_gamma, grid_sigma)
def writeTransitionMatrix(scenario):
    """Discretize a solved scenario's policy functions into transition
    matrices and write them (plus a scenario map entry) to the transition
    matrix output directory.

    Skips writing if the scenario's tagged output folder already exists.
    """
    # load solution objects
    from pathFinderModule import PathFinder
    cacheDir = PathFinder.getCacheDir(scenario)
    with open(os.path.join(cacheDir, 'decisions.pkl'), 'rb') as handle:
        OPTs = pickle.load(handle)

    # get the base output directory
    baseOutputDir = PathFinder.getTransitionMatrixOutputDir()

    # create output folder if it does not exist
    # BUG FIX: os.path has no mkdir(); the directory creation call is os.mkdir().
    if not os.path.exists(baseOutputDir):
        os.mkdir(baseOutputDir)

    # get the tagged subfolder output directory
    outputDir = os.path.join(baseOutputDir,
                             PathFinder.getScenarioPathTag(scenario))

    # check for whether scenario output subfolder exists
    # if it does, then this is a duplicate writing out
    if os.path.exists(outputDir):
        return None

    # check if map file exists, create it if it does not
    if not os.path.exists(os.path.join(baseOutputDir, 'map.csv')):
        fileHandle = open(os.path.join(baseOutputDir, 'map.csv'), 'w')
        # NOTE(review): iterates the Scenario object directly for header
        # names -- presumably Scenario is iterable over parameter names;
        # confirm against the Scenario class.
        for k in scenario:
            fileHandle.write(k + ',')
        fileHandle.write('\n')
        fileHandle.close()

    # append scenario info to map file by writing out to text file
    # then loading text file back in
    with open('.temp.txt', 'w') as f:
        values = scenario.getParams()
        w = csv.DictWriter(f, values.keys())
        w.writerow(values)
    f = open('.temp.txt', 'r')
    text = f.read()
    f.close()
    # BUG FIX: os.path has no remove(); file deletion is os.remove().
    os.remove('.temp.txt')

    fileHandle = open(os.path.join(baseOutputDir, 'map.csv'), 'a+')
    # BUG FIX: the original called print(fileHandle, ...), which writes the
    # handle's repr to stdout; the intent is to append the record to map.csv.
    fileHandle.write(scenario.basedeftag + ',' + scenario.counterdeftag + ','
                     + text)
    fileHandle.close()

    # create a folder to store output
    os.mkdir(outputDir)

    # converts policy function into discretized transition matrix
    # if policy doesn't fall neatly into grid, averages between two
    # nearest points proportionally to distance from that point
    def convertToTransitionMatrix(policy, values, dim):
        discrete = np.digitize(policy, values)
        # BUG FIX: `values` is an array; the original used MATLAB-style call
        # syntax values(...) instead of indexing values[...].
        distanceToBinEdge = policy - values[discrete]
        distanceToBinEdgeUpper = policy - values[discrete + 1]
        upperProbability = distanceToBinEdge / (distanceToBinEdge
                                                - distanceToBinEdgeUpper)
        transition = np.zeros((len(discrete), dim))
        transition[np.ravel_multi_index(
            (np.array(range(grids['nz'] * grids['nk'] * grids['nb'])),
             (discrete + 1)), transition.shape)] = upperProbability
        transition[np.ravel_multi_index(
            (np.array(range(grids['nz'] * grids['nk'] * grids['nb'])),
             discrete), transition.shape)] = 1 - upperProbability
        return transition

    # for a given age, year, discretize assets and lifetime earning
    # average transitions. store output in `transitions` variable.
    transitions = {}

    # store grids for easy access (closed over by convertToTransitionMatrix)
    from paramGeneratorModule import ParamGenerator
    grids = ParamGenerator.grids(scenario)

    for age in range(OPTs['SAVINGS'].shape[3]):
        for year in range(OPTs['SAVINGS'].shape[4]):
            # compute transition matrices for full state -> assets,
            # earnings grid
            assetsTransition = convertToTransitionMatrix(
                OPTs['SAVINGS'][:, :, :, age, year], grids['kv'], grids['nk'])
            earningsTransition = convertToTransitionMatrix(
                OPTs['AVG_EARNINGS'][:, :, :, age, year], grids['bv'],
                grids['nb'])

            # compute joint transition of assets and earnings
            assetEarningsTransition = (
                np.kron(np.ones((1, grids['nb'])), assetsTransition)
                * np.kron(earningsTransition, np.ones((1, grids['nk']))))

            # expand joint transition of asset and earnings to full
            # state space size
            assetEarningsTransition = np.kron(np.ones((1, grids['nz'])),
                                              assetEarningsTransition)

            # get the productivity transition matrix for this age
            productivityTransition = grids['transz']
            productivityTransition = np.squeeze(
                productivityTransition[age, :, :])

            # expand it to the full state space size
            # BUG FIX: np.ones takes the shape as a single tuple; the original
            # passed the two dimensions as separate arguments.
            productivityTransition = np.kron(
                productivityTransition,
                np.ones((grids['nb'] * grids['nk'],
                         grids['nb'] * grids['nk'])))

            # multiply to get full transition matrix
            transitionMatrix = productivityTransition * assetEarningsTransition

            # save transition matrix into struct
            transitions['age' + str(age) + 'year' + str(year)] = transitionMatrix

    with open(os.path.join(outputDir, 'data.pkl'), 'wb') as handle:
        pickle.dump(transitions, handle, protocol=pickle.HIGHEST_PROTOCOL)
def __init__(self, params):
    """Construct a Scenario from a dictionary of parameter values.

    Fills missing core parameters from the calibrator, applies policy and
    version defaults, normalizes timing fields, and builds the identifier
    tags (basedeftag, counterdeftag, transitiontag, comparison tags) used
    to locate cached results.

    Args:
        params: dict of scenario parameter values.

    Raises:
        AssertionError: if params is not a dict or a required initial /
            transition parameter is missing or None.
    """
    assert isinstance(
        params, dict
    ), 'Scenario constructor expects dictionary of parameter values.'

    # Set certain required missing params to defaults
    # IDIOM: `'x' not in params` instead of `not 'x' in params.keys()`
    if 'IsSteadyState' not in params:
        params['IsSteadyState'] = 0
    if 'LaborShock' not in params:
        params['LaborShock'] = Scenario.policy_params['LaborShock']

    # If required parameters are missing,
    # find them from calibrator (which looks at other params)
    if any(v not in params for v in Scenario.initial_params):
        print(
            '[INFO] Scenario core parameters missing. Fetching from Calibrator.\n'
        )
        from paramGeneratorModule import ParamGenerator
        x = ParamGenerator.invert(params)
        params['beta'] = x['beta']
        params['gamma'] = x['gamma']
        params['sigma'] = x['sigma']
        params['modelunit_dollar'] = x['modelunit_dollar']
        params['bequest_phi_1'] = x['bequest_phi_1']

    # Assign fields to Scenario, warn if any extras
    for k in params.keys():
        if hasattr(self, k):
            setattr(self, k, params[k])
        else:
            warnings.warn(
                'Field <%s> does not match any Scenario fields.' % k)

    # Check for required initial parameters
    # TBD: leave this here or take it out from needCalibrate check?
    # IDIOM: identity check `is not None` instead of `!= None`
    for i in Scenario.initial_params:
        assert i in params and params[i] is not None, \
            'Scenario constructor requires nonempty <%s> parameter.' % i

    # Check for required transition path parameters
    for i in Scenario.transition_params:
        assert i in params and params[i] is not None, \
            'Scenario constructor requires nonempty <%s> parameter.' % i

    # Set optional policy parameters defaults where unspecified
    for i in Scenario.policy_params:
        if i not in params:
            setattr(self, i, Scenario.policy_params[i])

    # Construct Scenario Description if it is not present
    if self.Description is None:
        if self.isCurrentPolicy():
            policy = 'CurrentPolicy'
        else:
            policy = 'Counterfactual'
        self.Description = self.OpennessPath + '-' + policy
        if self.IsSteadyState:
            self.Description = 'Steady-state'
        self.ConstructedDescription = 1
    else:
        self.ConstructedDescription = 0

    # TBD: Check validity of Description, since sometimes used
    # as filename.

    # Set version defaults where unspecified
    for i in Scenario.version_params:
        if i not in params:
            setattr(self, i, Scenario.version_params[i])

    # Fix timing inconsistencies, if any: clamp both years into the
    # [TransitionFirstYear, TransitionLastYear] window
    self.ClosureYear = min(max(self.ClosureYear, self.TransitionFirstYear),
                           self.TransitionLastYear)
    self.PolicyShockYear = min(
        max(self.PolicyShockYear, self.TransitionFirstYear),
        self.TransitionLastYear)

    # Generate identifier tags for baseline and counterfactual definitions
    #   1. Make string of concatenated params
    #   2. Hash the string down to 120 chars
    # NOTE: comparisontag is built for isEquivalent
    tags = {}
    tags['initial'] = ''
    for i in Scenario.initial_params:
        tags['initial'] += '_' + str(getattr(self, i))
    tags['initialExVersion'] = tags['initial']
    for i in Scenario.version_params.keys():
        tags['initial'] += '_' + getattr(self, i)
    self.basedeftag = Scenario.compactifyTag(tags['initial'])

    tags['policy'] = ''
    for i in Scenario.policy_params.keys():
        tags['policy'] += '_' + str(getattr(self, i))
    if self.isCurrentPolicy():
        self.counterdeftag = 'currentpolicy'
    else:
        self.counterdeftag = Scenario.compactifyTag(tags['policy'])

    tags['transition'] = ''
    if self.IsSteadyState:
        tags['transition'] = 'steady'
        self.transitiontag = 'steady'
    else:
        for i in Scenario.transition_params:
            tags['transition'] += '_' + str(getattr(self, i))
        self.transitiontag = Scenario.compactifyTag(tags['transition'])

    self.comparisontag = tags['initial'] + tags['policy'] + tags['transition']
    self.nonversioncomparisontag = (tags['initialExVersion'] + tags['policy']
                                    + tags['transition'])
# --- Ad-hoc test script: ParamGenerator.labinc_discretization ---
from paramGeneratorModule import ParamGenerator
from inputReaderModule import InputReader
from socialSecurityModule import SocialSecurity
from initialGuessModule import InitialGuess
import pandas as pd
import numpy as np
import os
import pickle

# Build a steady-state, current-policy scenario from the standard test params
# NOTE(review): relies on ModelTester and Scenario already being in scope
# from elsewhere in this file -- confirm the surrounding imports.
t = ModelTester.test_params
scenario = Scenario(t)
scenario = scenario.currentPolicy().steady()
s = ParamGenerator.labinc_discretization(scenario)
# Dead code below: a previous test (ParamGenerator.social_security) kept in a
# triple-quoted string; the closing quote lies outside this view.
'''
#Test ParamGenerator.social_security
from modelTesterModule import ModelTester
from scenarioModule import Scenario
from pathFinderModule import PathFinder
from paramGeneratorModule import ParamGenerator
from inputReaderModule import InputReader
from socialSecurityModule import SocialSecurity
from initialGuessModule import InitialGuess
import pandas as pd
import numpy as np
import os
import pickle
def __init__(self, scenario, DIST=None, Market=None, OPTs=None):
    """Compute wealth and income distribution moments for a steady state.

    Loads (or accepts) the household distribution, market prices, and policy
    functions for a solved steady-state scenario, then computes model and
    data distributions, Gini coefficients, and Lorenz curves for wealth,
    labor income, and total income.

    Args:
        scenario: solved steady-state Scenario.
        DIST: optional household distribution array (loaded from cache if None).
        Market: optional dict of market variables (loaded from cache if None).
        OPTs: optional dict of policy functions (loaded from cache if None).

    Raises:
        Exception: if the scenario is not a steady-state scenario.
    """
    if not scenario.isSteady():
        raise Exception(
            'Unable to generate income distribution moments for transition paths.'
        )

    # PARAMETERS
    pathFinder = PathFinder(scenario)
    self.scenario = scenario
    save_dir = PathFinder.getCacheDir(scenario)

    # Define time constants and grids
    timing = ParamGenerator.timing(scenario)
    grids = ParamGenerator.grids(scenario)
    T_life = timing['T_life']        # Total life years
    T_model = timing['T_model']      # Transition path model years
    Tmax_work = timing['Tmax_work']  # Largest retirement age
    ng = grids['ng']                 # num groups
    nz = grids['nz']                 # num labor productivity shocks
    zs = grids['zs']                 # shocks grid (by demographic type and age)
    nk = grids['nk']                 # num asset points
    nb = grids['nb']                 # num avg. earnings points

    # Useful later for a couple of functions
    self.kv = grids['kv']
    self.karray = np.tile(np.reshape(grids['kv'], [1, nk, 1, 1, 1, 1]),
                          [nz, 1, nb, T_life, ng, T_model])
    self.T_work = Tmax_work
    self.T_life = T_life

    ## DISTRIBUTION AND POLICY FUNCTIONS

    # Import households distribution
    if DIST is None:
        with open(os.path.join(save_dir, 'distribution.pkl'), 'rb') as handle:
            s = pickle.load(handle)
        DIST = s['DIST']
    dist = DIST.flatten(order='F')
    if T_model == 1:
        DIST = DIST[:, :, :, :, :, np.newaxis]

    # Working-age-only distribution, renormalized to sum to 1
    dist_l = np.zeros((nz, nk, nb, T_life, ng, T_model))
    dist_l[0:nz, 0:nk, 0:nb, 0:Tmax_work, 0:ng, 0:T_model] = DIST[
        0:nz, 0:nk, 0:nb, 0:Tmax_work, 0:ng, 0:T_model]  # Working age population
    # NOTE(review): zeroing starts at Tmax_work - 1, which overlaps the
    # working-age block above by one year -- confirm the intended cutoff.
    dist_l[0:nz, 0:nk, 0:nb, Tmax_work - 1:T_life, 0:ng, 0:T_model] = 0  # Retired population
    dist_l = dist_l.flatten(order='F') / np.sum(dist_l)

    # Useful later for a couple of functions
    self.DIST = DIST

    # Import market variables
    if Market is None:
        # BUG FIX: pickle files must be opened in binary mode ('rb'); the
        # original opened this file in text mode.
        with open(os.path.join(save_dir, 'market.pkl'), 'rb') as handle:
            s = pickle.load(handle)
        wages = s['wages']
        capsharesAM = s['capsharesAM']
        bondDividendRates = s['bondDividendRates']
        equityDividendRates = s['equityDividendRates']
    else:
        wages = Market['wages']
        capsharesAM = Market['capsharesAM']
        bondDividendRates = Market['bondDividendRates']
        equityDividendRates = Market['equityDividendRates']

    # Import policy functions; f expands (nz,nk,nb,T_life,T_model) policies
    # across the ng group dimension
    f = lambda X: np.tile(np.reshape(X, [nz, nk, nb, T_life, 1, T_model]),
                          [1, 1, 1, 1, ng, 1])
    if OPTs is None:
        # BUG FIX: binary mode here as well
        with open(os.path.join(save_dir, 'decisions.pkl'), 'rb') as handle:
            s = pickle.load(handle)
        s = s['OPTs']
        labinc = f(s['LABOR']) * np.tile(
            np.reshape(np.transpose(zs, [2, 1, 0]),
                       [nz, 1, 1, T_life, 1, T_model]),
            [1, nk, nb, 1, ng, 1]) * wages
        k = f(s['SAVINGS'])
        self.ben = f(s['OASI_BENEFITS'])
        self.lab = f(s['LABOR'])
        self.con = f(s['CONSUMPTION'])
    else:
        labinc = f(OPTs['LABOR']) * np.tile(
            np.reshape(np.transpose(zs, [2, 1, 0]),
                       [nz, 1, 1, T_life, 1, T_model]),
            [1, nk, nb, 1, ng, 1]) * wages
        k = f(OPTs['SAVINGS'])
        self.ben = f(OPTs['OASI_BENEFITS'])
        self.lab = f(OPTs['LABOR'])
        self.con = f(OPTs['CONSUMPTION'])

    # Capital income: weighted average of bond and equity returns on assets
    kinc = ((1 - capsharesAM) * bondDividendRates
            + capsharesAM * equityDividendRates) * k
    totinc = (labinc.flatten(order='F') + kinc.flatten(order='F')
              + self.ben.flatten(order='F'))  # Total income
    labinc = labinc.flatten(order='F')        # Labor income
    k = k.flatten(order='F')                  # Asset holdings for tomorrow (k')

    # DATA WEALTH AND INCOME DISTRIBUTIONS
    file = pathFinder.getMicrosimInputPath(
        'SIM_NetPersonalWealth_distribution')
    self.a_distdata = pd.read_csv(file)
    # BUG FIX: DataFrame.append returns a new frame (and is deprecated), so
    # the original call discarded its result; write the extra graph point
    # in place instead. Append last point for graph.
    self.a_distdata.loc[len(self.a_distdata)] = [99.9, float('nan'), 1]

    file = pathFinder.getMicrosimInputPath(
        'SIM_PreTaxLaborInc_distribution')
    self.l_distdata = pd.read_csv(file)
    # Append last point for graph (same fix as above)
    self.l_distdata.loc[len(self.l_distdata)] = [99.9, float('nan'), 1]

    # MODEL WEALTH AND INCOME DISTRIBUTIONS
    # Compute wealth distribution
    self.a_distmodel = get_moments(dist, k)
    # Gini and Lorenz curve
    (self.a_ginimodel, self.a_lorenz) = gini(dist, k)

    # Compute labor income distribution
    self.l_distmodel = get_moments(dist_l, labinc)
    # Gini and Lorenz curve
    (self.l_ginimodel, self.l_lorenz) = gini(dist_l, labinc)

    # Compute total income distribution
    self.t_distmodel = get_moments(dist, totinc)
    # Gini and Lorenz curve
    # BUG FIX: the original passed labinc here, recomputing the labor-income
    # gini instead of the total-income gini.
    (self.t_ginimodel, self.t_lorenz) = gini(dist, totinc)