def writeTransitionMatrix(scenario):

    # load solution objects
    import os
    import csv
    import pickle
    import numpy as np
    from pathFinderModule import PathFinder

    cacheDir = PathFinder.getCacheDir(scenario)
    with open(os.path.join(cacheDir, 'decisions.pkl'), 'rb') as handle:
        OPTs = pickle.load(handle)

    # get the base output directory
    baseOutputDir = PathFinder.getTransitionMatrixOutputDir()

    # create output folder if it does not exist
    if not os.path.exists(baseOutputDir):
        os.mkdir(baseOutputDir)

    # get the tagged subfolder output directory
    outputDir = os.path.join(baseOutputDir,
                             PathFinder.getScenarioPathTag(scenario))

    # check whether the scenario output subfolder exists;
    # if it does, then this would be a duplicate write-out
    if os.path.exists(outputDir):
        return None

    # check if the map file exists, create it if it does not
    if not os.path.exists(os.path.join(baseOutputDir, 'map.csv')):
        with open(os.path.join(baseOutputDir, 'map.csv'), 'w') as fileHandle:
            for k in scenario:
                fileHandle.write(k + ',')
            fileHandle.write('\n')

    # append scenario info to the map file by writing it out to a
    # temporary text file, then loading that text file back in
    with open('.temp.txt', 'w') as f:
        values = scenario.getParams()
        w = csv.DictWriter(f, values.keys())
        w.writerow(values)
    with open('.temp.txt', 'r') as f:
        text = f.read()
    os.remove('.temp.txt')

    with open(os.path.join(baseOutputDir, 'map.csv'), 'a+') as fileHandle:
        fileHandle.write(scenario.basedeftag + ',' +
                         scenario.counterdeftag + ',' + text)

    # create a folder to store output
    os.mkdir(outputDir)

    # converts a policy function into a discretized transition matrix;
    # if the policy doesn't fall neatly onto the grid, the mass is split
    # between the two nearest grid points in proportion to the distance
    # from each point
    def convertToTransitionMatrix(policy, values, dim):
        # flatten the (nz x nk x nb) policy array into a vector of states
        policy = policy.flatten(order='F')
        # np.digitize returns the index of the upper bin edge, so shift
        # down by one to index the grid point just below the policy value
        discrete = np.digitize(policy, values) - 1
        distanceToBinEdge = policy - values[discrete]
        distanceToBinEdgeUpper = policy - values[discrete + 1]
        upperProbability = distanceToBinEdge / (distanceToBinEdge -
                                                distanceToBinEdgeUpper)
        transition = np.zeros((len(discrete), dim))
        transition[np.ravel_multi_index(
            (np.array(range(grids['nz'] * grids['nk'] * grids['nb'])),
             discrete + 1), transition.shape)] = upperProbability
        transition[np.ravel_multi_index(
            (np.array(range(grids['nz'] * grids['nk'] * grids['nb'])),
             discrete), transition.shape)] = 1 - upperProbability
        return transition

    # for each age and year, discretize the asset and lifetime-average-earnings
    # transitions and store the output in the `transitions` dict
    transitions = {}

    # store grids for easy access
    from paramGeneratorModule import ParamGenerator
    grids = ParamGenerator.grids(scenario)

    for age in range(OPTs['SAVINGS'].shape[3]):
        for year in range(OPTs['SAVINGS'].shape[4]):

            # compute transition matrices from the full state space to the
            # assets grid and to the earnings grid
            assetsTransition = convertToTransitionMatrix(
                OPTs['SAVINGS'][:, :, :, age, year], grids['kv'], grids['nk'])
            earningsTransition = convertToTransitionMatrix(
                OPTs['AVG_EARNINGS'][:, :, :, age, year], grids['bv'], grids['nb'])

            # compute the joint transition of assets and earnings
            assetEarningsTransition = (
                np.kron(np.ones((1, grids['nb'])), assetsTransition) *
                np.kron(earningsTransition, np.ones((1, grids['nk']))))

            # expand the joint transition of assets and earnings to the
            # full state space size
            assetEarningsTransition = np.kron(np.ones((1, grids['nz'])),
                                              assetEarningsTransition)

            # get the productivity transition matrix for this age
            productivityTransition = grids['transz']
            productivityTransition = np.squeeze(
                productivityTransition[age, :, :])

            # expand it to the full state space size
            productivityTransition = np.kron(
                productivityTransition,
                np.ones((grids['nb'] * grids['nk'],
                         grids['nb'] * grids['nk'])))

            # multiply to get the full transition matrix
            transitionMatrix = productivityTransition * assetEarningsTransition

            # save the transition matrix into the dict
            transitions['age' + str(age) + 'year' + str(year)] = transitionMatrix

    with open(os.path.join(outputDir, 'data.pkl'), 'wb') as handle:
        pickle.dump(transitions, handle, protocol=pickle.HIGHEST_PROTOCOL)
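

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the model output): a minimal, self-contained
# version of the proportional-split logic used in convertToTransitionMatrix
# above. The grid and policy values below are made up for demonstration only.
# ---------------------------------------------------------------------------
def _demoProportionalSplit():
    import numpy as np

    gridValues = np.array([0.0, 1.0, 2.0, 4.0])   # hypothetical asset grid
    policy = np.array([0.25, 1.5, 3.0])           # hypothetical savings choices

    # index of the grid point just below each policy value
    lower = np.digitize(policy, gridValues) - 1

    # share of the mass assigned to the upper bracketing grid point
    upperShare = ((policy - gridValues[lower]) /
                  (gridValues[lower + 1] - gridValues[lower]))

    # each row is one state; columns are destination grid points
    transition = np.zeros((len(policy), len(gridValues)))
    transition[np.arange(len(policy)), lower] = 1 - upperShare
    transition[np.arange(len(policy)), lower + 1] = upperShare
    return transition

# Expected result (each row sums to one):
#   [[0.75, 0.25, 0.  , 0.  ],
#    [0.  , 0.5 , 0.5 , 0.  ],
#    [0.  , 0.  , 0.5 , 0.5 ]]
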
def __init__(self, scenario, DIST=None, Market=None, OPTs=None):

    if not scenario.isSteady():
        raise Exception(
            'Unable to generate income distribution moments for transition paths.')

    # PARAMETERS
    pathFinder = PathFinder(scenario)

    self.scenario = scenario
    save_dir = PathFinder.getCacheDir(scenario)

    # Define time constants and grids
    timing = ParamGenerator.timing(scenario)
    grids = ParamGenerator.grids(scenario)
    T_life = timing['T_life']        # Total life years
    T_model = timing['T_model']      # Transition path model years
    Tmax_work = timing['Tmax_work']  # Largest retirement age
    ng = grids['ng']                 # num groups
    nz = grids['nz']                 # num labor productivity shocks
    zs = grids['zs']                 # shocks grid (by demographic type and age)
    nk = grids['nk']                 # num asset points
    nb = grids['nb']                 # num avg. earnings points

    # Useful later for a couple of functions
    self.kv = grids['kv']
    self.karray = np.tile(np.reshape(grids['kv'], [1, nk, 1, 1, 1, 1]),
                          [nz, 1, nb, T_life, ng, T_model])
    self.T_work = Tmax_work
    self.T_life = T_life

    ## DISTRIBUTION AND POLICY FUNCTIONS

    # Import households distribution
    if DIST is None:
        with open(os.path.join(save_dir, 'distribution.pkl'), 'rb') as handle:
            s = pickle.load(handle)
        DIST = s['DIST']

    dist = DIST.flatten(order='F')
    if T_model == 1:
        DIST = DIST[:, :, :, :, :, np.newaxis]

    dist_l = np.zeros((nz, nk, nb, T_life, ng, T_model))
    # Working age population
    dist_l[0:nz, 0:nk, 0:nb, 0:Tmax_work, 0:ng, 0:T_model] = DIST[
        0:nz, 0:nk, 0:nb, 0:Tmax_work, 0:ng, 0:T_model]
    # Retired population
    dist_l[0:nz, 0:nk, 0:nb, Tmax_work - 1:T_life, 0:ng, 0:T_model] = 0
    dist_l = dist_l.flatten(order='F') / np.sum(dist_l)

    # Useful later for a couple of functions
    self.DIST = DIST

    # Import market variables
    if Market is None:
        with open(os.path.join(save_dir, 'market.pkl'), 'rb') as handle:
            s = pickle.load(handle)
        wages = s['wages']
        capsharesAM = s['capsharesAM']
        bondDividendRates = s['bondDividendRates']
        equityDividendRates = s['equityDividendRates']
    else:
        wages = Market['wages']
        capsharesAM = Market['capsharesAM']
        bondDividendRates = Market['bondDividendRates']
        equityDividendRates = Market['equityDividendRates']

    # Import policy functions
    f = lambda X: np.tile(np.reshape(X, [nz, nk, nb, T_life, 1, T_model]),
                          [1, 1, 1, 1, ng, 1])
    if OPTs is None:
        with open(os.path.join(save_dir, 'decisions.pkl'), 'rb') as handle:
            s = pickle.load(handle)
        s = s['OPTs']
        labinc = f(s['LABOR']) * np.tile(
            np.reshape(np.transpose(zs, [2, 1, 0]),
                       [nz, 1, 1, T_life, 1, T_model]),
            [1, nk, nb, 1, ng, 1]) * wages
        k = f(s['SAVINGS'])
        self.ben = f(s['OASI_BENEFITS'])
        self.lab = f(s['LABOR'])
        self.con = f(s['CONSUMPTION'])
    else:
        labinc = f(OPTs['LABOR']) * np.tile(
            np.reshape(np.transpose(zs, [2, 1, 0]),
                       [nz, 1, 1, T_life, 1, T_model]),
            [1, nk, nb, 1, ng, 1]) * wages
        k = f(OPTs['SAVINGS'])
        self.ben = f(OPTs['OASI_BENEFITS'])
        self.lab = f(OPTs['LABOR'])
        self.con = f(OPTs['CONSUMPTION'])

    kinc = ((1 - capsharesAM) * bondDividendRates +
            capsharesAM * equityDividendRates) * k
    totinc = (labinc.flatten(order='F') + kinc.flatten(order='F') +
              self.ben.flatten(order='F'))   # Total income
    labinc = labinc.flatten(order='F')       # Labor income
    k = k.flatten(order='F')                 # Asset holdings for tomorrow (k')

    # DATA WEALTH AND INCOME DISTRIBUTIONS
    file = pathFinder.getMicrosimInputPath(
        'SIM_NetPersonalWealth_distribution')
    self.a_distdata = pd.read_csv(file)
    # Append last point for graph
    self.a_distdata.loc[len(self.a_distdata)] = [99.9, float('nan'), 1]

    file = pathFinder.getMicrosimInputPath(
        'SIM_PreTaxLaborInc_distribution')
    self.l_distdata = pd.read_csv(file)
    # Append last point for graph
    self.l_distdata.loc[len(self.l_distdata)] = [99.9, float('nan'), 1]

    # MODEL WEALTH AND INCOME DISTRIBUTIONS

    # Compute wealth distribution
    self.a_distmodel = get_moments(dist, k)
    # Gini and Lorenz curve
    (self.a_ginimodel, self.a_lorenz) = gini(dist, k)

    # Compute labor income distribution
    self.l_distmodel = get_moments(dist_l, labinc)
    # Gini and Lorenz curve
    (self.l_ginimodel, self.l_lorenz) = gini(dist_l, labinc)

    # Compute total income distribution
    self.t_distmodel = get_moments(dist, totinc)
    # Gini and Lorenz curve
    (self.t_ginimodel, self.t_lorenz) = gini(dist, totinc)
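

# ---------------------------------------------------------------------------
# Illustrative sketch (not this module's get_moments/gini implementation,
# which is defined elsewhere): one standard way to compute a Lorenz curve and
# Gini coefficient from a vector of population weights and a vector of values,
# matching how gini(dist, x) is used above. The function name and the sample
# inputs are hypothetical.
# ---------------------------------------------------------------------------
def _sketchGini(weights, values):
    import numpy as np

    weights = np.asarray(weights, dtype=float)
    values = np.asarray(values, dtype=float)

    # sort observations by value and normalize weights to sum to one
    order = np.argsort(values)
    weights = weights[order] / np.sum(weights)
    values = values[order]

    # cumulative population share and cumulative value share (Lorenz curve)
    popShare = np.cumsum(weights)
    valueShare = np.cumsum(weights * values) / np.sum(weights * values)

    # Gini = 1 - 2 * area under the Lorenz curve (trapezoidal approximation)
    lorenzArea = np.trapz(np.concatenate(([0.0], valueShare)),
                          np.concatenate(([0.0], popShare)))
    giniCoefficient = 1 - 2 * lorenzArea
    return giniCoefficient, np.column_stack((popShare, valueShare))

# Example (hypothetical data): perfectly equal values give a Gini of zero.
#   _sketchGini([0.25, 0.25, 0.25, 0.25], [1.0, 1.0, 1.0, 1.0])  ->  (0.0, ...)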