def isSolved(self):
    from pathFinderModule import PathFinder

    flag = os.path.exists(
        os.path.join(PathFinder.getCacheDir(self), 'solved'))
    if flag:
        # Check that the scenario at the hashed location is this scenario
        with open(
                os.path.join(PathFinder.getCacheDir(self), 'scenario.pkl'),
                'rb') as handle:
            s = pickle.load(handle)
        flag = self.isEquivalent(Scenario(s['scenario']))
        if not flag:
            # TBD: Until caching is revised.
            raise Exception(
                'Scenario:BAD_CACHING - WARNING! Cached scenario at location is not this scenario.'
            )
    return flag
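# Illustrative usage sketch (not part of the original module): how a caller
# might combine isSolved() with the solver so that expensive work only runs
# when no valid cache exists. The local imports mirror the pattern used in
# export() below; the function name and `scenario` argument are hypothetical.
def _example_solve_if_needed(scenario):
    from pathFinderModule import PathFinder
    if not scenario.isSolved():
        from modelSolverModule import ModelSolver
        ModelSolver.solve(scenario)
    # The cache directory now holds 'solved', 'scenario.pkl', and result files
    return PathFinder.getCacheDir(scenario)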
def generate(self):
    Market = {}
    Dynamic = {}
    steadyScenario = self.scenario.currentPolicy().steady()
    steady_dir = PathFinder.getCacheDir(steadyScenario)

    source = 'cached scenario'
    if os.path.isfile(os.path.join(steady_dir, 'market.pkl')):
        with open(os.path.join(steady_dir, 'market.pkl'), 'rb') as handle:
            Market = pickle.load(handle)
    if os.path.isfile(os.path.join(steady_dir, 'dynamics.pkl')):
        with open(os.path.join(steady_dir, 'dynamics.pkl'), 'rb') as handle:
            Dynamic = pickle.load(handle)

    if len(Market) == 0 or len(Dynamic) == 0:
        source = 'made-up numbers'

        # Load initial guesses (values come from some steady state results)
        Dynamic['outs'] = np.array([3.1980566])
        Dynamic['caps'] = np.array([9.1898354])
        Dynamic['labs'] = 0.5235
        captoout = Dynamic['caps'] / Dynamic['outs']
        debttoout = np.array([0.75])

        Market['beqs'] = np.array([0.153155])
        # capshare = (K/Y) / (K/Y + D/Y), where K/Y = captoout and D/Y = debttoout
        Market['capsharesAM'] = captoout / (captoout + debttoout)
        Market['capsharesPM'] = Market['capsharesAM']
        Market['rhos'] = 4.94974
        # I/K = population growth rate (0.0078) + depreciation (0.056)
        Market['invtocaps'] = 0.0078 + 0.056
        Market['investmentToCapital0'] = 0.16
        Market['equityDividendRates'] = 0.05
        Market['worldAfterTaxReturn'] = 0.05
        Market['corpLeverageCost'] = 2
        Market['passLeverageCost'] = 2

        Dynamic['debts'] = Dynamic['outs'] * debttoout
        Dynamic['assetsAM'] = Dynamic['caps'] + Dynamic['debts']  # Assume p_K(0) = 1
        Dynamic['assetsPM'] = Dynamic['assetsAM']
        Dynamic['labeffs'] = Dynamic['caps'] / Market['rhos']
        Dynamic['investment'] = Dynamic['caps'] * Market['invtocaps']
        Dynamic['caps_foreign'] = 0

    setattr(self, 'Market', Market)
    setattr(self, 'Dynamic', Dynamic)

    print('[INFO] Generated new initial guess from %s. \n' % source)
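# A minimal numeric check of the capital-share arithmetic above, using the
# same made-up fallback numbers: capshare = (K/Y) / (K/Y + D/Y), with
# K/Y = caps/outs ~= 9.1898354 / 3.1980566 ~= 2.874 and D/Y = 0.75, giving
# capsharesAM ~= 0.793. The function name is illustrative only.
def _example_capshare_arithmetic():
    caps = np.array([9.1898354])
    outs = np.array([3.1980566])
    debttoout = np.array([0.75])
    captoout = caps / outs                         # K/Y
    capshare = captoout / (captoout + debttoout)   # (K/Y) / (K/Y + D/Y)
    assert np.isclose(capshare, 0.793, atol=1e-3)
    return capshare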
def SS_distribution(self):
    s = {}

    # Import variables common to all elements of s
    dist_retired = self.DIST[:, :, :, (self.T_work + 1):self.T_life, :, :, :]
    ben_retired = self.ben[:, :, :, (self.T_work + 1):self.T_life, :, :, :]
    dist_retired = dist_retired.flatten(order='F')
    ben_retired = ben_retired.flatten(order='F')

    # Calculate SS outlays as a percentage of GDP
    steady_dir = PathFinder.getCacheDir(self.scenario)
    with open(os.path.join(steady_dir, 'dynamics.pkl'), 'rb') as handle:
        s_dynamics = pickle.load(handle)
    s['SSbentoout'] = np.sum(ben_retired * dist_retired) / s_dynamics['outs']
    s['SStaxtoout'] = s_dynamics['ssts'] / s_dynamics['outs']

    # Table with distribution of Social Security benefits among retired households
    dist_retired0 = self.DIST[:, :, 0, (self.T_work + 1):self.T_life, :, :, :]
    dist_retired0 = dist_retired0.flatten(order='F') / np.sum(dist_retired)
    dist_retired = dist_retired / np.sum(dist_retired)

    ben_distmodel = get_moments(dist_retired, ben_retired)
    ben0 = {
        'percentile': np.sum(dist_retired0),
        'threshold': 0,
        'cumulativeShare': 0
    }
    s['ben_dist'] = pd.DataFrame(ben0, index=[0])
    s['ben_dist'] = s['ben_dist'].append(ben_distmodel)

    # Average asset holdings of retirees earning no SS benefits
    # (slice matches dist_retired0 so the weights broadcast correctly)
    k_retired0 = self.karray[:, :, 0, (self.T_work + 1):self.T_life, :, :, :]
    k_retired0 = k_retired0.flatten(order='F')
    k_retired0 = k_retired0 * dist_retired0
    s['k_retired0'] = np.sum(k_retired0) / np.sum(
        dist_retired0) / self.scenario['modelunit_dollar']

    # Average consumption of retirees earning no SS benefits
    c_retired0 = self.con[:, :, 0, (self.T_work + 1):self.T_life, :, :, :]
    c_retired0 = c_retired0.flatten(order='F')
    c_retired0 = c_retired0 * dist_retired0
    s['c_retired0'] = np.sum(c_retired0) / np.sum(
        dist_retired0) / self.scenario['modelunit_dollar']

    return s
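# Sketch of the weighting pattern used throughout SS_distribution: every
# retiree statistic is a distribution-weighted mean, sum(x * w) / sum(w),
# rescaled out of model units. All numbers here are toy values, and
# `modelunit_dollar` is a stand-in for the scenario's scaling constant.
def _example_weighted_retiree_mean():
    x = np.array([1.0, 2.0, 4.0])   # e.g. asset holdings on a tiny grid
    w = np.array([0.2, 0.3, 0.5])   # population mass at each grid point
    modelunit_dollar = 1e-5         # hypothetical scaling constant
    return np.sum(x * w) / np.sum(w) / modelunit_dollar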
def export(self, outputName=None):

    # If no outputName, create one from the Scenario
    if outputName is None:
        outputName = self.Description

    if not self.isSolved():
        from modelSolverModule import ModelSolver
        ModelSolver.solve(self)

    from pathFinderModule import PathFinder
    cacheDir = PathFinder.getCacheDir(self)
    outDir = PathFinder(self).getNamedOutputPath(outputName)

    print('Exporting scenario to %s \n' % outDir)
    if os.path.exists(outDir):
        shutil.rmtree(outDir)
    # Copy the whole cache directory (copyfile only handles single files)
    shutil.copytree(cacheDir, outDir)
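# Hypothetical call site for export() (the output name is made up): because
# export() checks isSolved() itself, a caller never needs to solve first.
def _example_export(scenario):
    scenario.export(outputName='baseline_run')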
def writeTransitionMatrix(scenario):

    # Load solution objects
    from pathFinderModule import PathFinder
    cacheDir = PathFinder.getCacheDir(scenario)
    with open(os.path.join(cacheDir, 'decisions.pkl'), 'rb') as handle:
        OPTs = pickle.load(handle)

    # Get the base output directory
    baseOutputDir = PathFinder.getTransitionMatrixOutputDir()

    # Create output folder if it does not exist
    if not os.path.exists(baseOutputDir):
        os.mkdir(baseOutputDir)

    # Get the tagged subfolder output directory
    outputDir = os.path.join(baseOutputDir,
                             PathFinder.getScenarioPathTag(scenario))

    # Check whether the scenario output subfolder exists;
    # if it does, this is a duplicate write-out
    if os.path.exists(outputDir):
        return None

    # Check if the map file exists; create it if it does not
    if not os.path.exists(os.path.join(baseOutputDir, 'map.csv')):
        with open(os.path.join(baseOutputDir, 'map.csv'), 'w') as fileHandle:
            for k in scenario.getParams().keys():
                fileHandle.write(k + ',')
            fileHandle.write('\n')

    # Append scenario info to the map file by writing out to a text file,
    # then loading the text file back in
    with open('.temp.txt', 'w') as f:
        values = scenario.getParams()
        w = csv.DictWriter(f, values.keys())
        w.writerow(values)
    with open('.temp.txt', 'r') as f:
        text = f.read()
    os.remove('.temp.txt')
    with open(os.path.join(baseOutputDir, 'map.csv'), 'a+') as fileHandle:
        fileHandle.write(scenario.basedeftag + ',' +
                         scenario.counterdeftag + ',' + text)

    # Create a folder to store output
    os.mkdir(outputDir)

    # Converts a policy function into a discretized transition matrix.
    # If the policy doesn't fall neatly onto the grid, probability is split
    # between the two nearest grid points in proportion to distance.
    def convertToTransitionMatrix(policy, values, dim):
        policy = policy.flatten(order='F')
        # Index of the lower bracketing grid point (np.digitize is 1-based);
        # clip so an upper neighbor always exists
        discrete = np.minimum(np.digitize(policy, values) - 1,
                              len(values) - 2)
        distanceToBinEdge = policy - values[discrete]
        distanceToBinEdgeUpper = policy - values[discrete + 1]
        upperProbability = distanceToBinEdge / (distanceToBinEdge -
                                                distanceToBinEdgeUpper)
        rows = np.arange(grids['nz'] * grids['nk'] * grids['nb'])
        transition = np.zeros((len(discrete), dim))
        transition[rows, discrete + 1] = upperProbability
        transition[rows, discrete] = 1 - upperProbability
        return transition

    # For a given age and year, discretize asset and lifetime-earnings
    # transitions. Store output in the `transitions` variable.
    transitions = {}

    # Store grids for easy access
    from paramGeneratorModule import ParamGenerator
    grids = ParamGenerator.grids(scenario)

    for age in range(OPTs['SAVINGS'].shape[3]):
        for year in range(OPTs['SAVINGS'].shape[4]):

            # Compute transition matrices for full state -> assets,
            # earnings grid
            assetsTransition = convertToTransitionMatrix(
                OPTs['SAVINGS'][:, :, :, age, year], grids['kv'],
                grids['nk'])
            earningsTransition = convertToTransitionMatrix(
                OPTs['AVG_EARNINGS'][:, :, :, age, year], grids['bv'],
                grids['nb'])

            # Compute the joint transition of assets and earnings
            assetEarningsTransition = (
                np.kron(np.ones((1, grids['nb'])), assetsTransition) *
                np.kron(earningsTransition, np.ones((1, grids['nk']))))

            # Expand the joint transition of assets and earnings to the
            # full state space size
            assetEarningsTransition = np.kron(np.ones((1, grids['nz'])),
                                              assetEarningsTransition)

            # Get the productivity transition matrix
            productivityTransition = grids['transz']
            productivityTransition = np.squeeze(
                productivityTransition[age, :, :])

            # Expand it to the full state space size
            productivityTransition = np.kron(
                productivityTransition,
                np.ones((grids['nb'] * grids['nk'],
                         grids['nb'] * grids['nk'])))

            # Multiply to get the full transition matrix
            transitionMatrix = (productivityTransition *
                                assetEarningsTransition)

            # Save the transition matrix into the dict
            transitions['age' + str(age) + 'year' + str(year)] = \
                transitionMatrix

    with open(os.path.join(outputDir, 'data.pkl'), 'wb') as handle:
        pickle.dump(transitions, handle, protocol=pickle.HIGHEST_PROTOCOL)
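# A small sanity sketch of the Kronecker expansion used above: replicating a
# k-transition across b-blocks and a b-transition across k-entries, then
# multiplying elementwise, yields a joint (k, b) transition whose rows sum
# to 1. The dimensions here are toy values, not the model's grids.
def _example_joint_kron_transition():
    nk, nb = 2, 3
    Tk = np.full((nk * nb, nk), 1.0 / nk)  # rows: full (k,b) state; cols: k'
    Tb = np.full((nk * nb, nb), 1.0 / nb)  # rows: full (k,b) state; cols: b'
    joint = (np.kron(np.ones((1, nb)), Tk) *
             np.kron(Tb, np.ones((1, nk))))
    # Entry (i, b'*nk + k') equals Tb[i, b'] * Tk[i, k'], so rows sum to 1
    assert np.allclose(joint.sum(axis=1), 1.0)
    return joint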
def testOutput(scenario, testName, isInteractive):

    # Set to testing environment
    PathFinder.setToTestingMode()

    # Clear the old results and solve
    ModelSolver.removeCached(scenario)
    taggedDir = ModelSolver.solve(scenario)
    cacheDir = PathFinder.getCacheDir(scenario)

    # Set to development environment
    # TBD: Set back to original environment?
    PathFinder.setToDevelopmentMode()

    # testSet depends on the type of scenario
    if scenario.isSteady():
        setNames = ['market', 'dynamics']
    elif scenario.isCurrentPolicy():
        setNames = ['market', 'dynamics']
    else:
        setNames = ['market', 'dynamics', 'statics']

    # Load target values
    targetfile = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'ModelTester.pkl')
    with open(targetfile, 'rb') as handle:
        s = pickle.load(handle)
    target = s['target']

    # Initialize match flag
    typeDeviation = ModelTester.DEVIATION_NONE

    # Define function to flag issues
    # NOTE: Relies on severity of deviation to be increasing
    def flag(message, deviation):
        nonlocal typeDeviation
        print('\t%-15s%-20s%s\n' % (setname, valuename, message))
        if deviation > typeDeviation:
            typeDeviation = deviation

    print('\n[Test results]\n')
    output = {testName: {}}
    for setname in setNames:

        # Extract output and target values by set
        with open(os.path.join(cacheDir, '%s.pkl' % setname),
                  'rb') as handle:
            output[testName][setname] = pickle.load(handle)
        outputset = output[testName][setname]
        targetset = target[testName][setname]

        # Iterate over target values
        for valuename in targetset.keys():

            if valuename not in outputset.keys():
                # Flag missing value
                flag('Not found', ModelTester.DEVIATION_FATAL)
                continue

            if isinstance(outputset[valuename], dict):
                # Skip checking of structs -- currently just priceindex,
                # which does not need to be checked
                print('\tSkipping %s because it is a struct.\n' % valuename)
                continue

            if np.any(np.isnan(outputset[valuename][:])):
                # Flag NaN value
                flag('NaN value', ModelTester.DEVIATION_FATAL)
                continue

            if outputset[valuename].shape != targetset[valuename].shape:
                # Flag size mismatch
                flag('Size mismatch', ModelTester.DEVIATION_FATAL)
                continue

            # Classify deviation
            deviation = ModelTester.calculateDeviation(
                outputset[valuename][:], targetset[valuename][:])
            if deviation > 0:
                if deviation < 1e-6:
                    flag('TINY : %06.16f%% deviation' % (deviation * 100),
                         ModelTester.DEVIATION_TINY)
                elif deviation < 1e-4:
                    flag('SMALL: %06.16f%% deviation' % (deviation * 100),
                         ModelTester.DEVIATION_SMALL)
                else:
                    flag('LARGE: %06.4f%% deviation' % (deviation * 100),
                         ModelTester.DEVIATION_FATAL)

        # Identify new values, if any
        for valuename in outputset.keys():
            if valuename not in targetset.keys():
                flag('New', ModelTester.DEVIATION_FATAL)

    # Check for match
    if typeDeviation == ModelTester.DEVIATION_NONE:
        print('\tTarget matched.\n\n')
    else:
        if not isInteractive:
            print('\tTarget not matched.\n\n')
            return typeDeviation

        # Query user for target update
        ans = input('\n\tUpdate test target with new values? Y/[N]: ')
        if ans == 'Y':
            target[testName] = output[testName]
            with open(targetfile, 'wb') as f:
                pickle.dump(target, f)
            print('\tTarget updated.\n\n')
        else:
            print('\tTarget retained.\n\n')

    return typeDeviation
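# A minimal sketch of the deviation triage above, with the thresholds made
# explicit: below 1e-6 is TINY, below 1e-4 is SMALL, anything larger is
# LARGE and treated as fatal. Note the parenthesized (deviation * 100):
# without it, Python's % formatting binds before *, so the formatted string
# would be repeated 100 times instead of the number being scaled.
def _example_classify_deviation(deviation):
    if deviation < 1e-6:
        return 'TINY : %06.16f%% deviation' % (deviation * 100)
    elif deviation < 1e-4:
        return 'SMALL: %06.16f%% deviation' % (deviation * 100)
    return 'LARGE: %06.4f%% deviation' % (deviation * 100)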
def unanticipated_shock():

    # Make the baseline scenario and "non-shock" version
    t = ModelTester.test_params

    # Baseline scenario is not shocked
    s_baseline = Scenario(t).currentPolicy().baseline()

    # Make "non-shock" shock baseline
    t = s_baseline.getParams()
    t['PolicyShockYear'] = (t['TransitionFirstYear'] +
                            ModelTester.policyShockShift)
    s_next = Scenario(t)

    # Get baseline Market, Dynamic
    ModelSolver.removeCached(s_baseline)  # Clear cached scenario
    tagged_dir = ModelSolver.solve(s_baseline)
    baseline_dir = PathFinder.getCacheDir(s_baseline)
    with open(os.path.join(baseline_dir, 'market.pkl'), 'rb') as handle:
        baseMarket = pickle.load(handle)
    with open(os.path.join(baseline_dir, 'dynamics.pkl'), 'rb') as handle:
        baseDynamic = pickle.load(handle)

    # Get shocked Market, Dynamic
    ModelSolver.removeCached(s_next)  # Clear cached scenario
    tagged_dir = ModelSolver.solve(s_next)
    x_dir = PathFinder.getCacheDir(s_next)
    with open(os.path.join(x_dir, 'market.pkl'), 'rb') as handle:
        xMarket = pickle.load(handle)
    with open(os.path.join(x_dir, 'dynamics.pkl'), 'rb') as handle:
        xDynamic = pickle.load(handle)

    # Compare baseline and shocked paths
    print('\n')

    def do_check(baseD, xD, dName):
        passed = True
        for valuename in baseD.keys():
            if (not isinstance(baseD[valuename], numbers.Number)
                    or '_next' in valuename):
                continue

            # Check within percent tolerance, and also within absolute
            # deviation (in case of division by zero or near-zero values)
            # TBD: Standardize deviations and tolerances
            percentDeviation = abs(
                (xD[valuename] - baseD[valuename]) / baseD[valuename])
            absoluteDeviation = abs(baseD[valuename] - xD[valuename])
            if not np.all(np.array(percentDeviation) < 1e-4):
                if not np.all(np.array(absoluteDeviation) < 1e-13):
                    m1 = 'Max percentdev = %f' % np.max(percentDeviation)
                    m2 = 'Max abs dev = %0.14f' % np.max(absoluteDeviation)
                    print('%s.%s outside tolerance;\t\t %s; %s \n' %
                          (dName, valuename, m1, m2))
                    passed = False
        return passed

    # Run both checks before combining so each set reports its failures
    passed_market = do_check(baseMarket, xMarket, 'Market')
    passed_dynamic = do_check(baseDynamic, xDynamic, 'Dynamic')
    passed = passed_market and passed_dynamic
    if passed:
        print('All values within convergence tolerances.\n')

    return passed
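# Minimal sketch of the two-sided tolerance test inside do_check: a value
# passes when it is within relative tolerance of the baseline OR within a
# tiny absolute band, which guards against division by baselines at or near
# zero. Thresholds mirror those above; the function name is illustrative.
def _example_within_tolerance(base, new, rel_tol=1e-4, abs_tol=1e-13):
    absolute_dev = abs(new - base)
    percent_dev = absolute_dev / abs(base) if base != 0 else float('inf')
    return percent_dev < rel_tol or absolute_dev < abs_tol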
def __init__(self, scenario, DIST=None, Market=None, OPTs=None):

    if not scenario.isSteady():
        raise Exception(
            'Unable to generate income distribution moments for transition paths.'
        )

    # PARAMETERS
    pathFinder = PathFinder(scenario)
    self.scenario = scenario
    save_dir = PathFinder.getCacheDir(scenario)

    # Define time constants and grids
    timing = ParamGenerator.timing(scenario)
    grids = ParamGenerator.grids(scenario)
    T_life = timing['T_life']        # Total life years
    T_model = timing['T_model']      # Transition path model years
    Tmax_work = timing['Tmax_work']  # Largest retirement age
    ng = grids['ng']                 # num groups
    nz = grids['nz']                 # num labor productivity shocks
    zs = grids['zs']                 # shocks grid (by demographic type and age)
    nk = grids['nk']                 # num asset points
    nb = grids['nb']                 # num avg. earnings points

    # Useful later for a couple of functions
    self.kv = grids['kv']
    self.karray = np.tile(
        np.reshape(grids['kv'], [1, nk, 1, 1, 1, 1]),
        [nz, 1, nb, T_life, ng, T_model])
    self.T_work = Tmax_work
    self.T_life = T_life

    ## DISTRIBUTION AND POLICY FUNCTIONS

    # Import households distribution
    if DIST is None:
        with open(os.path.join(save_dir, 'distribution.pkl'),
                  'rb') as handle:
            s = pickle.load(handle)
        DIST = s['DIST']
    dist = DIST.flatten(order='F')
    if T_model == 1:
        DIST = DIST[:, :, :, :, :, np.newaxis]

    dist_l = np.zeros((nz, nk, nb, T_life, ng, T_model))
    # Working-age population
    dist_l[0:nz, 0:nk, 0:nb, 0:Tmax_work, 0:ng, 0:T_model] = \
        DIST[0:nz, 0:nk, 0:nb, 0:Tmax_work, 0:ng, 0:T_model]
    # Retired population
    dist_l[0:nz, 0:nk, 0:nb, Tmax_work:T_life, 0:ng, 0:T_model] = 0
    dist_l = dist_l.flatten(order='F') / np.sum(dist_l)

    # Useful later for a couple of functions
    self.DIST = DIST

    # Import market variables
    if Market is None:
        with open(os.path.join(save_dir, 'market.pkl'), 'rb') as handle:
            s = pickle.load(handle)
        wages = s['wages']
        capsharesAM = s['capsharesAM']
        bondDividendRates = s['bondDividendRates']
        equityDividendRates = s['equityDividendRates']
    else:
        wages = Market['wages']
        capsharesAM = Market['capsharesAM']
        bondDividendRates = Market['bondDividendRates']
        equityDividendRates = Market['equityDividendRates']

    # Import policy functions
    f = lambda X: np.tile(
        np.reshape(X, [nz, nk, nb, T_life, 1, T_model]),
        [1, 1, 1, 1, ng, 1])
    if OPTs is None:
        with open(os.path.join(save_dir, 'decisions.pkl'), 'rb') as handle:
            s = pickle.load(handle)
        s = s['OPTs']
        labinc = f(s['LABOR']) * np.tile(
            np.reshape(np.transpose(zs, [2, 1, 0]),
                       [nz, 1, 1, T_life, 1, T_model]),
            [1, nk, nb, 1, ng, 1]) * wages
        k = f(s['SAVINGS'])
        self.ben = f(s['OASI_BENEFITS'])
        self.lab = f(s['LABOR'])
        self.con = f(s['CONSUMPTION'])
    else:
        labinc = f(OPTs['LABOR']) * np.tile(
            np.reshape(np.transpose(zs, [2, 1, 0]),
                       [nz, 1, 1, T_life, 1, T_model]),
            [1, nk, nb, 1, ng, 1]) * wages
        k = f(OPTs['SAVINGS'])
        self.ben = f(OPTs['OASI_BENEFITS'])
        self.lab = f(OPTs['LABOR'])
        self.con = f(OPTs['CONSUMPTION'])

    kinc = ((1 - capsharesAM) * bondDividendRates +
            capsharesAM * equityDividendRates) * k
    totinc = (labinc.flatten(order='F') + kinc.flatten(order='F') +
              self.ben.flatten(order='F'))  # Total income
    labinc = labinc.flatten(order='F')      # Labor income
    k = k.flatten(order='F')                # Asset holdings for tomorrow (k')

    # DATA WEALTH AND INCOME DISTRIBUTIONS
    file = pathFinder.getMicrosimInputPath(
        'SIM_NetPersonalWealth_distribution')
    self.a_distdata = pd.read_csv(file)
    # Append last point for graph
    self.a_distdata.loc[len(self.a_distdata)] = [99.9, float('nan'), 1]

    file = pathFinder.getMicrosimInputPath(
        'SIM_PreTaxLaborInc_distribution')
    self.l_distdata = pd.read_csv(file)
    # Append last point for graph
    self.l_distdata.loc[len(self.l_distdata)] = [99.9, float('nan'), 1]
    # MODEL WEALTH AND INCOME DISTRIBUTIONS

    # Compute wealth distribution
    self.a_distmodel = get_moments(dist, k)
    # Gini and Lorenz curve
    (self.a_ginimodel, self.a_lorenz) = gini(dist, k)

    # Compute labor income distribution
    self.l_distmodel = get_moments(dist_l, labinc)
    # Gini and Lorenz curve
    (self.l_ginimodel, self.l_lorenz) = gini(dist_l, labinc)

    # Compute total income distribution
    self.t_distmodel = get_moments(dist, totinc)
    # Gini and Lorenz curve
    (self.t_ginimodel, self.t_lorenz) = gini(dist, totinc)
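# Hedged sketch of a discrete weighted Gini coefficient consistent with the
# gini(dist, x) calls above. The assumed signature (probability weights and
# values in, coefficient out) and the trapezoid construction are assumptions
# for illustration, not the module's actual implementation.
def _example_weighted_gini(weights, values):
    order = np.argsort(values)
    w, x = weights[order], values[order]
    cum_w = np.cumsum(w) / np.sum(w)          # cumulative population share
    cum_x = np.cumsum(w * x) / np.sum(w * x)  # cumulative income/wealth share
    # Trapezoid integration of the Lorenz curve; G = 1 - 2 * area under curve
    area = np.trapz(np.concatenate(([0.0], cum_x)),
                    np.concatenate(([0.0], cum_w)))
    return 1.0 - 2.0 * area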