def main():
    # Set targets for K/Y and the Lorenz curve based on the data
    lorenz_target = getLorenzShares(Params.SCF_wealth, weights=Params.SCF_weights,
                                    percentiles=Params.percentiles_to_match)
    lorenz_long_data = np.hstack((np.array(0.0),
                                  getLorenzShares(Params.SCF_wealth, weights=Params.SCF_weights,
                                                  percentiles=np.arange(0.01, 1.0, 0.01).tolist()),
                                  np.array(1.0)))
    KY_target = 10.26

    # Make AgentTypes for estimation
    InfiniteType = cstwMPCagent(**Params.init_infinite)
    InfiniteType.AgeDstn = np.array(1.0)
    EstimationAgentList = []
    total_types = 1
    EstimationAgentList.append(deepcopy(InfiniteType))

    # Make an economy for the consumers to live in
    EstimationEconomy = cstwMPCmarket(**Params.init_market)
    EstimationEconomy.agents = EstimationAgentList
    EstimationEconomy.KYratioTarget = KY_target
    EstimationEconomy.LorenzTarget = lorenz_target
    EstimationEconomy.LorenzData = lorenz_long_data
    EstimationEconomy.PopGroFac = 1.0
    EstimationEconomy.TypeWeight = [1.0]
    EstimationEconomy.act_T = Params.T_sim_PY
    EstimationEconomy.ignore_periods = Params.ignore_periods_PY

    # Choose the bounding region for the parameter search
    spec_name = 'BetaDistPY'
    param_name = 'DiscFac'
    dist_type = 'uniform'
    if param_name == 'CRRA':
        param_range = [0.2, 70.0]
    elif param_name == 'DiscFac':
        param_range = [0.95, 0.99]
    else:
        print('Parameter range for ' + param_name + ' has not been defined!')

    # Run the param-point estimation only
    paramPointObjective = lambda center: getKYratioDifference(Economy=EstimationEconomy,
                                                              param_name=param_name,
                                                              param_count=total_types,
                                                              center=center,
                                                              spread=0.0,
                                                              dist_type=dist_type)
    t_start = clock()
    center_estimate = brentq(paramPointObjective, param_range[0], param_range[1], xtol=1e-6)
    spread_estimate = 0.0
    t_end = clock()

    # Display statistics about the estimated model
    EstimationEconomy.LorenzBool = True
    EstimationEconomy.ManyStatsBool = True
    EstimationEconomy.distributeParams(param_name, total_types, center_estimate,
                                       spread_estimate, dist_type)
    EstimationEconomy.solve()
    EstimationEconomy.calcLorenzDistance()
    print('Estimate is center=' + str(center_estimate) + ', spread=' + str(spread_estimate) +
          ', took ' + str(t_end - t_start) + ' seconds.')
    EstimationEconomy.center_estimate = center_estimate
    EstimationEconomy.spread_estimate = spread_estimate
    EstimationEconomy.showManyStats(spec_name)
def calcLorenzDistance(SomeTypes):
    '''
    Calculates the Euclidean distance between the simulated and actual (from SCF data)
    Lorenz curves at the 20th, 40th, 60th, and 80th percentiles.

    Parameters
    ----------
    SomeTypes : [AgentType]
        List of AgentTypes that have been solved and simulated.  Current levels of
        individual assets should be stored in the attribute aLvlNow.

    Returns
    -------
    lorenz_distance : float
        Euclidean distance (square root of sum of squared differences) between
        simulated and actual Lorenz curves.
    '''
    # Define empirical Lorenz curve points
    lorenz_SCF = np.array([-0.00183091, 0.0104425, 0.0552605, 0.1751907])

    # Extract asset holdings from all consumer types
    aLvl_sim = np.concatenate([ThisType.aLvlNow for ThisType in SomeTypes])

    # Calculate simulated Lorenz curve points
    lorenz_sim = getLorenzShares(aLvl_sim, percentiles=[0.2, 0.4, 0.6, 0.8])

    # Calculate the Euclidean distance between the simulated and actual Lorenz curves
    lorenz_distance = np.sqrt(np.sum((lorenz_SCF - lorenz_sim)**2))

    # Return the Lorenz distance
    return lorenz_distance
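# Minimal usage sketch (not part of the original source): calcLorenzDistance only needs
# objects exposing an aLvlNow attribute, so synthetic stand-ins are enough to exercise it.
# The lognormal draws below are illustrative placeholder data, not SCF or model output.
from types import SimpleNamespace
import numpy as np

fake_types = [SimpleNamespace(aLvlNow=np.random.lognormal(mean=0.0, sigma=1.0, size=1000))
              for _ in range(3)]
print('Lorenz distance on synthetic data: ' + str(calcLorenzDistance(fake_types)))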
def calculateLorenzDifference(sim_wealth, weights, percentiles, target_levels):
    '''
    Calculates the sum of squared differences between the simulated Lorenz curve at
    the specified percentile levels and the target Lorenz levels.

    Parameters
    ----------
    sim_wealth : numpy.array
        Array with simulated wealth values.
    weights : numpy.array
        List of weights for each row of sim_wealth.
    percentiles : [float]
        Points in the distribution of wealth to match.
    target_levels : np.array
        Actual U.S. Lorenz curve levels at the specified percentiles.

    Returns
    -------
    distance : float
        Sum of squared distances between simulated and target Lorenz curves.
    '''
    sim_lorenz = getLorenzShares(sim_wealth, weights=weights, percentiles=percentiles)
    distance = sum((100 * sim_lorenz - 100 * target_levels)**2)
    return distance
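# Illustrative check (assumed setup, not from the original source): evaluate
# calculateLorenzDifference on synthetic wealth draws against the four SCF-style Lorenz
# targets quoted elsewhere in this file.  getLorenzShares is assumed to be imported from
# HARK.utilities, as in the surrounding code.
example_wealth = np.random.lognormal(mean=0.0, sigma=1.5, size=5000)
example_weights = np.ones_like(example_wealth)
example_targets = np.array([-0.002, 0.01, 0.053, 0.171])
print(calculateLorenzDifference(example_wealth, example_weights,
                                [0.2, 0.4, 0.6, 0.8], example_targets))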
def makeLorenzFig(real_wealth, real_weights, sim_wealth, sim_weights):
    '''
    Produces a Lorenz curve for the distribution of wealth, comparing simulated to
    actual data.  A sub-function of makeCSTWresults().

    Parameters
    ----------
    real_wealth : np.array
        Data on household wealth.
    real_weights : np.array
        Weighting array of the same size as real_wealth.
    sim_wealth : np.array
        Simulated wealth holdings of many households.
    sim_weights : np.array
        Weighting array of the same size as sim_wealth.

    Returns
    -------
    these_percents : np.array
        An array of percentiles of households, by wealth.
    real_lorenz : np.array
        Lorenz shares for real_wealth corresponding to these_percents.
    sim_lorenz : np.array
        Lorenz shares for sim_wealth corresponding to these_percents.
    '''
    these_percents = np.linspace(0.0001, 0.9999, 201)
    real_lorenz = getLorenzShares(real_wealth, weights=real_weights, percentiles=these_percents)
    sim_lorenz = getLorenzShares(sim_wealth, weights=sim_weights, percentiles=these_percents)
    plt.plot(100 * these_percents, real_lorenz, '-k', linewidth=1.5)
    plt.plot(100 * these_percents, sim_lorenz, '--k', linewidth=1.5)
    plt.xlabel('Wealth percentile', fontsize=14)
    plt.ylabel('Cumulative wealth ownership', fontsize=14)
    plt.title('Simulated vs Actual Lorenz Curves', fontsize=16)
    plt.legend(('Actual', 'Simulated'), loc=2, fontsize=12)
    plt.ylim(-0.01, 1)
    plt.show()
    return (these_percents, real_lorenz, sim_lorenz)
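# Hypothetical call (assumptions: SCF_wealth and SCF_weights are the SCF data imported
# later in this file, and sim_wealth is any array of simulated asset holdings, here
# weighted equally).  Returns the percentile grid and both Lorenz curves for further use.
pcts, lorenz_real, lorenz_simulated = makeLorenzFig(SCF_wealth, SCF_weights,
                                                    sim_wealth, np.ones_like(sim_wealth))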
def calcCSTWmpcStats(Agents):
    '''
    Calculate and print to screen overall and education-specific aggregate wealth to
    income ratios, as well as the 20th, 40th, 60th, and 80th percentile points of the
    Lorenz curve for (liquid) wealth.

    Parameters
    ----------
    Agents : [AgentType]
        List of AgentTypes in the economy.

    Returns
    -------
    None
    '''
    yLvlAll = np.concatenate([ThisType.lLvlNow for ThisType in Agents])
    aLvlAll = np.concatenate([ThisType.aLvlNow for ThisType in Agents])
    AgeAll = np.concatenate([ThisType.t_age for ThisType in Agents])
    EducAll = np.concatenate([ThisType.EducType * np.ones(ThisType.AgentCount)
                              for ThisType in Agents])
    WeightAll = 1.01**(-0.25 * AgeAll)

    yAgg = np.dot(yLvlAll, WeightAll)
    aAgg = np.dot(aLvlAll, WeightAll)
    yAggD = np.dot(yLvlAll, WeightAll * (EducAll == 0))
    yAggH = np.dot(yLvlAll, WeightAll * (EducAll == 1))
    yAggC = np.dot(yLvlAll, WeightAll * (EducAll == 2))
    aAggD = np.dot(aLvlAll, WeightAll * (EducAll == 0))
    aAggH = np.dot(aLvlAll, WeightAll * (EducAll == 1))
    aAggC = np.dot(aLvlAll, WeightAll * (EducAll == 2))
    LorenzPts = getLorenzShares(aLvlAll, weights=WeightAll, percentiles=[0.2, 0.4, 0.6, 0.8])

    print('Overall aggregate wealth to income ratio is ' + mystr(aAgg / yAgg) + ' (target 6.60).')
    print('Aggregate wealth to income ratio for dropouts is ' + mystr(aAggD / yAggD) + ' (target 1.60).')
    print('Aggregate wealth to income ratio for high school grads is ' + mystr(aAggH / yAggH) + ' (target 3.78).')
    print('Aggregate wealth to income ratio for college grads is ' + mystr(aAggC / yAggC) + ' (target 8.84).')
    print('Share of liquid wealth of the bottom 20% is ' + mystr(100 * LorenzPts[0]) + '% (target 0.0%).')
    print('Share of liquid wealth of the bottom 40% is ' + mystr(100 * LorenzPts[1]) + '% (target 0.4%).')
    print('Share of liquid wealth of the bottom 60% is ' + mystr(100 * LorenzPts[2]) + '% (target 2.5%).')
    print('Share of liquid wealth of the bottom 80% is ' + mystr(100 * LorenzPts[3]) + '% (target 11.7%).')
def objectiveFuncWealth(center, spread):
    '''
    Objective function for the beta-dist estimation, similar to cstwMPC: returns the
    distance between simulated and actual moments, namely the 20-40-60-80 Lorenz curve
    points and the average wealth to income ratio.

    Parameters
    ----------
    center : float
        Mean of distribution of discount factor.
    spread : float
        Half width of span of discount factor.

    Returns
    -------
    distance : float
        Distance between simulated and actual moments.
    '''
    DiscFacSet = approxUniform(N=TypeCount, bot=center - spread, top=center + spread)[1]
    for j in range(TypeCount):
        Agents[j](DiscFac=DiscFacSet[j])
    multiThreadCommands(Agents, ['solve()', 'initializeSim()', 'simulate()'])
    aLvl_sim = np.concatenate([agent.aLvlNow for agent in Agents])
    aNrm_sim = np.concatenate([agent.aNrmNow for agent in Agents])
    aNrmMean_sim = np.mean(aNrm_sim)
    Lorenz_sim = list(getLorenzShares(aLvl_sim, percentiles=percentile_targets))
    moments_sim = np.array([aNrmMean_sim] + Lorenz_sim)
    moments_diff = moments_sim - moments_data
    moments_diff[1:] *= 1  # Optional rescaling of the Lorenz share moments; a factor of 1 leaves them unweighted
    distance = np.sqrt(np.dot(moments_diff, moments_diff))
    print('Tried center=' + str(center) + ', spread=' + str(spread) +
          ', got distance=' + str(distance))
    print(moments_sim)
    return distance
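# One possible way to minimize the objective above (a sketch under assumptions, not the
# search routine used in cstwMPC itself, which targets K/Y with brentq and the Lorenz
# distance with golden): hand the two-argument objective to scipy's Nelder-Mead simplex
# with a rough starting guess for (center, spread).  Starting values are illustrative.
from scipy.optimize import minimize

opt_result = minimize(lambda x: objectiveFuncWealth(x[0], x[1]),
                      x0=np.array([0.97, 0.01]), method='Nelder-Mead')
center_est, spread_est = opt_result.x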
print("The mean of individual wealth is "+ str(sim_wealth.mean()) + ";\n the standard deviation is " + str(sim_wealth.std())+";\n the median is " + str(np.median(sim_wealth)) +".") # %% {"code_folding": []} # Get some tools for plotting simulated vs actual wealth distributions from HARK.utilities import getLorenzShares, getPercentiles # The cstwMPC model conveniently has data on the wealth distribution # from the U.S. Survey of Consumer Finances from HARK.cstwMPC.SetupParamsCSTW import SCF_wealth, SCF_weights # %% {"code_folding": []} # Construct the Lorenz curves and plot them pctiles = np.linspace(0.001,0.999,15) SCF_Lorenz_points = getLorenzShares(SCF_wealth,weights=SCF_weights,percentiles=pctiles) sim_Lorenz_points = getLorenzShares(sim_wealth,percentiles=pctiles) # Plot plt.figure(figsize=(5,5)) plt.title('Wealth Distribution') plt.plot(pctiles,SCF_Lorenz_points,'--k',label='SCF') plt.plot(pctiles,sim_Lorenz_points,'-b',label='Benchmark KS') plt.plot(pctiles,pctiles,'g-.',label='45 Degree') plt.xlabel('Percentile of net worth') plt.ylabel('Cumulative share of wealth') plt.legend(loc=2) plt.ylim([0,1]) make_figs('wealth_distribution_1', True, False) # remark.show('')
def runRoszypalSchlaffmanExperiment(CorrAct, CorrPcvd, DiscFac_center, DiscFac_spread, numTypes, simPeriods):
    '''
    Solve and simulate a consumer type who misperceives the extent of serial correlation
    of persistent shocks to income.

    Parameters
    ----------
    CorrAct : float
        Serial correlation coefficient for *actual* persistent income.
    CorrPcvd : float
        Serial correlation coefficient that the consumer *perceives* for persistent income.
    DiscFac_center : float
        A measure of centrality for the distribution of the beta parameter, DiscFac.
    DiscFac_spread : float
        A measure of spread or diffusion for the distribution of the beta parameter.
    numTypes : int
        Number of different types of agents (distributed using DiscFac_center and DiscFac_spread).
    simPeriods : int
        Number of periods to simulate before calculating distributions.

    Returns
    -------
    AggWealthRatio : float
        Ratio of aggregate wealth to income.
    Lorenz : numpy.array
        Two stacked 1D arrays representing the Lorenz curve for assets in the most
        recent simulated period.
    Gini : float
        Gini coefficient for assets in the most recent simulated period.
    Avg_MPC : numpy.array
        Average marginal propensity to consume by income quintile in the latest
        simulated period.
    '''
    # Make a dictionary to construct our consumer type
    ThisDict = copy(BaselineDict)
    ThisDict['PrstIncCorr'] = CorrAct

    # Make a N=numTypes point approximation to a uniform distribution of DiscFac
    DiscFac_list = approxUniform(N=numTypes, bot=DiscFac_center - DiscFac_spread,
                                 top=DiscFac_center + DiscFac_spread)[1]

    type_list = []
    # Make a PersistentShockConsumerTypeX for each value of beta saved in DiscFac_list
    for i in range(len(DiscFac_list)):
        ThisDict['DiscFac'] = DiscFac_list[i]
        ThisType = PersistentShockConsumerTypeX(**ThisDict)

        # Make the consumer *believe* he will face a different level of persistence
        ThisType.PrstIncCorr = CorrPcvd
        ThisType.updatepLvlNextFunc()  # *thinks* E[p_{t+1}] as a function of p_t is different than it is

        # Solve the consumer's problem with *perceived* persistence
        ThisType.solve()

        # Make the consumer type experience the true level of persistence during simulation
        ThisType.PrstIncCorr = CorrAct
        ThisType.updatepLvlNextFunc()

        # Simulate the agents for many periods
        ThisType.T_sim = simPeriods
        #ThisType.track_vars = ['cLvlNow','aLvlNow','pLvlNow','MPCnow']
        ThisType.initializeSim()
        ThisType.simulate()
        type_list.append(ThisType)

    # Get the most recent simulated values of X = cLvlNow, MPCnow, aLvlNow, pLvlNow for all types
    cLvl_all = np.concatenate([ThisType.cLvlNow for ThisType in type_list])
    aLvl_all = np.concatenate([ThisType.aLvlNow for ThisType in type_list])
    MPC_all = np.concatenate([ThisType.MPCnow for ThisType in type_list])
    pLvl_all = np.concatenate([ThisType.pLvlNow for ThisType in type_list])

    # The ratio of aggregate assets over the income
    AggWealthRatio = np.mean(aLvl_all) / np.mean(pLvl_all)

    # first 1D array: Create points in the range (0,1)
    wealth_percentile = np.linspace(0.001, 0.999, 201)

    # second 1D array: Compute Lorenz shares for the created points
    Lorenz_init = getLorenzShares(aLvl_all, percentiles=wealth_percentile)

    # Stick 0 and 1 at the boundaries of both arrays to make it inclusive on the range [0,1]
    Lorenz_init = np.concatenate([[0], Lorenz_init, [1]])
    wealth_percentile = np.concatenate([[0], wealth_percentile, [1]])

    # Create a list of wealth_percentile 1D array and Lorenz Shares 1D array
    Lorenz = np.stack((wealth_percentile, Lorenz_init))

    # Compute the Gini coefficient as one minus twice the (approximate) area under the Lorenz curve
    Gini = 1.0 - 2.0 * np.mean(Lorenz_init)

    # Compute the average MPC by income quintile in the latest simulated period
    Avg_MPC = calcSubpopAvg(MPC_all, pLvl_all,
                            cutoffs=[(0.0, 0.2), (0.2, 0.4), (0.4, 0.6), (0.6, 0.8), (0.8, 1.0)])

    return AggWealthRatio, Lorenz, Gini, Avg_MPC
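# Illustrative usage (all parameter values are assumptions chosen for this example): an
# agent who faces persistence 0.98 but perceives 0.99, with three discount-factor types.
AggWealthRatio, Lorenz, Gini, Avg_MPC = runRoszypalSchlaffmanExperiment(
    CorrAct=0.98, CorrPcvd=0.99, DiscFac_center=0.96, DiscFac_spread=0.01,
    numTypes=3, simPeriods=1000)
print('Aggregate wealth to income ratio: ' + str(AggWealthRatio) + ', Gini: ' + str(Gini))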
def calcStats(self, aLvlNow, pLvlNow, MPCnow, lIncomeLvl, EmpNow, t_age, LorenzBool, ManyStatsBool):
    '''
    Calculate various statistics about the current population in the economy.

    Parameters
    ----------
    aLvlNow : [np.array]
        Arrays with end-of-period assets, listed by each ConsumerType in self.agents.
    pLvlNow : [np.array]
        Arrays with permanent income levels, listed by each ConsumerType in self.agents.
    MPCnow : [np.array]
        Arrays with marginal propensity to consume, listed by each ConsumerType in self.agents.
    lIncomeLvl : [np.array]
        Arrays with labor income levels, listed by each ConsumerType in self.agents.
    EmpNow : [np.array]
        Arrays with employment states: True if employed, False otherwise.
    t_age : [np.array]
        Arrays with periods elapsed since model entry, listed by each ConsumerType in self.agents.
    LorenzBool : bool
        Indicator for whether the Lorenz target points should be calculated.  Usually
        False, only True when DiscFac has been identified for a particular nabla.
    ManyStatsBool : bool
        Indicator for whether a lot of statistics for tables should be calculated.
        Usually False, only True when parameters have been estimated and we want
        values for tables.

    Returns
    -------
    None
    '''
    # Combine inputs into single arrays
    aLvl = np.hstack(aLvlNow)
    pLvl = np.hstack(pLvlNow)
    age = np.hstack(t_age)
    IncLvl = np.hstack(lIncomeLvl)
    Emp = np.hstack(EmpNow)

    # Calculate the capital to income ratio in the economy
    CohortWeight = self.PopGroFac**(-age)
    CapAgg = np.sum(aLvl * CohortWeight)
    IncAgg = np.sum(IncLvl * CohortWeight)
    KtoYnow = CapAgg / IncAgg
    self.KtoYnow = KtoYnow

    # Store Lorenz data if requested
    self.LorenzLong = np.nan
    if LorenzBool:
        order = np.argsort(aLvl)
        aLvl = aLvl[order]
        CohortWeight = CohortWeight[order]
        wealth_shares = getLorenzShares(aLvl, weights=CohortWeight,
                                        percentiles=self.LorenzPercentiles, presorted=True)
        self.Lorenz = wealth_shares
        if ManyStatsBool:
            self.LorenzLong = getLorenzShares(aLvl, weights=CohortWeight,
                                              percentiles=np.arange(0.01, 1.0, 0.01),
                                              presorted=True)
    else:
        self.Lorenz = np.nan  # Store nothing if we don't want Lorenz data

    # Calculate a whole bunch of statistics if requested
    if ManyStatsBool:
        # Reshape other inputs
        MPC = np.hstack(MPCnow)

        # Sort other data items if aLvl and CohortWeight were sorted
        if LorenzBool:
            pLvl = pLvl[order]
            MPC = MPC[order]
            IncLvl = IncLvl[order]
            age = age[order]
            Emp = Emp[order]
        aNrm = aLvl / pLvl  # Normalized assets (wealth ratio)

        # Calculate overall population MPC and by subpopulations
        # MPC_cf_BPP is the MPC that is comparable with the empirical estimation method
        MPC_cf_BPP = 1.0 - 0.25 * ((1.0 - MPC) + (1.0 - MPC)**2 + (1.0 - MPC)**3 + (1.0 - MPC)**4)
        self.MPCall = np.sum(MPC_cf_BPP * CohortWeight) / np.sum(CohortWeight)
        employed = Emp
        unemployed = np.logical_not(employed)
        self.MPCbyWealthRatio = calcSubpopAvg(MPC_cf_BPP, aNrm, self.cutoffs, CohortWeight)
        self.MPCbyIncome = calcSubpopAvg(MPC_cf_BPP, IncLvl, self.cutoffs, CohortWeight)

        # Calculate the wealth quintile distribution of "hand to mouth" consumers
        quintile_cuts = getPercentiles(aLvl, weights=CohortWeight, percentiles=[0.2, 0.4, 0.6, 0.8])
        wealth_quintiles = np.ones(aLvl.size, dtype=int)
        wealth_quintiles[aLvl > quintile_cuts[0]] = 2
        wealth_quintiles[aLvl > quintile_cuts[1]] = 3
        wealth_quintiles[aLvl > quintile_cuts[2]] = 4
        wealth_quintiles[aLvl > quintile_cuts[3]] = 5
        # Looking at consumers with MPCs in the top 1/3
        MPC_cutoff = getPercentiles(MPC_cf_BPP, weights=CohortWeight, percentiles=[2.0 / 3.0])
        these = MPC_cf_BPP > MPC_cutoff
        in_top_third_MPC = wealth_quintiles[these]
        temp_weights = CohortWeight[these]
        hand_to_mouth_total = np.sum(temp_weights)
        hand_to_mouth_pct = []
        for q in range(1, 6):
            hand_to_mouth_pct.append(np.sum(temp_weights[in_top_third_MPC == q]) / hand_to_mouth_total)
        self.HandToMouthPct = np.array(hand_to_mouth_pct)
    else:
        # If we don't want these stats, just put empty values in history
        self.MPCall = np.nan
        self.MPCunemployed = np.nan
        self.MPCemployed = np.nan
        self.MPCretired = np.nan
        self.MPCbyWealthRatio = np.nan
        self.MPCbyIncome = np.nan
        self.HandToMouthPct = np.nan
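# Quick numeric illustration of the MPC_cf_BPP transformation used above (arbitrary example
# values): it maps a quarterly MPC into the measure compared against empirical estimates.
quarterly_mpc = np.array([0.1, 0.2, 0.5])
mpc_cf_bpp = 1.0 - 0.25 * ((1.0 - quarterly_mpc) + (1.0 - quarterly_mpc)**2
                           + (1.0 - quarterly_mpc)**3 + (1.0 - quarterly_mpc)**4)
print(mpc_cf_bpp)  # roughly [0.226, 0.410, 0.766]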
def main():
    # Set targets for K/Y and the Lorenz curve based on the data
    lorenz_target = getLorenzShares(Params.SCF_wealth, weights=Params.SCF_weights,
                                    percentiles=Params.percentiles_to_match)
    lorenz_long_data = np.hstack((np.array(0.0),
                                  getLorenzShares(Params.SCF_wealth, weights=Params.SCF_weights,
                                                  percentiles=np.arange(0.01, 1.0, 0.01).tolist()),
                                  np.array(1.0)))
    KY_target = 10.26

    # Make AgentTypes for estimation
    InfiniteType = cstwMPCagent(**Params.init_infinite)
    InfiniteType.AgeDstn = np.array(1.0)
    EstimationAgentList = []
    #Rsave_list = [1.015,1.016,1.017,1.018,1.018,1.019,1.019,1.02,1.021,1.021,1.022,1.022,1.023,1.024,1.026]  # 20 percent truncation
    Rsave_list = [1.013, 1.015, 1.016, 1.017, 1.018, 1.019, 1.019, 1.02, 1.021, 1.022,
                  1.023, 1.023, 1.024, 1.026, 1.029]  # 10 percent truncation
    pref_type_count = 3              # Number of discrete beta types in beta-dist
    r_type_count = len(Rsave_list)   # Number of discrete saving-rate types
    total_types = r_type_count * pref_type_count
    for n in range(total_types):
        EstimationAgentList.append(deepcopy(InfiniteType))
    assignRdistribution(EstimationAgentList, Rsave_list)

    # Make an economy for the consumers to live in
    EstimationEconomy = cstwMPCmarket(**Params.init_market)
    EstimationEconomy.agents = EstimationAgentList
    EstimationEconomy.KYratioTarget = KY_target
    EstimationEconomy.LorenzTarget = lorenz_target
    EstimationEconomy.LorenzData = lorenz_long_data
    EstimationEconomy.PopGroFac = 1.0
    EstimationEconomy.TypeWeight = [1.0]
    EstimationEconomy.act_T = Params.T_sim_PY
    EstimationEconomy.ignore_periods = Params.ignore_periods_PY

    # Choose the bounding region for the parameter search
    spec_name = 'BetaDistPY'
    param_name = 'DiscFac'   # Which parameter to introduce heterogeneity in
    dist_type = 'uniform'    # Which type of distribution to use
    if param_name == 'CRRA':
        param_range = [0.2, 70.0]
        spread_range = [0.00001, 1.0]
    elif param_name == 'DiscFac':
        param_range = [0.95, 0.99]
        spread_range = [0, 0.02]
    else:
        print('Parameter range for ' + param_name + ' has not been defined!')

    # Run the param-dist estimation
    paramDistObjective = lambda spread: findLorenzDistanceAtTargetKY(
        Economy=EstimationEconomy,
        param_name=param_name,
        param_count=total_types,
        center_range=param_range,
        spread=spread,
        dist_type=dist_type)
    t_start = clock()
    spread_estimate = golden(paramDistObjective, brack=spread_range, tol=1e-6)
    center_estimate = EstimationEconomy.center_save
    t_end = clock()

    # Display statistics about the estimated model
    EstimationEconomy.LorenzBool = True
    EstimationEconomy.ManyStatsBool = True
    EstimationEconomy.distributeParams(param_name, total_types, center_estimate,
                                       spread_estimate, dist_type)
    EstimationEconomy.solve()
    EstimationEconomy.calcLorenzDistance()
    print('Estimate is center=' + str(center_estimate) + ', spread=' + str(spread_estimate) +
          ', took ' + str(t_end - t_start) + ' seconds.')
    EstimationEconomy.center_estimate = center_estimate
    EstimationEconomy.spread_estimate = spread_estimate
    EstimationEconomy.showManyStats(spec_name)
def main():
    # Set targets for K/Y and the Lorenz curve based on the data
    if Params.do_liquid:
        lorenz_target = np.array([0.0, 0.004, 0.025, 0.117])
        KY_target = 6.60
    else:  # This is hacky until I can find the liquid wealth data and import it
        lorenz_target = getLorenzShares(Params.SCF_wealth, weights=Params.SCF_weights,
                                        percentiles=Params.percentiles_to_match)
        lorenz_long_data = np.hstack((np.array(0.0),
                                      getLorenzShares(Params.SCF_wealth, weights=Params.SCF_weights,
                                                      percentiles=np.arange(0.01, 1.0, 0.01).tolist()),
                                      np.array(1.0)))
        #lorenz_target = np.array([-0.002, 0.01, 0.053,0.171])
        KY_target = 10.26

    # Make AgentTypes for estimation
    if Params.do_lifecycle:
        DropoutType = cstwMPCagent(**Params.init_dropout)
        DropoutType.AgeDstn = calcStationaryAgeDstn(DropoutType.LivPrb, True)
        HighschoolType = deepcopy(DropoutType)
        HighschoolType(**Params.adj_highschool)
        HighschoolType.AgeDstn = calcStationaryAgeDstn(HighschoolType.LivPrb, True)
        CollegeType = deepcopy(DropoutType)
        CollegeType(**Params.adj_college)
        CollegeType.AgeDstn = calcStationaryAgeDstn(CollegeType.LivPrb, True)
        DropoutType.update()
        HighschoolType.update()
        CollegeType.update()
        EstimationAgentList = []
        for n in range(Params.pref_type_count):
            EstimationAgentList.append(deepcopy(DropoutType))
            EstimationAgentList.append(deepcopy(HighschoolType))
            EstimationAgentList.append(deepcopy(CollegeType))
    else:
        if Params.do_agg_shocks:
            PerpetualYouthType = cstwMPCagent(**Params.init_agg_shocks)
        else:
            PerpetualYouthType = cstwMPCagent(**Params.init_infinite)
        PerpetualYouthType.AgeDstn = np.array(1.0)
        EstimationAgentList = []
        for n in range(Params.pref_type_count):
            EstimationAgentList.append(deepcopy(PerpetualYouthType))

    # Give all the AgentTypes different seeds
    for j in range(len(EstimationAgentList)):
        EstimationAgentList[j].seed = j

    # Make an economy for the consumers to live in
    EstimationEconomy = cstwMPCmarket(**Params.init_market)
    EstimationEconomy.agents = EstimationAgentList
    EstimationEconomy.KYratioTarget = KY_target
    EstimationEconomy.LorenzTarget = lorenz_target
    EstimationEconomy.LorenzData = lorenz_long_data
    if Params.do_lifecycle:
        EstimationEconomy.PopGroFac = Params.PopGroFac
        EstimationEconomy.TypeWeight = Params.TypeWeight_lifecycle
        EstimationEconomy.T_retire = Params.working_T - 1
        EstimationEconomy.act_T = Params.T_sim_LC
        EstimationEconomy.ignore_periods = Params.ignore_periods_LC
    else:
        EstimationEconomy.PopGroFac = 1.0
        EstimationEconomy.TypeWeight = [1.0]
        EstimationEconomy.act_T = Params.T_sim_PY
        EstimationEconomy.ignore_periods = Params.ignore_periods_PY
    if Params.do_agg_shocks:
        EstimationEconomy(**Params.aggregate_params)
        EstimationEconomy.update()
        EstimationEconomy.makeAggShkHist()

    # Estimate the model as requested
    if Params.run_estimation:
        # Choose the bounding region for the parameter search
        if Params.param_name == 'CRRA':
            param_range = [0.2, 70.0]
            spread_range = [0.00001, 1.0]
        elif Params.param_name == 'DiscFac':
            param_range = [0.95, 0.995]
            spread_range = [0.006, 0.008]
        else:
            print('Parameter range for ' + Params.param_name + ' has not been defined!')

        if Params.do_param_dist:
            # Run the param-dist estimation
            paramDistObjective = lambda spread: findLorenzDistanceAtTargetKY(
                Economy=EstimationEconomy,
                param_name=Params.param_name,
                param_count=Params.pref_type_count,
                center_range=param_range,
                spread=spread,
                dist_type=Params.dist_type)
            t_start = clock()
            spread_estimate = golden(paramDistObjective, brack=spread_range, tol=1e-4)
            center_estimate = EstimationEconomy.center_save
            t_end = clock()
        else:
            # Run the param-point estimation only
            paramPointObjective = lambda center: getKYratioDifference(
                Economy=EstimationEconomy,
                param_name=Params.param_name,
                param_count=Params.pref_type_count,
                center=center,
                spread=0.0,
                dist_type=Params.dist_type)
            t_start = clock()
            center_estimate = brentq(paramPointObjective, param_range[0], param_range[1], xtol=1e-6)
            spread_estimate = 0.0
            t_end = clock()

        # Display statistics about the estimated model
        #center_estimate = 0.986609223266
        #spread_estimate = 0.00853886395698
        EstimationEconomy.LorenzBool = True
        EstimationEconomy.ManyStatsBool = True
        EstimationEconomy.distributeParams(Params.param_name, Params.pref_type_count,
                                           center_estimate, spread_estimate, Params.dist_type)
        EstimationEconomy.solve()
        EstimationEconomy.calcLorenzDistance()
        print('Estimate is center=' + str(center_estimate) + ', spread=' + str(spread_estimate) +
              ', took ' + str(t_end - t_start) + ' seconds.')
        EstimationEconomy.center_estimate = center_estimate
        EstimationEconomy.spread_estimate = spread_estimate
        EstimationEconomy.showManyStats(Params.spec_name)
    j += 1

# Only run below this line if module is run rather than imported:
if __name__ == "__main__":
    # =================================================================
    # ====== Make the list of consumer types for estimation ===========
    # =================================================================

    # Set target Lorenz points and K/Y ratio (MOVE THIS TO SetupParams)
    if Params.do_liquid:
        lorenz_target = np.array([0.0, 0.004, 0.025, 0.117])
        KY_target = 6.60
    else:  # This is hacky until I can find the liquid wealth data and import it
        lorenz_target = getLorenzShares(Params.SCF_wealth,
                                        weights=Params.SCF_weights,
                                        percentiles=Params.percentiles_to_match)
        #lorenz_target = np.array([-0.002, 0.01, 0.053,0.171])
        KY_target = 10.26

    # Make a vector of initial wealth-to-permanent income ratios
    a_init = drawDiscrete(N=Params.sim_pop_size,
                          P=Params.a0_probs,
                          X=Params.a0_values,
                          seed=Params.a0_seed)

    # Make the list of types for this run, whether infinite or lifecycle
    if Params.do_lifecycle:
        # Make cohort scaling array
        cohort_scale = Params.TFP_growth**(-np.arange(Params.total_T + 1))
        cohort_scale_array = np.tile(
    return AgeDstn


###############################################################################
### ACTUAL WORK BEGINS BELOW THIS LINE ########################################
###############################################################################

if __name__ == '__main__':
    # Set targets for K/Y and the Lorenz curve based on the data
    if do_liquid:
        lorenz_target = np.array([0.0, 0.004, 0.025, 0.117])
        KY_target = 6.60
    else:  # This is hacky until I can find the liquid wealth data and import it
        lorenz_target = getLorenzShares(Params.SCF_wealth,
                                        weights=Params.SCF_weights,
                                        percentiles=Params.percentiles_to_match)
        lorenz_long_data = np.hstack((np.array(0.0),
                                      getLorenzShares(Params.SCF_wealth,
                                                      weights=Params.SCF_weights,
                                                      percentiles=np.arange(0.01, 1.0, 0.01).tolist()),
                                      np.array(1.0)))
        #lorenz_target = np.array([-0.002, 0.01, 0.053,0.171])
        KY_target = 10.26

    # Set total number of simulated agents in the population
    if do_param_dist:
        if do_agg_shocks:
            Population = Params.pop_sim_agg_dist
        else: