def makeCSTWresults(DiscFac, nabla, save_name=None):
    '''
    Produces a variety of results for the cstwMPC paper (usually after estimating).

    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors.
    nabla : float
        Width of the uniform distribution of discount factors.
    save_name : string
        Name to save the calculated results, for later use in producing figures
        and tables, etc.

    Returns
    -------
    None
    '''
    DiscFac_list = approxUniform(N=Params.pref_type_count,
                                 bot=DiscFac - nabla,
                                 top=DiscFac + nabla)[1]
    assignBetaDistribution(est_type_list, DiscFac_list)
    multiThreadCommandsFake(est_type_list, beta_point_commands)
    lorenz_distance = np.sqrt(betaDistObjective(nabla))
    makeCSTWstats(DiscFac, nabla, est_type_list,
                  Params.age_weight_all, lorenz_distance, save_name)
def updateEvolution(self):
    '''
    Updates the "population punk proportion" evolution array. Fashion victims
    believe that the proportion of punks in the subsequent period is a linear
    function of the proportion of punks this period, subject to a uniform shock.
    Given attributes of self pNextIntercept, pNextSlope, pNextCount, pNextWidth,
    and pGrid, this method generates a new array for the attribute pEvolution,
    representing a discrete approximation of next period states for each
    current period state in pGrid.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    self.pEvolution = np.zeros((self.pCount, self.pNextCount))
    for j in range(self.pCount):
        pNow = self.pGrid[j]
        pNextMean = self.pNextIntercept + self.pNextSlope * pNow
        dist = approxUniform(N=self.pNextCount,
                             bot=pNextMean - self.pNextWidth,
                             top=pNextMean + self.pNextWidth)[1]
        self.pEvolution[j, :] = dist
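# For intuition, a minimal standalone sketch of the discretization above, with
# illustrative numbers rather than any model's calibration: each row of
# pEvolution holds the equiprobable next-period punk proportions implied by
# one current-period grid point.
import numpy as np
from HARK.utilities import approxUniform

pGrid = np.linspace(0.1, 0.9, 5)                     # hypothetical grid of current states
intercept, slope, width, nextCount = 0.1, 0.8, 0.05, 3
pEvolution = np.zeros((pGrid.size, nextCount))
for j, pNow in enumerate(pGrid):
    pNextMean = intercept + slope * pNow             # perceived linear law of motion
    pEvolution[j, :] = approxUniform(N=nextCount,
                                     bot=pNextMean - width,
                                     top=pNextMean + width)[1]
print(pEvolution)  # 5x3 array: three equiprobable next states per current state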
def calcKappaMean(DiscFac, nabla):
    '''
    Calculates the average MPC for the given parameters. This is a very small
    sub-function of sensitivityAnalysis.

    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors.
    nabla : float
        Width of the uniform distribution of discount factors.

    Returns
    -------
    kappa_all : float
        Average marginal propensity to consume in the population.
    '''
    DiscFac_list = approxUniform(N=Params.pref_type_count,
                                 bot=DiscFac - nabla,
                                 top=DiscFac + nabla)[1]
    assignBetaDistribution(est_type_list, DiscFac_list)
    multiThreadCommandsFake(est_type_list, beta_point_commands)
    kappa_all = calcWeightedAvg(
        # np.vstack expects a sequence of arrays, so use a list comprehension
        np.vstack([this_type.kappa_history for this_type in est_type_list]),
        np.tile(Params.age_weight_all / float(Params.pref_type_count),
                Params.pref_type_count))
    return kappa_all
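# A minimal numpy sketch (hypothetical weights) of the weight construction
# above: the age-conditional weights are divided by the number of preference
# types and tiled once per type, so the stacked weights still sum to one.
import numpy as np

age_weight = np.array([0.5, 0.3, 0.2])  # hypothetical age-conditional weights
pref_type_count = 2
weights = np.tile(age_weight / float(pref_type_count), pref_type_count)
print(weights)        # [0.25 0.15 0.1  0.25 0.15 0.1 ]
print(weights.sum())  # 1.0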
def distributeParams(self, param_name, param_count, center, spread, dist_type):
    '''
    Distributes heterogeneous values of one parameter to the AgentTypes in
    self.agents.

    Parameters
    ----------
    param_name : string
        Name of the parameter to be assigned.
    param_count : int
        Number of different values the parameter will take on.
    center : float
        A measure of centrality for the distribution of the parameter.
    spread : float
        A measure of spread or diffusion for the distribution of the parameter.
    dist_type : string
        The type of distribution to be used. Can be "lognormal" or "uniform"
        (can expand).

    Returns
    -------
    None
    '''
    # Get a list of discrete values for the parameter
    if dist_type == 'uniform':
        # If uniform, center is middle of distribution, spread is distance to either edge
        param_dist = approxUniform(N=param_count, bot=center - spread, top=center + spread)
    elif dist_type == 'lognormal':
        # If lognormal, center is the mean and spread is the standard deviation (in log)
        tail_N = 3
        param_dist = approxLognormal(N=param_count - tail_N,
                                     mu=np.log(center) - 0.5 * spread**2,
                                     sigma=spread, tail_N=tail_N,
                                     tail_bound=[0.0, 0.9], tail_order=np.e)

    # Distribute the parameters to the various types, assigning consecutive
    # types the same value if there are more types than values
    replication_factor = len(self.agents) // param_count
    # Note: // is integer division in both Python 3 and 2.7; this makes it explicit
    j = 0
    b = 0
    while j < len(self.agents):
        for n in range(replication_factor):
            self.agents[j](AgentCount=int(self.Population * param_dist[0][b] * self.TypeWeight[n]))
            # Assign the parameter by keyword unpacking rather than exec()
            self.agents[j](**{param_name: param_dist[1][b]})
            j += 1
        b += 1
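# A minimal sketch of the replication bookkeeping above, with hypothetical
# counts: six agents and three parameter values give replication_factor = 2,
# so consecutive pairs of agents share a value.
num_agents, param_count = 6, 3
replication_factor = num_agents // param_count
assignment = [b for b in range(param_count) for _ in range(replication_factor)]
print(assignment)  # [0, 0, 1, 1, 2, 2] -- agent j receives value assignment[j]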
def simulateKYratioDifference(DiscFac, nabla, N, type_list, weights, total_output, target):
    '''
    Assigns a uniform distribution over DiscFac with width 2*nabla and N points,
    then solves and simulates all agent types in type_list and compares the
    simulated K/Y ratio to the target K/Y ratio.

    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors.
    nabla : float
        Width of the uniform distribution of discount factors.
    N : int
        Number of discrete consumer types.
    type_list : [cstwMPCagent]
        List of agent types to solve and simulate after assigning discount factors.
    weights : np.array
        Age-conditional array of population weights.
    total_output : float
        Total output of the economy, denominator for the K/Y calculation.
    target : float
        Target level of capital-to-output ratio.

    Returns
    -------
    my_diff : float
        Difference between simulated and target capital-to-output ratios.
    '''
    # Unwrap DiscFac if an optimizer passed it as a one-element sequence
    if isinstance(DiscFac, (list, np.ndarray)):
        DiscFac = DiscFac[0]
    DiscFac_list = approxUniform(N, DiscFac - nabla, DiscFac + nabla)[1]  # only take values, not probs
    assignBetaDistribution(type_list, DiscFac_list)
    multiThreadCommandsFake(type_list, beta_point_commands)
    my_diff = calculateKYratioDifference(
        np.vstack([this_type.W_history for this_type in type_list]),
        np.tile(weights / float(N), N), total_output, target)
    return my_diff
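# A hedged usage sketch: the difference function above is shaped for a
# one-dimensional root-finder over DiscFac. The bracket, nabla=0 (point
# estimate), N=1, and the names total_output and KY_target are illustrative
# assumptions; scipy's brentq stands in for whatever solver the script uses.
from scipy.optimize import brentq

beta_point = brentq(simulateKYratioDifference, 0.90, 0.999,
                    args=(0.0, 1, est_type_list, Params.age_weight_all,
                          total_output, KY_target))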
def objectiveFuncMPC(center, spread):
    '''
    Objective function of the beta-dist estimation, similar to cstwMPC.
    Minimizes the distance between simulated and actual mean semiannual MPCs
    by wealth quintile.

    Parameters
    ----------
    center : float
        Mean of distribution of discount factor.
    spread : float
        Half width of span of discount factor.

    Returns
    -------
    distance : float
        Distance between simulated and actual moments.
    '''
    DiscFacSet = approxUniform(N=TypeCount, bot=center-spread, top=center+spread)[1]
    for j in range(TypeCount):
        Agents[j](DiscFac=DiscFacSet[j])
    multiThreadCommands(Agents, ['solve()', 'initializeSim()', 'simulate()'])
    aLvl_sim = np.concatenate([agent.aLvlNow for agent in Agents])
    MPC_sim = np.concatenate([agent.MPCnow for agent in Agents])
    MPC_alt = 1. - (1. - MPC_sim)**2  # Convert quarterly MPCs to semiannual MPCs
    MPC_by_aLvl = calcSubpopAvg(MPC_alt, aLvl_sim, cutoffs)
    moments_sim = MPC_by_aLvl
    moments_diff = moments_sim - moments_data
    moments_diff[1:] *= 1  # Rescaling factor for later moments (currently 1: no rescaling)
    distance = np.sqrt(np.dot(moments_diff, moments_diff))
    print('Tried center=' + str(center) + ', spread=' + str(spread) +
          ', got distance=' + str(distance))
    print(moments_sim)
    return distance
def objectiveFuncWealth(center, spread):
    '''
    Objective function of the beta-dist estimation, similar to cstwMPC.
    Minimizes the distance between simulated and actual 20-40-60-80 Lorenz
    curve points and average wealth-to-income ratio.

    Parameters
    ----------
    center : float
        Mean of distribution of discount factor.
    spread : float
        Half width of span of discount factor.

    Returns
    -------
    distance : float
        Distance between simulated and actual moments.
    '''
    DiscFacSet = approxUniform(N=TypeCount, bot=center-spread, top=center+spread)[1]
    for j in range(TypeCount):
        Agents[j](DiscFac=DiscFacSet[j])
    multiThreadCommands(Agents, ['solve()', 'initializeSim()', 'simulate()'])
    aLvl_sim = np.concatenate([agent.aLvlNow for agent in Agents])
    aNrm_sim = np.concatenate([agent.aNrmNow for agent in Agents])
    aNrmMean_sim = np.mean(aNrm_sim)
    Lorenz_sim = list(getLorenzShares(aLvl_sim, percentiles=percentile_targets))
    moments_sim = np.array([aNrmMean_sim] + Lorenz_sim)
    moments_diff = moments_sim - moments_data
    moments_diff[1:] *= 1  # Rescale Lorenz shares (currently a factor of 1: no rescaling)
    distance = np.sqrt(np.dot(moments_diff, moments_diff))
    print('Tried center=' + str(center) + ', spread=' + str(spread) +
          ', got distance=' + str(distance))
    print(moments_sim)
    return distance
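# A hedged driver sketch for either objective above: search over
# (center, spread) with a derivative-free method, since both objectives are
# computed by simulation. The use of scipy's Nelder-Mead and the starting
# guess are assumptions, not the original estimation script.
from scipy.optimize import minimize

opt = minimize(lambda x: objectiveFuncWealth(x[0], x[1]),
               x0=np.array([0.98, 0.01]), method='Nelder-Mead')
print(opt.x)  # estimated (center, spread)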
# - Immediate transitions between the extreme values of $\beta$ occur with probability zero.
# - The average duration of the highest and lowest $\beta$’s is 50 years.
#
# The HARK toolkit is not natively set up to accommodate stochastic time preference factors (though an extension to do so would be easy).
#
# Here, instead, we assume that different agents have different values of $\beta$ that are uniformly distributed over some range. We approximate the uniform distribution by three points. The agents are heterogeneous _ex ante_ (and permanently).

# %% {"code_folding": []}
# Construct the distribution of types
from HARK.utilities import approxUniform

# Specify the distribution of the discount factor
num_types = 3            # number of types we want
DiscFac_mean = 0.9858    # center of beta distribution
DiscFac_spread = 0.0085  # spread of beta distribution
DiscFac_dstn = approxUniform(num_types, DiscFac_mean-DiscFac_spread, DiscFac_mean+DiscFac_spread)[1]

BaselineType = deepcopy(KSAgent)

MyTypes = []  # initialize an empty list to hold our consumer types
for nn in range(len(DiscFac_dstn)):
    # Now create the types, and append them to the list MyTypes
    NewType = deepcopy(BaselineType)
    NewType.DiscFac = DiscFac_dstn[nn]
    NewType.seed = nn  # give each consumer type a different RNG seed
    MyTypes.append(NewType)

# %% {"code_folding": []}
# Put all agents into the economy
KSEconomy_sim = CobbDouglasMarkovEconomy(agents=MyTypes, **KSEconomyDictionary)
KSEconomy_sim.AggShkDstn = KSAggShkDstn  # Agg shocks are the same as defined earlier
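# %% [markdown]
# As a quick check of the three-point approximation above, the cell below (a minimal sketch) prints what `approxUniform` returns: a pair whose first element holds the probabilities (each $1/N$) and whose second holds the evenly spaced discount factor values.

# %%
DiscFac_pair = approxUniform(num_types, DiscFac_mean-DiscFac_spread, DiscFac_mean+DiscFac_spread)
print(DiscFac_pair[0])  # three equal weights of 1/3
print(DiscFac_pair[1])  # three discount factors centered on DiscFac_mean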
    NewType = deepcopy(BaselineType)
    NewType.seed = nn  # give each consumer type a different RNG seed
    ConsumerTypes.append(NewType)

# %% [markdown]
# Now we can give each of the consumer types their own discount factor. This distribution of parameters was estimated in the paper ["The Distribution of Wealth and the Marginal Propensity to Consume" by Carroll, Slacalek, Tokuoka, and White (2017) (cstwMPC)](http://econ.jhu.edu/people/ccarroll/papers/cstwMPC).

# %% {"code_folding": [0]}
# Seven types is enough to approximate the uniform distribution (5 is not quite enough)
from HARK.utilities import approxUniform

# Calibrations from cstwMPC
bottomDiscFac = 0.9800
topDiscFac = 0.9934
DiscFac_list = approxUniform(N=num_consumer_types, bot=bottomDiscFac, top=topDiscFac)[1]

# Now, assign the discount factors
for j in range(num_consumer_types):
    ConsumerTypes[j].DiscFac = DiscFac_list[j]

# %% [markdown]
# Our agents now exist and have a concept of the problem they face, but we still need them to solve that problem.
#
# Once we have solved each type of consumer's individual problem, we need to know the distribution of wealth (and permanent income) that the population would achieve in the long run.
#
# The cell below does both of those tasks, looping through the consumer types. For each one, it solves that type's infinite horizon model, then simulates 1000 periods to generate an approximation to the long run distribution of wealth.

# %% {"code_folding": [0]}
for ConsumerType in tqdm(ConsumerTypes):
def FagerengObjFunc(center, spread, verbose=False):
    '''
    Objective function for the quick and dirty structural estimation to fit
    Fagereng, Holm, and Natvik's Table 9 results with a basic infinite horizon
    consumption-saving model (with permanent and transitory income shocks).

    Parameters
    ----------
    center : float
        Center of the uniform distribution of discount factors.
    spread : float
        Width of the uniform distribution of discount factors.
    verbose : bool
        When True, print the MPC table for these parameters to screen.
        When False, print (center, spread, distance).

    Returns
    -------
    distance : float
        Euclidean distance between simulated MPCs and (adjusted) Table 9 MPCs.
    '''
    # Give our consumer types the requested discount factor distribution
    beta_set = approxUniform(N=TypeCount, bot=center - spread, top=center + spread)[1]
    for j in range(TypeCount):
        EstTypeList[j](DiscFac=beta_set[j])

    # Solve and simulate all consumer types, then gather their wealth levels
    multiThreadCommands(EstTypeList,
                        ['solve()', 'initializeSim()', 'simulate()', 'unpackcFunc()'])
    WealthNow = np.concatenate([ThisType.aLvlNow for ThisType in EstTypeList])

    # Get wealth quartile cutoffs and distribute them to each consumer type
    quartile_cuts = getPercentiles(WealthNow, percentiles=[0.25, 0.50, 0.75])
    for ThisType in EstTypeList:
        WealthQ = np.zeros(ThisType.AgentCount, dtype=int)
        for n in range(3):
            WealthQ[ThisType.aLvlNow > quartile_cuts[n]] += 1
        ThisType(WealthQ=WealthQ)

    # Keep track of MPC sets in lists of lists of arrays
    MPC_set_list = [[[], [], [], []],
                    [[], [], [], []],
                    [[], [], [], []],
                    [[], [], [], []]]

    # Calculate the MPC for each of the four lottery sizes for all agents
    for ThisType in EstTypeList:
        ThisType.simulate(1)
        c_base = ThisType.cNrmNow
        MPC_this_type = np.zeros((ThisType.AgentCount, 4))
        for k in range(4):  # Get MPC for all agents of this type
            Llvl = lottery_size[k]
            Lnrm = Llvl / ThisType.pLvlNow
            if do_secant:
                SplurgeNrm = Splurge / ThisType.pLvlNow
                mAdj = ThisType.mNrmNow + Lnrm - SplurgeNrm
                cAdj = ThisType.cFunc[0](mAdj) + SplurgeNrm
                MPC_this_type[:, k] = (cAdj - c_base) / Lnrm
            else:
                mAdj = ThisType.mNrmNow + Lnrm
                # Instantaneous MPC from the consumption function's derivative
                MPC_this_type[:, k] = ThisType.cFunc[0].derivative(mAdj)

        # Sort the MPCs into the proper MPC sets
        for q in range(4):
            these = ThisType.WealthQ == q
            for k in range(4):
                MPC_set_list[k][q].append(MPC_this_type[these, k])

    # Calculate average within each MPC set
    simulated_MPC_means = np.zeros((4, 4))
    for k in range(4):
        for q in range(4):
            MPC_array = np.concatenate(MPC_set_list[k][q])
            simulated_MPC_means[k, q] = np.mean(MPC_array)

    # Calculate Euclidean distance between simulated MPC averages and Table 9 targets
    diff = simulated_MPC_means - MPC_target
    if drop_corner:
        diff[0, 0] = 0.0
    distance = np.sqrt(np.sum(diff**2))
    if verbose:
        print(simulated_MPC_means)
    else:
        print(center, spread, distance)
    return distance
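# A minimal numeric sketch of the secant MPC above, using a hypothetical
# linear consumption rule c(m) = 0.1*m purely for arithmetic (the real cFunc
# comes from solve()): the splurge is consumed in full, and the rest of the
# lottery is consumed at the rate implied by the consumption function.
c = lambda m: 0.1 * m
mNrm, Lnrm, SplurgeNrm = 10.0, 2.0, 0.5
cBase = c(mNrm)
cAdj = c(mNrm + Lnrm - SplurgeNrm) + SplurgeNrm
print((cAdj - cBase) / Lnrm)  # (1.65 - 1.0) / 2.0 = 0.325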
def runRoszypalSchlaffmanExperiment(CorrAct, CorrPcvd, DiscFac_center, DiscFac_spread,
                                    numTypes, simPeriods):
    '''
    Solve and simulate a consumer type who misperceives the extent of serial
    correlation of persistent shocks to income.

    Parameters
    ----------
    CorrAct : float
        Serial correlation coefficient for *actual* persistent income.
    CorrPcvd : float
        *Perceived* serial correlation coefficient for persistent income.
    DiscFac_center : float
        A measure of centrality for the distribution of the beta parameter, DiscFac.
    DiscFac_spread : float
        A measure of spread or diffusion for the distribution of the beta parameter.
    numTypes : int
        Number of different types of agents (distributed using DiscFac_center
        and DiscFac_spread).
    simPeriods : int
        Number of periods to simulate before calculating distributions.

    Returns
    -------
    AggWealthRatio : float
        Ratio of aggregate wealth to income.
    Lorenz : numpy.array
        A 2D array with two rows (wealth percentiles and Lorenz shares)
        representing the Lorenz curve for assets in the most recent simulated period.
    Gini : float
        Gini coefficient for assets in the most recent simulated period.
    Avg_MPC : numpy.array
        Average marginal propensity to consume by income quintile in the latest
        simulated period.
    '''
    # Make a dictionary to construct our consumer type
    ThisDict = copy(BaselineDict)
    ThisDict['PrstIncCorr'] = CorrAct

    # Make an N=numTypes point approximation to a uniform distribution of DiscFac
    DiscFac_list = approxUniform(N=numTypes,
                                 bot=DiscFac_center - DiscFac_spread,
                                 top=DiscFac_center + DiscFac_spread)[1]

    type_list = []
    # Make a PersistentShockConsumerTypeX for each value of beta saved in DiscFac_list
    for i in range(len(DiscFac_list)):
        ThisDict['DiscFac'] = DiscFac_list[i]
        ThisType = PersistentShockConsumerTypeX(**ThisDict)

        # Make the consumer *believe* he will face a different level of persistence
        ThisType.PrstIncCorr = CorrPcvd
        ThisType.updatepLvlNextFunc()  # *thinks* E[p_{t+1}] as a function of p_t is different than it is

        # Solve the consumer's problem with *perceived* persistence
        ThisType.solve()

        # Make the consumer type experience the true level of persistence during simulation
        ThisType.PrstIncCorr = CorrAct
        ThisType.updatepLvlNextFunc()

        # Simulate the agents for many periods
        ThisType.T_sim = simPeriods
        #ThisType.track_vars = ['cLvlNow','aLvlNow','pLvlNow','MPCnow']
        ThisType.initializeSim()
        ThisType.simulate()
        type_list.append(ThisType)

    # Get the most recent simulated values of X = cLvlNow, MPCnow, aLvlNow, pLvlNow for all types
    cLvl_all = np.concatenate([ThisType.cLvlNow for ThisType in type_list])
    aLvl_all = np.concatenate([ThisType.aLvlNow for ThisType in type_list])
    MPC_all = np.concatenate([ThisType.MPCnow for ThisType in type_list])
    pLvl_all = np.concatenate([ThisType.pLvlNow for ThisType in type_list])

    # The ratio of aggregate assets to aggregate income
    AggWealthRatio = np.mean(aLvl_all) / np.mean(pLvl_all)

    # First 1D array: create points in the range (0,1)
    wealth_percentile = np.linspace(0.001, 0.999, 201)

    # Second 1D array: compute Lorenz shares for the created points
    Lorenz_init = getLorenzShares(aLvl_all, percentiles=wealth_percentile)

    # Stick 0 and 1 at the boundaries of both arrays to make it inclusive on the range [0,1]
    Lorenz_init = np.concatenate([[0], Lorenz_init, [1]])
    wealth_percentile = np.concatenate([[0], wealth_percentile, [1]])

    # Stack the wealth percentiles and Lorenz shares into a single 2D array
    Lorenz = np.stack((wealth_percentile, Lorenz_init))

    # Compute the Gini coefficient: one minus twice the area under the Lorenz
    # curve, with the mean over the (nearly uniform) percentile grid
    # approximating the integral
    Gini = 1.0 - 2.0 * np.mean(Lorenz_init)

    # Compute the average MPC by income quintile in the latest simulated period
    Avg_MPC = calcSubpopAvg(MPC_all, pLvl_all,
                            cutoffs=[(0.0, 0.2), (0.2, 0.4), (0.4, 0.6),
                                     (0.6, 0.8), (0.8, 1.0)])

    return AggWealthRatio, Lorenz, Gini, Avg_MPC
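# A sanity-check sketch for the Gini formula above: with perfect equality the
# Lorenz curve is the 45-degree line L(p) = p, so 1 - 2*integral(L) = 0.
import numpy as np

p = np.linspace(0.0, 1.0, 203)
L_equal = p                              # Lorenz curve under perfect equality
print(1.0 - 2.0 * np.trapz(L_equal, p))  # ~0.0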
        if i == j:
            PolyMrkvArray[i, j] = Persistence
        elif (i == (j - 1)) or (i == (j + 1)):
            PolyMrkvArray[i, j] = 0.5 * (1.0 - Persistence)
PolyMrkvArray[0, 0] += 0.5 * (1.0 - Persistence)
PolyMrkvArray[StateCount - 1, StateCount - 1] += 0.5 * (1.0 - Persistence)
PolyMrkvArray *= 1.0 - RegimeChangePrb
PolyMrkvArray += RegimeChangePrb / StateCount

# Define the set of aggregate permanent growth factors that can occur (Markov specifications only)
PermGroFacSet = np.exp(np.linspace(np.log(PermGroFacMin), np.log(PermGroFacMax), num=StateCount))

# Define the set of discount factors that agents have (for SOE and DSGE models)
DiscFacSetSOE = approxUniform(N=TypeCount,
                              bot=DiscFacMeanSOE - DiscFacSpread,
                              top=DiscFacMeanSOE + DiscFacSpread)[1]
DiscFacSetDSGE = approxUniform(N=TypeCount,
                               bot=DiscFacMeanDSGE - DiscFacSpread,
                               top=DiscFacMeanDSGE + DiscFacSpread)[1]

###############################################################################

# Define parameters for the small open economy version of the model
init_SOE_consumer = {
    'CRRA': CRRA,
    'DiscFac': DiscFacMeanSOE,
    'LivPrb': [LivPrb],
    'PermGroFac': [1.0],
    'AgentCount': AgentCount // TypeCount,  # Spread agents evenly among types
    'aXtraMin': 0.00001,
AggregationFactor = 253.0

###############################################################################

# Basic lifecycle length parameters (don't touch these)
init_age = 24
working_T = 41*4  # Number of working periods
retired_T = 55*4  # Number of retired periods
T_cycle = working_T + retired_T
AgentCountTotal = 50000  # Total simulated population
T_sim = 13               # Number of quarters to simulate in counterfactuals

# Define the distribution of the discount factor for each education level
DiscFacCount = 7
DiscFacDstnD = approxUniform(DiscFacCount, DiscFacMeanD-DiscFacSpread, DiscFacMeanD+DiscFacSpread)
DiscFacDstnH = approxUniform(DiscFacCount, DiscFacMeanH-DiscFacSpread, DiscFacMeanH+DiscFacSpread)
DiscFacDstnC = approxUniform(DiscFacCount, DiscFacMeanC-DiscFacSpread, DiscFacMeanC+DiscFacSpread)
DiscFacDstns = [DiscFacDstnD, DiscFacDstnH, DiscFacDstnC]

# Define permanent income growth rates for each education level (from Cagetti 2003);
# the last pre-retirement entry holds the large one-time drop at retirement, and the
# post-retirement years all share a constant rate
PermGroRte_d_ann = [5.2522391e-002, 5.0039782e-002, 4.7586132e-002, 4.5162424e-002,
                    4.2769638e-002, 4.0408757e-002, 3.8080763e-002, 3.5786635e-002,
                    3.3527358e-002, 3.1303911e-002, 2.9117277e-002, 2.6968437e-002,
                    2.4858374e-002, 2.2788068e-002, 2.0758501e-002, 1.8770655e-002,
                    1.6825511e-002, 1.4924052e-002, 1.3067258e-002, 1.1256112e-002,
                    9.4915947e-003, 7.7746883e-003, 6.1063742e-003, 4.4876340e-003,
                    2.9194495e-003, 1.4028022e-003, -6.1326258e-005, -1.4719542e-003,
                    -2.8280999e-003, -4.1287819e-003, -5.3730185e-003, -6.5598280e-003,
                    -7.6882288e-003, -8.7572392e-003, -9.7658777e-003, -1.0713163e-002,
                    -1.1598112e-002, -1.2419745e-002, -1.3177079e-002, -1.3869133e-002,
                    -4.3985368e-001] + 24*[-8.5623256e-003]
PermGroRte_h_ann = [4.1102173e-002, 4.1194381e-002, 4.1117402e-002, 4.0878307e-002,
                    4.0484168e-002, 3.9942056e-002, 3.9259042e-002, 3.8442198e-002,
                    3.7498596e-002, 3.6435308e-002, 3.5259403e-002, 3.3977955e-002,
                    3.2598035e-002, 3.1126713e-002, 2.9571062e-002, 2.7938153e-002,
                    2.6235058e-002, 2.4468848e-002, 2.2646594e-002, 2.0775369e-002,
                    1.8862243e-002, 1.6914288e-002, 1.4938576e-002, 1.2942178e-002,
                    1.0932165e-002, 8.9156095e-003, 6.8995825e-003, 4.8911556e-003,
                    2.8974003e-003, 9.2538802e-004, -1.0178097e-003, -2.9251214e-003,
                    -4.7894755e-003, -6.6038005e-003, -8.3610250e-003, -1.0054077e-002,
                    -1.1675886e-002, -1.3219380e-002, -1.4677487e-002, -1.6043137e-002,
                    -5.5864350e-001] + 24*[-1.0820465e-002]
PermGroRte_c_ann = [3.9375106e-002, 3.9030288e-002, 3.8601230e-002, 3.8091011e-002,
                    3.7502710e-002, 3.6839406e-002, 3.6104179e-002, 3.5300107e-002,
                    3.4430270e-002, 3.3497746e-002, 3.2505614e-002, 3.1456953e-002,
                    3.0354843e-002, 2.9202363e-002, 2.8002591e-002, 2.6758606e-002,
                    2.5473489e-002, 2.4150316e-002, 2.2792168e-002, 2.1402124e-002,
                    1.9983263e-002, 1.8538663e-002, 1.7071404e-002, 1.5584565e-002,
                    1.4081224e-002, 1.2564462e-002, 1.1037356e-002, 9.5029859e-003,
                    7.9644308e-003, 6.4247695e-003, 4.8870812e-003, 3.3544449e-003,
                    1.8299396e-003, 3.1664424e-004, -1.1823620e-003, -2.6640003e-003,
                    -4.1251914e-003, -5.5628564e-003, -6.9739162e-003, -8.3552918e-003,
                    -6.8938447e-001] + 24*[-6.1023256e-004]

# Add 31 years of the same permanent income growth rate to the end of each sequence
PermGroRte_d_ann += 31*[PermGroRte_d_ann[-1]]
PermGroRte_h_ann += 31*[PermGroRte_h_ann[-1]]
PermGroRte_c_ann += 31*[PermGroRte_c_ann[-1]]

# Store the big shock to permanent income at retirement
PermGroRte_d_retire = PermGroRte_d_ann[40]
PermGroRte_h_retire = PermGroRte_h_ann[40]
PermGroRte_c_retire = PermGroRte_c_ann[40]

# Overwrite the "retirement drop" with the adjacent growth rate
PermGroRte_d_ann[40] = PermGroRte_d_ann[39]
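# The lifecycle above is quarterly while these growth rates are annual. A
# hedged sketch of the conversion presumably applied downstream (an assumption
# shown for intuition, not a line from this file): an annual growth *rate*
# maps to a quarterly growth *factor* via a fourth root.
PermGroRte_ann_example = 0.041                          # a typical early-career rate above
PermGroFac_qtr = (1.0 + PermGroRte_ann_example)**0.25   # ~1.0101 per quarter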
BaseAggShksType = AggShockConsumerType(**Params.init_agg_shocks)
agg_shocks_type_list = []
for j in range(Params.pref_type_count):
    new_type = deepcopy(BaseAggShksType)
    new_type.seed = j
    new_type.resetRNG()
    new_type.makeIncShkHist()
    agg_shocks_type_list.append(new_type)

if Params.do_beta_dist:
    beta_agg = beta_dist_estimate
    nabla_agg = nabla_estimate
else:
    beta_agg = beta_point_estimate
    nabla_agg = 0.0

DiscFac_list_agg = approxUniform(N=Params.pref_type_count,
                                 bot=beta_agg - nabla_agg,
                                 top=beta_agg + nabla_agg)[1]
assignBetaDistribution(agg_shocks_type_list, DiscFac_list_agg)

# Make a market for solving the FBS aggregate shocks model
agg_shocks_market = CobbDouglasEconomy(agents=agg_shocks_type_list,
                                       act_T=Params.sim_periods_agg_shocks,
                                       tolerance=0.0001,
                                       **Params.aggregate_params)
agg_shocks_market.makeAggShkHist()

# Edit the consumer types so they have the right data
for this_type in agg_shocks_market.agents:
    this_type.p_init = drawMeanOneLognormal(N=this_type.Nagents, sigma=0.9,
AggShkDstnAlt_update[0] *= UpdatePrb
PermShkAggDstnAlt_dont = [np.array([1.0]), np.array([1.0])]  # Degenerate distribution
TranShkAggDstnAlt_dont = approxMeanOneLognormal(
    5, np.sqrt(TranShkAggVar + PermShkAggVar / UpdatePrb))
AggShkDstnAlt_dont = combineIndepDstns(PermShkAggDstnAlt_dont, TranShkAggDstnAlt_dont)
AggShkDstnAlt_dont[0] *= 1. - UpdatePrb
AggShkDstnAlt = StateCount * [[
    np.concatenate([AggShkDstnAlt_update[n], AggShkDstnAlt_dont[n]])
    for n in range(3)
]]

# Define the set of discount factors that agents have (for SOE and DSGE models)
DiscFacSetSOE = approxUniform(N=TypeCount,
                              bot=DiscFacMeanSOE - DiscFacSpread,
                              top=DiscFacMeanSOE + DiscFacSpread)[1]
DiscFacSetDSGE = approxUniform(N=TypeCount,
                               bot=DiscFacMeanDSGE - DiscFacSpread,
                               top=DiscFacMeanDSGE + DiscFacSpread)[1]
DiscFacSetSOE_parker = approxUniform(
    N=TypeCount_parker,
    bot=DiscFacMeanSOE_parker - DiscFacSpread_parker,
    top=DiscFacMeanSOE_parker + DiscFacSpread_parker)[1]

###############################################################################

# Define parameters for the small open economy version of the model
init_SOE_consumer = {
    'CRRA': CRRA,
    'DiscFac': DiscFacMeanSOE,