def sim_birth(self, which_agents):
    """
    Create newly born consumers at the given indices.

    Newborns get fresh draws of normalized assets (from a lognormal with
    parameters aNrmInitMean / aNrmInitStd) and persistent income level
    (from a lognormal with parameters pLvlInitMean / pLvlInitStd); their
    level of assets aLvl is the product of the two draws.  Their age
    counters t_age and t_cycle are reset to zero.

    Parameters
    ----------
    which_agents : np.array(Bool)
        Boolean array of size self.AgentCount indicating which agents should be "born".

    Returns
    -------
    None
    """
    # Number of newborns to create
    num_births = np.sum(which_agents)

    # Draw normalized assets first, then persistent income, so the RNG is
    # consumed in the same order on every call.
    aNrm_newborns = Lognormal(
        self.aNrmInitMean,
        self.aNrmInitStd,
        seed=self.RNG.randint(0, 2**31 - 1),
    ).draw(num_births)
    self.state_now['pLvl'][which_agents] = Lognormal(
        self.pLvlInitMean,
        self.pLvlInitStd,
        seed=self.RNG.randint(0, 2**31 - 1),
    ).draw(num_births)

    # Asset *level* is normalized assets scaled by the persistent income level
    self.state_now['aLvl'][which_agents] = (
        aNrm_newborns * self.state_now['pLvl'][which_agents]
    )

    # Reset age counters: periods since birth, and position within the cycle
    self.t_age[which_agents] = 0
    self.t_cycle[which_agents] = 0
def updateRiskyDstn(self):
    '''
    Creates the attribute RiskyDstn from the primitive attributes RiskyAvg,
    RiskyStd, and RiskyCount, approximating the (perceived) distribution of
    returns in each period of the cycle.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    def lognormal_params(avg, std):
        # Moment-match the underlying normal's (mu, sigma) so the implied
        # lognormal return distribution has mean avg and std dev std.
        # Previously this math was duplicated in both branches below.
        var_ratio = std ** 2 / avg ** 2
        mu = np.log(avg / (np.sqrt(1. + var_ratio)))
        sigma = np.sqrt(np.log(1. + var_ratio))
        return mu, sigma

    # Determine whether this instance has time-varying risk perceptions.
    # isinstance is used instead of `type(x) is list` so list subclasses work.
    if (isinstance(self.RiskyAvg, list) and isinstance(self.RiskyStd, list)
            and len(self.RiskyAvg) == len(self.RiskyStd)
            and len(self.RiskyAvg) == self.T_cycle):
        self.addToTimeVary('RiskyAvg', 'RiskyStd')
    elif isinstance(self.RiskyStd, list) or isinstance(self.RiskyAvg, list):
        raise AttributeError(
            'If RiskyAvg is time-varying, then RiskyStd must be as well, and they must both have length of T_cycle!'
        )
    else:
        self.addToTimeInv('RiskyAvg', 'RiskyStd')

    if 'RiskyAvg' in self.time_vary:
        # Age-varying beliefs about the risky asset: build one discrete
        # approximation per period of the cycle.
        RiskyDstn = []
        for t in range(self.T_cycle):
            mu, sigma = lognormal_params(self.RiskyAvg[t], self.RiskyStd[t])
            RiskyDstn.append(
                Lognormal(mu=mu, sigma=sigma).approx(self.RiskyCount))
        self.RiskyDstn = RiskyDstn
        self.addToTimeVary('RiskyDstn')
    else:
        # Base case: beliefs do not vary with age, so a single discrete
        # approximation suffices.
        mu, sigma = lognormal_params(self.RiskyAvg, self.RiskyStd)
        self.RiskyDstn = Lognormal(mu=mu, sigma=sigma).approx(self.RiskyCount)
        self.addToTimeInv('RiskyDstn')
def getRisky(self):
    """
    Sets the shock RiskyNow as a single draw from a lognormal distribution.
    Uses the attributes RiskyAvgTrue and RiskyStdTrue if RiskyAvg is
    time-varying, else just uses the single values from RiskyAvg and
    RiskyStd.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # Pick the "true" return moments when beliefs are age-varying;
    # otherwise the perceived moments are also the true ones.
    if "RiskyDstn" in self.time_vary:
        avg, std = self.RiskyAvgTrue, self.RiskyStdTrue
    else:
        avg, std = self.RiskyAvg, self.RiskyStd

    # Moment-match the underlying normal so the lognormal draw has
    # mean avg and standard deviation std.
    var_ratio = (std ** 2) / (avg ** 2)
    sigma = np.sqrt(np.log(1.0 + var_ratio))
    mu = np.log(avg / (np.sqrt(1.0 + var_ratio)))

    # Single realized return, shared by all agents this period
    self.shocks['RiskyNow'] = Lognormal(
        mu, sigma, seed=self.RNG.randint(0, 2 ** 31 - 1)
    ).draw(1)
def sim_birth(self, which_agents):
    """
    Makes new consumers for the given indices.  Initialized variables
    include aLvl, as well as time variables t_age and t_cycle.  Asset
    levels are drawn from a lognormal distribution given by aLvlInitMean
    and aLvlInitStd, and newborns begin life employed (eStateNow = 1).

    Parameters
    ----------
    which_agents : np.array(Bool)
        Boolean array of size self.AgentCount indicating which agents should be "born".

    Returns
    -------
    None
    """
    # Get and store states for newly born agents
    N = np.sum(which_agents)  # Number of new consumers to make
    self.state_now['aLvl'][which_agents] = Lognormal(
        self.aLvlInitMean,
        sigma=self.aLvlInitStd,
        seed=self.RNG.randint(0, 2**31 - 1),
    ).draw(N)

    # Initialize the employment shock array only if it does not exist yet.
    # Unconditionally re-zeroing it on every birth (the previous behavior)
    # would erase the employment state of all surviving agents.
    if "eStateNow" not in self.shocks:
        self.shocks["eStateNow"] = np.zeros(self.AgentCount)
    self.shocks["eStateNow"][which_agents] = 1.0  # Agents are born employed

    self.t_age[which_agents] = 0  # How many periods since each agent was born
    self.t_cycle[which_agents] = 0  # Which period of the cycle each agent is currently in
    return None
def birth_aNrmNow(self, N):
    """
    Draw initial normalized assets for N newborn agents.

    Returns a lognormal sample parameterized by aNrmInitMean and
    aNrmInitStd, seeded from this instance's RNG.
    """
    seed = self.RNG.randint(0, 2 ** 31 - 1)
    init_dstn = Lognormal(mu=self.aNrmInitMean, sigma=self.aNrmInitStd, seed=seed)
    return init_dstn.draw(N)
def test_Lognormal(self):
    # First draw from a default-constructed Lognormal is deterministic
    expected_first_draw = 5.836039190663969
    dist = Lognormal()
    self.assertEqual(dist.draw(1)[0], expected_first_draw)
    # Advance the generator, then reset() must rewind to the initial state
    dist.draw(100)
    dist.reset()
    self.assertEqual(dist.draw(1)[0], expected_first_draw)
def birth_pLvlNow(self, N):
    """
    Draw initial persistent income levels for N newborn agents.

    The mean of the initial distribution is shifted up by the log of the
    aggregate permanent income level, so that newer cohorts are born with
    proportionally higher permanent income.
    """
    # Account for newer cohorts having higher permanent income
    mean_shifted = self.pLvlInitMean + np.log(self.state_now["PlvlAgg"])
    seed = self.RNG.randint(0, 2**31 - 1)
    return Lognormal(mean_shifted, self.pLvlInitStd, seed=seed).draw(N)
def update_pLvlGrid(self):
    """
    Update the grid of persistent income levels.  Currently only works for
    infinite horizon models (cycles=0) and lifecycle models (cycles=1).  Not
    clear what to do about cycles>1 because the distribution of persistent
    income will be different within a period depending on how many cycles
    have elapsed.  This method uses a simulation approach to generate the
    pLvlGrid at each period of the cycle, drawing on the initial distribution
    of persistent income, the pLvlNextFuncs, and the attribute pLvlPctiles.

    Parameters
    ----------
    None

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If self.cycles is neither 0 nor 1.
    """
    LivPrbAll = np.array(self.LivPrb)

    # Simulate the distribution of persistent income levels by t_cycle in a lifecycle model
    if self.cycles == 1:
        pLvlNow = Lognormal(self.pLvlInitMean,
                            sigma=self.pLvlInitStd,
                            seed=31382).draw(self.AgentCount)
        pLvlGrid = []  # empty list of time-varying persistent income grids
        # Calculate distribution of persistent income in each period of lifecycle
        for t in range(len(self.PermShkStd)):
            if t > 0:
                PermShkNow = self.PermShkDstn[t - 1].draw(N=self.AgentCount)
                pLvlNow = self.pLvlNextFunc[t - 1](pLvlNow) * PermShkNow
            pLvlGrid.append(
                get_percentiles(pLvlNow, percentiles=self.pLvlPctiles))

    # Calculate "stationary" distribution in infinite horizon (might vary across periods of cycle)
    elif self.cycles == 0:
        T_long = 1000  # Number of periods to simulate to get to "stationary" distribution
        pLvlNow = Lognormal(mu=self.pLvlInitMean,
                            sigma=self.pLvlInitStd,
                            seed=31382).draw(self.AgentCount)
        t_cycle = np.zeros(self.AgentCount, dtype=int)
        for t in range(T_long):
            # Determine who dies and replace them with newborns
            LivPrb = LivPrbAll[t_cycle]
            draws = Uniform(seed=t).draw(self.AgentCount)
            who_dies = draws > LivPrb
            pLvlNow[who_dies] = Lognormal(self.pLvlInitMean,
                                          self.pLvlInitStd,
                                          seed=t + 92615).draw(np.sum(who_dies))
            t_cycle[who_dies] = 0

            for j in range(self.T_cycle):  # Update persistent income
                these = t_cycle == j
                PermShkTemp = self.PermShkDstn[j].draw(N=np.sum(these))
                pLvlNow[these] = self.pLvlNextFunc[j](pLvlNow[these]) * PermShkTemp
            t_cycle = t_cycle + 1
            t_cycle[t_cycle == self.T_cycle] = 0

        # We now have a "long run stationary distribution", extract percentiles
        pLvlGrid = []  # empty list of time-varying persistent income grids
        for t in range(self.T_cycle):
            these = t_cycle == t
            pLvlGrid.append(
                get_percentiles(pLvlNow[these], percentiles=self.pLvlPctiles))

    # Throw an error if cycles>1.  Raise rather than `assert False`, which
    # would be silently stripped when Python runs with -O.
    else:
        raise ValueError("Can only handle cycles=0 or cycles=1!")

    # Store the result and add attribute to time_vary
    self.pLvlGrid = pLvlGrid
    self.add_to_time_vary("pLvlGrid")
def update_income_process(self): self.wage = 1 / (self.SSPmu) #calculate SS wage self.N = (self.mu_u * (self.IncUnemp * self.UnempPrb) + self.G) / ( self.wage * self.tax_rate ) #calculate SS labor supply from Budget Constraint PermShkDstn_U = Lognormal( np.log(self.mu_u) - (self.L * (self.PermShkStd[0])**2) / 2, self.L * self.PermShkStd[0], 123).approx(self.PermShkCount ) #Permanent Shock Distribution faced when unemployed PermShkDstn_E = MeanOneLogNormal(self.PermShkStd[0], 123).approx( self.PermShkCount ) #Permanent Shock Distribution faced when employed pmf_P = np.concatenate(((1 - self.UnempPrb) * PermShkDstn_E.pmf, self.UnempPrb * PermShkDstn_U.pmf)) X_P = np.concatenate((PermShkDstn_E.X, PermShkDstn_U.X)) PermShkDstn = [DiscreteDistribution(pmf_P, X_P)] self.PermShkDstn = PermShkDstn TranShkDstn_E = MeanOneLogNormal(self.TranShkStd[0], 123).approx( self.TranShkCount ) #Transitory Shock Distribution faced when employed TranShkDstn_E.X = ( TranShkDstn_E.X * (1 - self.tax_rate) * self.wage * self.N ) / ( 1 - self.UnempPrb )**2 #NEED TO FIX THIS SQUARE TERM #add wage, tax rate and labor supply lng = len(TranShkDstn_E.X) TranShkDstn_U = DiscreteDistribution( np.ones(lng) / lng, self.IncUnemp * np.ones(lng)) #Transitory Shock Distribution faced when unemployed IncShkDstn_E = combine_indep_dstns( PermShkDstn_E, TranShkDstn_E) # Income Distribution faced when Employed IncShkDstn_U = combine_indep_dstns( PermShkDstn_U, TranShkDstn_U) # Income Distribution faced when Unemployed #Combine Outcomes of both distributions X_0 = np.concatenate((IncShkDstn_E.X[0], IncShkDstn_U.X[0])) X_1 = np.concatenate((IncShkDstn_E.X[1], IncShkDstn_U.X[1])) X_I = [X_0, X_1] #discrete distribution takes in a list of arrays #Combine pmf Arrays pmf_I = np.concatenate(((1 - self.UnempPrb) * IncShkDstn_E.pmf, self.UnempPrb * IncShkDstn_U.pmf)) IncShkDstn = [DiscreteDistribution(pmf_I, X_I)] self.IncShkDstnN = IncShkDstn self.IncShkDstn = IncShkDstn self.add_to_time_vary('IncShkDstn') 
PermShkDstn_Uw = Lognormal( np.log(self.mu_u) - (self.L * (self.PermShkStd[0])**2) / 2, self.L * self.PermShkStd[0], 123).approx(self.PermShkCount ) #Permanent Shock Distribution faced when unemployed PermShkDstn_Ew = MeanOneLogNormal(self.PermShkStd[0], 123).approx( self.PermShkCount ) #Permanent Shock Distribution faced when employed TranShkDstn_Ew = MeanOneLogNormal(self.TranShkStd[0], 123).approx( self.TranShkCount ) #Transitory Shock Distribution faced when employed TranShkDstn_Ew.X = ( TranShkDstn_Ew.X * (1 - self.tax_rate) * (self.wage + self.dx) * self.N) / ( 1 - self.UnempPrb)**2 #add wage, tax rate and labor supply lng = len(TranShkDstn_Ew.X) TranShkDstn_Uw = DiscreteDistribution( np.ones(lng) / lng, self.IncUnemp * np.ones(lng)) #Transitory Shock Distribution faced when unemployed IncShkDstn_Ew = combine_indep_dstns( PermShkDstn_Ew, TranShkDstn_Ew) # Income Distribution faced when Employed IncShkDstn_Uw = combine_indep_dstns( PermShkDstn_Uw, TranShkDstn_Uw) # Income Distribution faced when Unemployed #Combine Outcomes of both distributions X_0 = np.concatenate((IncShkDstn_Ew.X[0], IncShkDstn_Uw.X[0])) X_1 = np.concatenate((IncShkDstn_Ew.X[1], IncShkDstn_Uw.X[1])) X_I = [X_0, X_1] #discrete distribution takes in a list of arrays #Combine pmf Arrays pmf_I = np.concatenate(((1 - self.UnempPrb) * IncShkDstn_Ew.pmf, self.UnempPrb * IncShkDstn_Uw.pmf)) IncShkDstnW = [DiscreteDistribution(pmf_I, X_I)] self.IncShkDstnW = IncShkDstnW self.add_to_time_vary('IncShkDstnW')