def get_markov_states(self):
    """
    Draw new Markov states for each agent in the simulated population, using
    the attribute MrkvArray to determine transition probabilities.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # Don't change Markov state for those who were just born (unless the
    # Markov process is global); at t_sim == 0, respect the initial
    # distribution of Markov states for *everyone*.
    dont_change = self.t_age == 0
    if self.t_sim == 0:
        dont_change[:] = True

    # Current state of each agent, and an array to hold the new draws
    MrkvPrev = self.shocks["Mrkv"]
    MrkvNow = np.zeros(self.AgentCount, dtype=int)

    # Draw new Markov states for each agent, one model period at a time,
    # using a freshly seeded MarkovProcess for period t's transition matrix.
    # (NOTE: removed an unused local `J = self.MrkvArray[0].shape[0]`.)
    for t in range(self.T_cycle):
        markov_process = MarkovProcess(
            self.MrkvArray[t], seed=self.RNG.randint(0, 2**31 - 1)
        )
        right_age = self.t_cycle == t
        MrkvNow[right_age] = markov_process.draw(MrkvPrev[right_age])
    if not self.global_markov:
        # Newborns (and everyone at t_sim == 0) keep their prior state
        MrkvNow[dont_change] = MrkvPrev[dont_change]

    self.shocks["Mrkv"] = MrkvNow.astype(int)
def test_draw(self):
    """Stepping a 2-state MarkovProcess twice yields the expected state counts."""
    transition_matrix = np.array([[0.75, 0.25], [0.1, 0.9]])
    process = MarkovProcess(transition_matrix)

    # Start all 100 agents in state 0 and advance the chain one step
    states = process.draw(np.zeros(100).astype(int))
    self.assertEqual(states.sum(), 20)

    # Advance the chain a second step from the resulting states
    states = process.draw(states)
    self.assertEqual(states.sum(), 39)
def get_shocks(self):
    """
    Draws a new Markov state and income shocks for the representative agent.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # Advance the aggregate Markov state using a freshly seeded process
    self.shocks["Mrkv"] = MarkovProcess(
        self.MrkvArray, seed=self.RNG.randint(0, 2 ** 31 - 1)
    ).draw(self.shocks["Mrkv"])

    t = self.t_cycle[0]
    i = self.shocks["Mrkv"]
    # Income distribution and permanent growth factor for this period/state
    IncShkDstnNow = self.IncShkDstn[t - 1][i]
    PermGroFacNow = self.PermGroFac[t - 1][i]

    # Draw one event from the discrete income shock distribution; the
    # permanent "shock" includes the expected growth factor
    EventDraw = IncShkDstnNow.draw_events(1)
    PermShkNow = IncShkDstnNow.X[0][EventDraw] * PermGroFacNow
    TranShkNow = IncShkDstnNow.X[1][EventDraw]

    self.shocks['PermShk'] = np.array(PermShkNow)
    self.shocks['TranShk'] = np.array(TranShkNow)