Example #1
0
 def initializeSim(self):
     '''
     Prepare this agent type for simulation.  Calls the parent class
     initializer, then — when the Markov state is global — draws one common
     initial state from MrkvPrbsInit and assigns it to every agent.
     '''
     IndShockConsumerType.initializeSim(self)
     if self.global_markov:
         # All agents must share the same initial Markov state, so make a single draw
         common_draw = drawUniform(1, seed=self.RNG.randint(0, 2**31 - 1))
         init_cutoffs = np.cumsum(np.array(self.MrkvPrbsInit))
         shared_state = np.searchsorted(init_cutoffs, common_draw).astype(int)
         self.MrkvNow = np.ones(self.AgentCount) * shared_state
     # Ensure the state array is integer-typed for use as an index
     self.MrkvNow = self.MrkvNow.astype(int)
Example #2
0
    def getShocks(self):
        '''
        Draws a new Markov state and income shocks for the representative agent.

        Parameters
        ----------
        None

        Returns
        -------
        None
        '''
        # Transition the Markov state: one uniform draw against the cumulative
        # transition probabilities out of the current state
        transition_cutoffs = np.cumsum(self.MrkvArray[self.MrkvNow, :])
        state_draw = drawUniform(N=1, seed=self.RNG.randint(0, 2**31 - 1))
        self.MrkvNow = np.searchsorted(transition_cutoffs, state_draw)

        # Look up the income distribution and growth factor for this period and
        # the newly drawn state; t-1 because time has already advanced
        t = self.t_cycle[0]
        i = self.MrkvNow[0]
        dstn = self.IncomeDstn[t - 1][i]
        growth = self.PermGroFac[t - 1][i]
        event_indices = np.arange(dstn[0].size)  # just a list of integers
        # Sample one discrete income event, then unpack its shock values
        event = drawDiscrete(N=1, X=event_indices, P=dstn[0],
                             exact_match=False,
                             seed=self.RNG.randint(0, 2**31 - 1))
        # Permanent "shock" includes the expected growth factor
        self.PermShkNow = np.array(dstn[1][event] * growth)
        self.TranShkNow = np.array(dstn[2][event])
Example #3
0
    def simDeath(self):
        '''
        Determines which agents die this period and must be replaced.  Uses the sequence in LivPrb
        to determine survival probabilities for each agent.

        Parameters
        ----------
        None

        Returns
        -------
        None
        which_agents : np.array(bool)
            Boolean array of size AgentCount indicating which agents die.
        '''
        # Survival probability depends on (age, Markov state); time has
        # already advanced this period, so look back one with t_cycle - 1
        surv_prb = np.array(self.LivPrb)[self.t_cycle - 1, self.MrkvNow]
        mort_prb = 1.0 - surv_prb
        mort_draws = drawUniform(N=self.AgentCount,
                                 seed=self.RNG.randint(0, 2**31 - 1))
        which_agents = mort_draws < mort_prb
        # Agents who have reached the maximum age die with certainty
        if self.T_age is not None:
            too_old = self.t_age >= self.T_age
            which_agents = np.logical_or(which_agents, too_old)
        return which_agents
 def hitWithPandemicShock(self):
     '''
     Alter the Markov state of each simulated agent, jumping some people into
     an otherwise inaccessible "deep unemployment" state, and others into
     normal unemployment.
     '''
     # Calculate (cumulative) probabilities of each agent being shocked into each state.
     # Employment outcome follows a multinomial logit in age and log permanent income.
     age = (self.t_age/4) + 24  # model age in years; presumably t_age counts quarters from age 24 — TODO confirm
     DeepX = self.DeepParam0 + self.DeepParam1*np.log(self.pLvlNow) + self.DeepParam2*age + self.DeepParam3*age**2
     UnempX = self.UnempParam0 + self.UnempParam1*np.log(self.pLvlNow) + self.UnempParam2*age + self.UnempParam3*age**2
     expDeepX = np.exp(DeepX)
     expUnempX = np.exp(UnempX)
     denom = 1. + expDeepX + expUnempX
     EmpPrb = 1./denom            # logit probability of staying employed (state 0)
     UnempPrb = expUnempX/denom   # ...of ordinary unemployment (state 1)
     DeepPrb = expDeepX/denom     # ...of deep unemployment (state 2)
     PrbArray = np.vstack([EmpPrb,UnempPrb,DeepPrb])
     CumPrbArray = np.cumsum(PrbArray, axis=0)
     
     # Draw new Markov states for each agent by comparing a uniform draw to
     # the cumulative probabilities: 0 = employed, 1 = unemployed, 2 = deep unemployed
     draws = drawUniform(self.AgentCount, seed=self.RNG.randint(0,2**31-1))
     draws = self.RNG.permutation(draws)
     MrkvNew = np.zeros(self.AgentCount, dtype=int)
     MrkvNew[draws > CumPrbArray[0]] = 1
     MrkvNew[draws > CumPrbArray[1]] = 2
     if (self.PanShock and not self.L_shared): # If the pandemic actually occurs,
         MrkvNew += 3 # then put everyone into the low marginal utility world.
         # This is (momentarily) skipped over if the lockdown state is shared
         # rather than idiosyncratic.  See a few lines below.
     
     # Move agents to those Markov states 
     self.MrkvNow = MrkvNew
     
     # Take the appropriate (pre-drawn) shock history for each agent, depending
     # on their new state: each *HistAll array is indexed by Markov state first.
     J = self.MrkvArray[0].shape[0]
     for j in range(J):
         these = self.MrkvNow == j
         self.who_dies_hist[:,these] = self.DeathHistAll[j,:,:][:,these]
         self.MrkvNow_hist[:,these] = self.MrkvHistAll[j,:,:][:,these]
         self.PermShkNow_hist[:,these] = self.PermShkHistAll[j,:,:][:,these]
         self.TranShkNow_hist[:,these] = self.TranShkHistAll[j,:,:][:,these]
     
     # If the lockdown is a common/shared event, rather than idiosyncratic, bump
     # everyone into the lockdown state for *exactly* T_lockdown periods
     if (self.PanShock and self.L_shared):
         T = self.T_lockdown
         self.MrkvNow_hist[0:T,:] += 3
         
     # Edit the first period of the shock history to give all unemployed
     # people a bonus payment in just that quarter
     one_off_benefits = True   # If agents get continued unemployment benefits, the first period benefits are counted later
     if hasattr(self,'ContUnempBenefits'):
         if self.ContUnempBenefits==True:
             one_off_benefits = False
     if one_off_benefits:
         # Only pre-retirement agents receive benefits; mod 3 collapses the
         # "+3 lockdown" copies of each state back onto {emp, unemp, deep}
         young = self.age_base < self.T_retire
         unemp = np.logical_and(np.mod(self.MrkvNow,3) == 1, young)
         deep  = np.logical_and(np.mod(self.MrkvNow,3) == 2, young)
         # Express the lump-sum bonus as a transitory shock by scaling it by
         # permanent income (pLvl times the first permanent shock) — NOTE(review):
         # assumes TranShk histories are ratios of permanent income; confirm upstream
         self.TranShkNow_hist[0,unemp] += self.BonusUnemp/(self.pLvlNow[unemp]*self.PermShkNow_hist[0,unemp])
         self.TranShkNow_hist[0,deep]  += self.BonusDeep/(self.pLvlNow[deep]*self.PermShkNow_hist[0,deep])
Example #5
0
    def getMarkovStates(self):
        '''
        Draw new Markov states for each agent in the simulated population, using
        the attribute MrkvArray to determine transition probabilities.

        Parameters
        ----------
        None

        Returns
        -------
        None
        '''
        # Uniform draws: a single shared draw when the Markov state is
        # economy-wide, otherwise one independent draw per agent
        if self.global_markov:
            base_draws = np.ones(self.AgentCount) * drawUniform(
                1, seed=self.RNG.randint(0, 2**31 - 1))
        else:
            base_draws = drawUniform(self.AgentCount,
                                     seed=self.RNG.randint(0, 2**31 - 1))

        # Newborns keep their initial state (unless global_markov), and in the
        # very first simulated period the initial distribution is respected
        dont_change = self.t_age == 0
        if self.t_sim == 0:
            dont_change[:] = True

        # Membership mask for each current discrete state
        J = self.MrkvArray[0].shape[0]
        MrkvPrev = self.MrkvNow
        MrkvNow = np.zeros(self.AgentCount, dtype=int)
        in_state = [MrkvPrev == j for j in range(J)]

        # Transition each (cycle-time, state) cell by searching the cumulative
        # transition row for each agent's uniform draw
        for t in range(self.T_cycle):
            row_cutoffs = np.cumsum(self.MrkvArray[t], axis=1)
            at_this_time = self.t_cycle == t
            for j in range(J):
                these = np.logical_and(at_this_time, in_state[j])
                MrkvNow[these] = np.searchsorted(
                    row_cutoffs[j, :], base_draws[these]).astype(int)

        if not self.global_markov:
            MrkvNow[dont_change] = MrkvPrev[dont_change]
        self.MrkvNow = MrkvNow.astype(int)
Example #6
0
    def getShocks(self):
        '''
        Gets new Markov states and permanent and transitory income shocks for this period.  Samples
        from IncomeDstn for each period-state in the cycle.

        Parameters
        ----------
        None

        Returns
        -------
        None
        '''
        # --- Markov state transition ---
        if self.global_markov:
            # One common uniform draw shared by every agent
            base_draws = np.ones(self.AgentCount) * drawUniform(
                1, seed=self.RNG.randint(0, 2**31 - 1))
        else:
            # Evenly spaced quantile midpoints, shuffled across agents
            base_draws = self.RNG.permutation(
                np.arange(self.AgentCount, dtype=float) / self.AgentCount
                + 1.0 / (2 * self.AgentCount))
        newborn = self.t_age == 0  # just-born agents keep their state below
        MrkvPrev = self.MrkvNow
        MrkvNow = np.zeros(self.AgentCount, dtype=int)
        for t in range(self.T_cycle):
            Cutoffs = np.cumsum(self.MrkvArray[t], axis=1)
            for j in range(self.MrkvArray[t].shape[0]):
                these = np.logical_and(self.t_cycle == t, MrkvPrev == j)
                MrkvNow[these] = np.searchsorted(
                    Cutoffs[j, :], base_draws[these]).astype(int)
        if not self.global_markov:
            MrkvNow[newborn] = MrkvPrev[newborn]
        self.MrkvNow = MrkvNow.astype(int)

        # --- Income shocks, one (cycle-time, Markov state) cell at a time ---
        PermShkNow = np.zeros(self.AgentCount)
        TranShkNow = np.zeros(self.AgentCount)
        for t in range(self.T_cycle):
            for j in range(self.MrkvArray[t].shape[0]):
                these = np.logical_and(t == self.t_cycle, j == MrkvNow)
                N = np.sum(these)
                if N > 0:
                    dstn = self.IncomeDstn[t-1][j]    # income distribution for this cell
                    growth = self.PermGroFac[t-1][j]  # expected permanent growth factor
                    indices = np.arange(dstn[0].size)  # just a list of integers
                    # Sample N discrete income events from this cell's distribution
                    event_draws = drawDiscrete(N, X=indices, P=dstn[0],
                                               exact_match=False,
                                               seed=self.RNG.randint(0, 2**31-1))
                    # Permanent "shock" includes expected growth
                    PermShkNow[these] = dstn[1][event_draws] * growth
                    TranShkNow[these] = dstn[2][event_draws]
        # Newborns receive trivial shocks of exactly 1
        PermShkNow[newborn] = 1.0
        TranShkNow[newborn] = 1.0
        self.PermShkNow = PermShkNow
        self.TranShkNow = TranShkNow
Example #7
0
    def simBirth(self, which_agents):
        '''
        Makes new Markov consumer by drawing initial normalized assets, permanent income levels, and
        discrete states. Calls IndShockConsumerType.simBirth, then draws from initial Markov distribution.

        Parameters
        ----------
        which_agents : np.array(Bool)
            Boolean array of size self.AgentCount indicating which agents should be "born".

        Returns
        -------
        None
        '''
        # Initial assets and permanent income are drawn by the parent class
        IndShockConsumerType.simBirth(self, which_agents)
        if self.global_markov:
            return  # Markov state is set at the global level, not per newborn
        # Draw each newborn's initial discrete state from MrkvPrbsInit
        n_born = np.sum(which_agents)
        birth_draws = drawUniform(n_born, seed=self.RNG.randint(0, 2**31 - 1))
        init_cutoffs = np.cumsum(np.array(self.MrkvPrbsInit))
        self.MrkvNow[which_agents] = np.searchsorted(
            init_cutoffs, birth_draws).astype(int)
Example #8
0
 def test_drawUniform(self):
     # The first draw from the default-seeded generator must be reproducible
     first_draw = simulation.drawUniform(1)[0]
     self.assertEqual(first_draw, 0.5488135039273248)