# Example 1
# 0
    def simBirth(self, which_agents):
        '''
        Makes new consumers for the given indices.  Slightly extends base method by also setting
        pLvlTrue = 1.0 in the very first simulated period, as well as initializing the perception
        of aggregate productivity for each Markov state.  The representative agent begins with
        the correct perception of the Markov state.

        Parameters
        ----------
        which_agents : np.array(Bool)
            Boolean array of size self.AgentCount indicating which agents should be "born".

        Returns
        -------
        None
        '''
        # Fix: the original `which_agents == np.array([True])` truth-tests an
        # elementwise comparison array, which raises ValueError for any array
        # with more than one element.  np.all() is equivalent for the
        # representative-agent case (size 1) and well defined otherwise.
        if np.all(which_agents):
            RepAgentMarkovConsumerType.simBirth(self, which_agents)
            if self.t_sim == 0:  # Initialize perception distribution for Markov state
                # True aggregate productivity starts at exactly 1.0
                self.pLvlTrue = np.ones(self.AgentCount)
                self.aLvlNow = self.aNrmNow * self.pLvlTrue
                StateCount = self.MrkvArray.shape[0]
                # Perceived productivity level by Markov state
                self.pLvlNow = np.ones(StateCount)
                # Distribution of perceived Markov state: all probability mass
                # is put on the true initial state (correct perception)
                self.MrkvPcvd = np.zeros(StateCount)
                self.MrkvPcvd[self.MrkvNow[0]] = 1.0
# Example 2
# 0
class testRepAgentMarkovConsumerType(unittest.TestCase):
    """Tests for the Markov representative agent consumer type."""

    def setUp(self):
        # Duplicate the single income distribution so there is one per Markov state
        agent = RepAgentMarkovConsumerType()
        agent.IncShkDstn = [2 * [agent.IncShkDstn[0]]]
        agent.solve()
        self.agent = agent

    def test_solution(self):
        # Consumption function in Markov state 0, evaluated at m = 10
        c_at_10 = self.agent.solution[0].cFunc[0](10).tolist()
        self.assertAlmostEqual(c_at_10, 1.3829466326248048)

    def test_simulation(self):
        # Simulate the representative agent model for 100 periods
        agent = self.agent
        agent.T_sim = 100
        agent.track_vars = ["cNrm", "mNrm", "Rfree", "wRte", "Mrkv"]
        agent.initialize_sim()
        agent.simulate()
t_end = time()
# Report how long the simulation loop above took
print(
    f"Simulating a representative agent for {RAexample.T_sim} periods took "
    f"{t_end - t_start} seconds."
)

# %%
# Make and solve a Markov representative agent
RA_markov_params = deepcopy(RA_params)
RA_markov_params["PermGroFac"] = [[0.97, 1.03]]
RA_markov_params["MrkvArray"] = np.array([[0.99, 0.01], [0.01, 0.99]])
RA_markov_params["MrkvNow"] = 0
RAmarkovExample = RepAgentMarkovConsumerType(**RA_markov_params)
# Duplicate the income distribution so each Markov state has its own copy
RAmarkovExample.IncomeDstn[0] = 2 * [RAmarkovExample.IncomeDstn[0]]

# Time the solution of the two-state problem
t_start = time()
RAmarkovExample.solve()
t_end = time()
print(
    f"Solving a two state representative agent problem took "
    f"{t_end - t_start} seconds."
)
plotFuncs(RAmarkovExample.solution[0].cFunc, 0, 10)

# %%
# Simulate the two state representative agent model
RAmarkovExample.T_sim = 2000
RAmarkovExample.track_vars = ["cNrmNow", "mNrmNow", "Rfree", "wRte", "MrkvNow"]
# Example 4
# 0
 def setUp(self):
     # Build an agent with the income shock distribution duplicated per Markov state
     agent = RepAgentMarkovConsumerType()
     agent.IncShkDstn[0] = 2 * [agent.IncShkDstn[0]]
     agent.solve()
     self.agent = agent
# Example 5
# 0
# Figure path
Fpath = "../Documento Final/Graphics/"

# Parameters: two-state Markov representative agent calibration,
# starting from the idiosyncratic-shocks baseline
RA_markov_params = deepcopy(Params.init_idiosyncratic_shocks)
RA_markov_params.update({
    'CRRA': 3.0,
    'DiscFac': 0.99,
    'DeprFac': 0.025,
    'CapShare': 0.36,
    'UnempPrb': 0.0,
    'LivPrb': [1.0],
    'PermGroFac': [[1.01, 0.99]],
    'MrkvArray': np.array([[0.125, 0.875], [0.875, 0.125]]),
    'MrkvNow': 0,
})

# Create and solve the representative agent model, duplicating the
# income distribution so each Markov state has one
RA_Agg_model = RepAgentMarkovConsumerType(**RA_markov_params)
RA_Agg_model.IncomeDstn[0] = 2 * [RA_Agg_model.IncomeDstn[0]]
RA_Agg_model.solve()

# Consumption function in the first Markov state
cFunc2 = RA_Agg_model.solution[0].cFunc[0]

# Plot the consumption function over a grid of market resources
print('Consumption function:')
m_grid = np.linspace(0, 10, 200)
c_m_2 = cFunc2(m_grid)
plt.plot(m_grid, c_m_2)
plt.show()

# Marginal propensity to consume via a finite difference (step 0.001)
MPC = (cFunc2(43.964) - cFunc2(43.963)) / 0.001