i_ε] - p * policy_m2[i_m1, i_m2, i_x, i_θ, i_ε]

    return V_new, policy_m1, policy_m2, policy_x, policy_c


qe.tic()
V = compute_fixed_point(bellman_operator_discrete,
                        V_guess,
                        max_iter=1,
                        error_tol=0.1)

V2 = compute_fixed_point(bellman_operator_discrete,
                         V,
                         max_iter=100,
                         error_tol=0.01)
qe.toc()
V_next, g_m1, g_m2, g_x, g_c = bellman_operator_policies(V2)
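
# A minimal, self-contained sketch (toy map, not the model above) of the same
# warm-start pattern used with quantecon's compute_fixed_point: a first pass
# with a loose tolerance, whose output seeds a second, tighter pass.
import numpy as np
import quantecon as qe


def T_toy(v):
    # Simple contraction with fixed point v* = 2 in every component
    return 0.5 * v + 1.0


v_coarse = qe.compute_fixed_point(T_toy, np.zeros(3), max_iter=5, error_tol=0.1)
v_fine = qe.compute_fixed_point(T_toy, v_coarse, max_iter=100, error_tol=1e-6)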

#%% Asset policy functions across shocks


def plot_policy(policy, policy_name, m0=0, x0=0, save=False, folder=folder):
    fig, ax = plt.subplots()
    ax.plot(m_grid,
            np.array(policy[:, m0, x0, 1, 1]),
            label=policy_name + r'($\theta=1, \varepsilon=1)$')
    ax.plot(m_grid,
            np.array(policy[:, m0, x0, 0, 1]),
            label=policy_name + r'($\theta=0, \varepsilon=1)$')
    ax.plot(m_grid,
            np.array(policy[:, m0, x0, 1, 0]),
            label=policy_name + r'($\theta=1, \varepsilon=0)$')
    ax.legend()
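
# Example usage (assuming g_c returned by bellman_operator_policies above shares
# the (m1, m2, x, theta, eps) index order used inside plot_policy):
plot_policy(g_c, r'$c$')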
Example #2
        # Greedy policy: index of the capital choice that maximizes row i of X
        g[i] = np.argmax(X[i, :])
    # Convergence check in the sup norm
    if np.max(np.abs(Vj - Vi)) >= epsilon:
        Vi = np.copy(Vj)
        Vj = np.empty([nk, 1])
        s += 1
    else:
        convergence = True
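
# The loop above starts mid-iteration; below is a self-contained sketch of the
# kind of value function iteration it appears to belong to, wrapped in a helper
# so it does not touch the script's own variables. The grid, the return matrix
# and the parameter values are illustrative assumptions, not taken from the
# original code.
import numpy as np


def vfi_sketch(beta=0.96, theta=0.67, delta=0.1, nk=200, epsilon=0.01):
    k_grid = np.linspace(0.1, 10.0, nk)
    # Consumption implied by every (k, k') pair; infeasible pairs get -inf utility
    cons = k_grid[:, None]**(1 - theta) + (1 - delta) * k_grid[:, None] - k_grid[None, :]
    util = np.where(cons > 0, np.log(np.maximum(cons, 1e-12)), -np.inf)
    V_old = np.zeros(nk)
    while True:
        X_mat = util + beta * V_old[None, :]   # value of choosing k' given k
        V_new = X_mat.max(axis=1)              # updated value function
        pol = X_mat.argmax(axis=1)             # greedy policy (indices into k_grid)
        if np.max(np.abs(V_new - V_old)) < epsilon:
            return k_grid, V_new, pol
        V_old = V_new

# Possible call: k_grid, V, pol = vfi_sketch()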

# Policy functions: capital choice gk and implied consumption gc on the grid
gk = np.empty(nk)
gc = np.empty(nk)
for i in range(nk):
    gk[i] = k[int(g[i])]
    gc[i] = k[i]**(1 - theta) + (1 - delta) * k[i] - gk[i]

T = qe.toc()

# Plotting the converged value function:
plt.plot(k, Vj, label='V(k)')
plt.xlabel('k')
plt.ylabel('V(k)')
plt.legend()
plt.show()
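
# Since part (b) below studies monotonicity of the optimal decision rule, a quick
# check that the rule gk obtained above is already nondecreasing in k:
print("Decision rule monotone:", np.all(np.diff(gk) >= 0))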

print("Convergence of the value functions took " + str(s) +
      " iterations, in " + str(T) + " seconds.")

#%% (b) Monotonicity of the optimal decision rule
#------------------------------------------------------------------------------
# Value function iteration:
qe.tic()
epsilon = 0.01