def _new_solution(sp, f, grp):
    "gets a new set of solution objects and updates the data file"

    # compute value function and policy rule using vfi
    v_init = np.zeros(len(sp.grid_points)) + sp.c / (1 - sp.beta)
    v = compute_fixed_point(sp.bellman_operator, v_init, error_tol=_tol,
                            max_iter=5000)
    phi_vfi = sp.get_greedy(v)

    # also run v through the Bellman operator so we can test that v is a fixed point;
    # bellman_operator takes a long time, so store the result instead of recomputing it
    new_v = sp.bellman_operator(v)

    # compute policy rule using pfi
    phi_init = np.ones(len(sp.pi_grid))
    phi_pfi = compute_fixed_point(sp.res_wage_operator, phi_init,
                                  error_tol=_tol, max_iter=5000)

    # write all arrays to file
    write_array(f, grp, v, "v")
    write_array(f, grp, phi_vfi, "phi_vfi")
    write_array(f, grp, phi_pfi, "phi_pfi")
    write_array(f, grp, new_v, "new_v")

    # return data
    return v, phi_vfi, phi_pfi, new_v
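
Since new_v = bellman_operator(v) is stored alongside v, a later test can
verify the fixed-point property without re-running the operator. A minimal
sketch of such a check, assuming the module-level tolerance _tol used above:

import numpy as np

def check_fixed_point(v, new_v, tol):
    # v approximates a fixed point of T when T(v) stays within the
    # convergence tolerance of v itself
    return np.max(np.abs(new_v - v)) < 2 * tol

# e.g. assert check_fixed_point(v, new_v, _tol)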
    def compute_lt_price(self, error_tol=1e-3, max_iter=50, verbose=0):
        """
        Compute the equilibrium price function associated with Lucas
        tree lt

        Parameters
        ----------
        error_tol, max_iter, verbose
            Arguments to be passed directly to
            `quantecon.compute_fixed_point`. See that docstring for more
            information

        Returns
        -------
        price : array_like(float)
            The prices at the grid points in the attribute `grid` of the
            object

        """
        # == simplify notation == #
        grid, grid_size = self.grid, self.grid_size
        lucas_operator, gamma = self.lucas_operator, self.gamma

        # == Create storage array for compute_fixed_point. Reduces memory
        # allocation and speeds code up == #
        Tf = np.empty(grid_size)

        # == Initial guess, just a vector of zeros == #
        f_init = np.zeros(grid_size)
        f = compute_fixed_point(lucas_operator, f_init, error_tol,
                                max_iter, verbose, Tf=Tf)

        price = f * grid**gamma

        return price
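
A hedged usage sketch: assuming this method belongs to a LucasTree-style
class exposing the attributes referenced above (grid, grid_size, gamma,
lucas_operator), the equilibrium price would be computed as follows
(constructor arguments are illustrative):

# tree = LucasTree(gamma=2.0, beta=0.95, alpha=0.90, sigma=0.1)  # hypothetical
# price = tree.compute_lt_price(error_tol=1e-6, max_iter=500, verbose=1)
# plt.plot(tree.grid, price)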
def _solve_via_pfi(cp, c_init):
    "compute policy rule using policy function iteration"
    p = compute_fixed_point(cp.coleman_operator, c_init, verbose=False,
                            error_tol=1e-5,
                            max_iter=1000)

    return p
Example #4
def all_param_interact(c, L0, L1, a0, b0, a1, b1, m):
    f0 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=a0, b=b0), 1e-6, np.inf)
    f0 = f0 / np.sum(f0)
    f1 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=a1, b=b1), 1e-6, np.inf)
    f1 = f1 / np.sum(f1)  # Make sure sums to 1

    # Create an instance of our WaldFriedman class
    wf = WaldFriedman(c, L0, L1, f0, f1, m=m)
    # Solve using qe's `compute_fixed_point` function
    J = qe.compute_fixed_point(wf.bellman_operator,
                               np.zeros(m),
                               error_tol=1e-7,
                               verbose=False,
                               print_skip=10,
                               max_iter=500)
    lb, ub = wf.find_cutoff_rule(J)

    # Get draws
    ndraws = 500
    cdist, tdist = wf.stopping_dist(ndraws=ndraws)

    fig, ax = plt.subplots(2, 2, figsize=(22, 14))

    ax[0, 0].plot(f0,
                  marker="o",
                  markersize=2.5,
                  linestyle="None",
                  label=r"$f_0$")
    ax[0, 0].plot(f1,
                  marker="o",
                  markersize=2.5,
                  linestyle="None",
                  label=r"$f_1$")
    ax[0, 0].set_ylabel(r"Probability of $z_k$")
    ax[0, 0].set_xlabel(r"$k$")
    ax[0, 0].set_title("Distributions over Outcomes", size=24)

    ax[0, 1].plot(wf.pgrid, J)
    ax[0, 1].annotate(r"$\rho_1$", xy=(lb + 0.025, 0.5), size=14)
    ax[0, 1].annotate(r"$\rho_2$", xy=(ub + 0.025, 0.5), size=14)
    ax[0, 1].vlines(lb, 0.0, wf.payoff_choose_f1(lb), linestyle="--")
    ax[0, 1].vlines(ub, 0.0, wf.payoff_choose_f0(ub), linestyle="--")
    ax[0, 1].set_ylim(0, 0.5 * max(L0, L1))
    ax[0, 1].set_ylabel("Value of Bellman")
    ax[0, 1].set_xlabel(r"$p_k$")
    ax[0, 1].set_title("Bellman Equation", size=24)

    ax[1, 0].hist(tdist, bins=np.max(tdist))
    ax[1, 0].set_title("Stopping Times", size=24)
    ax[1, 0].set_xlabel("Time")
    ax[1, 0].set_ylabel("Density")

    ax[1, 1].hist(cdist, bins=2)
    ax[1, 1].set_title("Correct Decisions", size=24)
    ax[1, 1].annotate("Percent Correct p={}".format(np.mean(cdist)),
                      xy=(0.05, ndraws / 2),
                      size=18)

    fig.tight_layout()
    fig.show()
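
The signature of all_param_interact suggests it is driven by interactive
sliders; a hypothetical wiring with ipywidgets (not shown in the source)
could look like this:

from ipywidgets import interact, fixed

interact(all_param_interact,
         c=(0.1, 5.0, 0.1), L0=(1.0, 50.0, 1.0), L1=(1.0, 50.0, 1.0),
         a0=(0.5, 5.0, 0.5), b0=(0.5, 5.0, 0.5),
         a1=(0.5, 5.0, 0.5), b1=(0.5, 5.0, 0.5),
         m=fixed(25))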
 def test_contraction_1(self):
     "compute_fp: convergence inside interval of convergence"
     f = lambda x: self.T(x, self.mu_1)
     for i in self.unit_inverval:
         # should have fixed point of 0.0
         self.assertTrue(abs(compute_fixed_point(f, i, **self.kwargs))
                         < 1e-4)
Example #6
 def test_contraction_2(self):
     "compute_fp: convergence inside interval of convergence"
     f = lambda x: self.T(x, self.mu_2)
     fp = (4 * self.mu_2 - 1) / (4 * self.mu_2)
     for i in self.unit_inverval:
         # This should converge to fp
         assert_(abs(compute_fixed_point(f, i, **self.kwargs) - fp) < 1e-4)
Example #7
def _solve_via_pfi(cp, c_init):
    "compute policy rule using policy function iteration"
    p = compute_fixed_point(cp.coleman_operator, c_init, verbose=False,
                            error_tol=1e-5,
                            max_iter=1000)

    return p
 def test_not_contraction_2(self):
     "compute_fp: no convergence outside interval of convergence"
     f = lambda x: self.T(x, self.mu_2)
     for i in self.unit_inverval:
         # This shouldn't converge to 0.0
         self.assertFalse(abs(compute_fixed_point(f, i, **self.kwargs))
                          < 1e-4)
def _solve_via_vfi(jv):
    "compute policy rules via value function iteration"
    v_init = jv.x_grid * 0.6
    V = compute_fixed_point(jv.bellman_operator, v_init,
                            max_iter=3000,
                            error_tol=1e-5)
    return V
 def test_not_contraction_1(self):
     "compute_fp: no convergence outside interval of convergence"
     f = lambda x: self.T(x, self.mu_1)
     fp = (4 * self.mu_1 - 1) / (4 * self.mu_1)
     for i in self.unit_inverval:
         # This should not converge (because the unique fixed point is 0.0)
         self.assertFalse(abs(compute_fixed_point(f, i, **self.kwargs)-fp)
                          < 1e-4)
 def test_contraction_2(self):
     "compute_fp: convergence inside interval of convergence"
     f = lambda x: self.T(x, self.mu_2)
     fp = (4 * self.mu_2 - 1) / (4 * self.mu_2)
     for i in self.unit_inverval:
         # This should converge to fp
         self.assertTrue(abs(compute_fixed_point(f, i, **self.kwargs)-fp)
                         < 1e-4)
    @classmethod
    def setUpClass(cls):
        jv = JvWorker(A=A, alpha=alpha, beta=beta, grid_size=grid_size)
        cls.jv = jv

        # compute solution
        v_init = _get_vf_guess(jv)
        cls.V = compute_fixed_point(jv.bellman_operator, v_init)
        cls.s_pol, cls.phi_pol = jv.bellman_operator(cls.V * 0.999,
                                                     return_policies=True)
    def test_num_iter_one(self):
        init = 1.
        error_tol = self.coeff

        for method in self.methods:
            fp_computed = compute_fixed_point(self.f, init,
                                              error_tol=error_tol,
                                              method=method)
            ok_(fp_computed <= error_tol * 2)
    def test_2d_input(self):
        error_tol = self.coeff**4

        for method in self.methods:
            init = np.array([[-1, 0.5], [-1/3, 0.1]])
            fp_computed = compute_fixed_point(self.f, init,
                                              error_tol=error_tol,
                                              method=method)
            ok_((fp_computed <= error_tol * 2).all())
    @classmethod
    def setUpClass(cls):
        jv = JvWorker(A=A, alpha=alpha, beta=beta, grid_size=grid_size)
        cls.jv = jv

        # compute solution
        v_init = _get_vf_guess(jv)
        cls.V = compute_fixed_point(jv.bellman_operator, v_init)
        cls.s_pol, cls.phi_pol = jv.bellman_operator(cls.V * 0.999,
                                                     return_policies=True)
    def test_imitation_game_method(self):
        "compute_fp: Test imitation game method"
        method = 'imitation_game'
        error_tol = self.kwargs['error_tol']

        for mu in [self.mu_1, self.mu_2]:
            for i in self.unit_inverval:
                fp_computed = compute_fixed_point(self.T, i, method=method,
                                                  mu=mu, **self.kwargs)
                self.assertTrue(
                    abs(self.T(fp_computed, mu=mu) - fp_computed) <= error_tol
                )

            # numpy array input
            i = np.asarray(self.unit_inverval)
            fp_computed = compute_fixed_point(self.T, i, method=method, mu=mu,
                                              **self.kwargs)
            self.assertTrue(
                abs(self.T(fp_computed, mu=mu) - fp_computed).max() <=
                error_tol
            )
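
For reference, the 'imitation_game' method can also be exercised on a
stand-alone contraction; a minimal sketch in which the map f and the
tolerances are illustrative:

from quantecon import compute_fixed_point

f = lambda x: 0.5 * x + 1.0  # contraction with unique fixed point 2.0
fp = compute_fixed_point(f, 0.0, error_tol=1e-6, max_iter=200,
                         method='imitation_game', verbose=0)
# abs(f(fp) - fp) is now within error_tol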
    def test_num_iter_large(self):
        init = 1.
        buff_size = 2**8  # buff_size in 'imitation_game'
        max_iter = buff_size + 2
        error_tol = self.coeff**max_iter

        for method in self.methods:
            fp_computed = compute_fixed_point(self.f, init,
                                              error_tol=error_tol,
                                              max_iter=max_iter, method=method,
                                              print_skip=max_iter)
            ok_(fp_computed <= error_tol * 2)
Example #19
def _solve_via_vfi(cp, v_init, return_both=False):
    "compute policy rule using value function iteration"
    v = compute_fixed_point(cp.bellman_operator, v_init, verbose=False,
                            error_tol=1e-5,
                            max_iter=1000)

    # Run one more time to get the policy
    p = cp.bellman_operator(v, return_policy=True)

    if return_both:
        return v, p
    else:
        return p
def _new_solution(gm, f, grp):
    "gets a new set of solution objects and updates the data file"

    # compute value function and policy rule using vfi
    v_init = 5 * gm.u(gm.grid) - 25
    v = compute_fixed_point(gm.bellman_operator, v_init, error_tol=_tol,
                            max_iter=5000)
    # sigma = gm.get_greedy(v)

    # write all arrays to file
    write_array(f, grp, v, "v")

    # return data
    return v
def all_param_interact(c, L0, L1, a0, b0, a1, b1, m):
    f0 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=a0, b=b0), 1e-6, np.inf)
    f0 = f0 / np.sum(f0)
    f1 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=a1, b=b1), 1e-6, np.inf)
    f1 = f1 / np.sum(f1)  # Make sure sums to 1

    # Create an instance of our WaldFriedman class
    wf = WaldFriedman(c, L0, L1, f0, f1, m=m)
    # Solve using qe's `compute_fixed_point` function
    J = qe.compute_fixed_point(wf.bellman_operator, np.zeros(m),
                               error_tol=1e-7, verbose=False,
                               print_skip=10, max_iter=500)
    lb, ub = wf.find_cutoff_rule(J)

    # Get draws
    ndraws = 500
    cdist, tdist = wf.stopping_dist(ndraws=ndraws)

    fig, ax = plt.subplots(2, 2, figsize=(22, 14))

    ax[0, 0].plot(f0, marker="o", markersize=2.5, linestyle="None", label=r"$f_0$")
    ax[0, 0].plot(f1, marker="o", markersize=2.5, linestyle="None", label=r"$f_1$")
    ax[0, 0].set_ylabel(r"Probability of $z_k$")
    ax[0, 0].set_xlabel(r"$k$")
    ax[0, 0].set_title("Distributions over Outcomes", size=24)

    ax[0, 1].plot(wf.pgrid, J)
    ax[0, 1].annotate(r"$\alpha$", xy=(lb+0.025, 0.5), size=14)
    ax[0, 1].annotate(r"$\beta$", xy=(ub+0.025, 0.5), size=14)
    ax[0, 1].vlines(lb, 0.0, wf.payoff_choose_f1(lb), linestyle="--")
    ax[0, 1].vlines(ub, 0.0, wf.payoff_choose_f0(ub), linestyle="--")
    ax[0, 1].set_ylim(0, 0.5*max(L0, L1))
    ax[0, 1].set_ylabel("Value of Bellman")
    ax[0, 1].set_xlabel(r"$p_k$")
    ax[0, 1].set_title("Bellman Equation", size=24)

    ax[1, 0].hist(tdist, bins=np.max(tdist))
    ax[1, 0].set_title("Stopping Times", size=24)
    ax[1, 0].set_xlabel("Time")
    ax[1, 0].set_ylabel("Density")

    ax[1, 1].hist(cdist, bins=2)
    ax[1, 1].set_title("Correct Decisions", size=24)
    ax[1, 1].annotate("Percent Correct p={}".format(np.mean(cdist)), xy=(0.05, ndraws/2), size=18)

    fig.tight_layout()
    fig.show()
def compute_asset_series_bell(cp, z_seq, T=T, verbose=False):
    """
    Simulates a time series of length T for assets, given optimal savings
    behavior. Parameter cp is an instance of consumerProblem.
    """
    Pi, z_vals, R, w = cp.Pi, cp.z_vals, cp.R, cp.w  # Simplify names
    v_init, h_init, c_init = initialize(cp)
    K_bell = lambda c: bellman_operator(c, cp, return_policy=False)
    v = qe.compute_fixed_point(K_bell, v_init, verbose=False,
                               max_iter=250, error_tol=tol_bell)
    policy = bellman_operator(v, cp, return_policy=True)
    h = policy[:, 0]
    l = policy[:, 1]
    asset_grid = cp.asset_grid
    k = cp.k

    @jit(nopython=True)
    def hf(a, i_z):
        # linear interpolation of the savings policy h on the evenly
        # spaced asset grid (grid step k)
        i = int((a - asset_grid[0]) / k)
        return ((h[i, i_z] * (asset_grid[i+1] - a)
                 + h[i+1, i_z] * (a - asset_grid[i]))
                / (asset_grid[i+1] - asset_grid[i]))

    @jit(nopython=True)
    def lf(a, i_z):
        # linear interpolation of the leisure policy l, same scheme as hf
        i = int((a - asset_grid[0]) / k)
        return ((l[i, i_z] * (asset_grid[i+1] - a)
                 + l[i+1, i_z] * (a - asset_grid[i]))
                / (asset_grid[i+1] - asset_grid[i]))

    a = np.zeros(T)
    a[0] = cp.b
    z_rlz = np.zeros(T)  # total labour supply after endogenous decisions, i.e. e*(1-l)
    h_val = np.zeros(T)
    l_val = np.zeros(T)  # leisure choice l -- do NOT confuse with labour
    a, h_val, l_val, z_rlz = series(T, a, h_val, l_val, z_rlz, z_seq, z_vals,
                                    hf, lf)

    return (np.asarray(a), np.asarray(z_rlz), np.asarray(h_val),
            np.asarray(l_val), policy)
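
The hand-rolled interpolators hf and lf above rely on the asset grid being
evenly spaced with step k; on such a grid they agree with np.interp. A small
self-contained check with illustrative values:

import numpy as np

asset_grid = np.linspace(0.0, 10.0, 101)  # evenly spaced, step k = 0.1
k = asset_grid[1] - asset_grid[0]
h_col = np.sqrt(asset_grid)               # stand-in for one policy column h[:, i_z]

def hf_manual(a):
    # the same two-point linear interpolation formula used in hf above
    i = int((a - asset_grid[0]) / k)
    return ((h_col[i] * (asset_grid[i+1] - a)
             + h_col[i+1] * (a - asset_grid[i]))
            / (asset_grid[i+1] - asset_grid[i]))

a = 3.14159
assert abs(hf_manual(a) - np.interp(a, asset_grid, h_col)) < 1e-12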
def compute_lt_price(tree, error_tol=1e-6, max_iter=500, verbose=0):
    """
    Compute the equilibrium price function associated with Lucas
    tree lt

    Parameters
    ----------
    tree : An instance of LucasTree
        Contains parameters

    error_tol, max_iter, verbose
        Arguments to be passed directly to
        `quantecon.compute_fixed_point`. See that docstring for more
        information

    Returns
    -------
    price : array_like(float)
        The prices at the grid points in the attribute `grid` of the
        object

    """
    # == simplify notation == #
    grid, grid_size = tree.grid, tree.grid_size
    gamma = tree.gamma

    # == Create storage array for compute_fixed_point. Reduces memory
    # allocation and speeds code up == #
    Tf = np.empty(grid_size)

    # == Initial guess, just a vector of zeros == #
    f_init = np.zeros(grid_size)
    f = compute_fixed_point(lucas_operator,
                            f_init,
                            error_tol,
                            max_iter,
                            verbose,
                            10,
                            'iteration',
                            tree,
                            Tf=Tf)

    price = f * grid**gamma

    return price
Example #25
def WaldFriedman_Interactive(m):
    # NOTE: Could add sliders over other variables
    #       as well, but only doing over n for now

    # Choose parameters
    c = 1.25
    L0 = 25.0
    L1 = 25.0

    # Choose n points and distributions
    f0 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=2.0, b=2.5), 1e-6, np.inf)
    f0 = f0 / np.sum(f0)
    f1 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=2.5, b=2.0), 1e-6, np.inf)
    f1 = f1 / np.sum(f1)  # Make sure sums to 1

    # Create WaldFriedman class
    wf = WaldFriedman(c, L0, L1, f0, f1, m=m)

    # Solve via VFI
    # Solve using qe's `compute_fixed_point` function
    J = qe.compute_fixed_point(wf.bellman_operator,
                               np.zeros(m),
                               error_tol=1e-7,
                               verbose=False,
                               max_iter=1000)

    lb, ub = wf.find_cutoff_rule(J)

    # Plot
    fig, ax = plt.subplots(figsize=(8, 6))

    fig.suptitle("Value function", size=18)
    ax.set_xlabel("Probability of Model 0")
    ax.set_ylabel("Value Function")

    ax.set_xlim(0, 1.0)
    ax.set_ylim(0, 0.5 * max(L0, L1))
    ax.plot(wf.pgrid, J)

    ax.annotate(r"$\rho_2$", xy=(ub + 0.025, 0.5), size=14)
    ax.annotate(r"$\rho_1$", xy=(lb + 0.025, 0.5), size=14)
    ax.vlines(lb, 0.0, wf.payoff_choose_f1(lb), linestyle="--")
    ax.vlines(ub, 0.0, wf.payoff_choose_f0(ub), linestyle="--")

    fig.show()
def WaldFriedman_Interactive(m):
    # NOTE: Could add sliders over other variables
    #       as well, but only doing over n for now

    # Choose parameters
    c = 1.25
    L0 = 25.0
    L1 = 25.0

    # Choose n points and distributions
    f0 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=2.0, b=2.5), 1e-6, np.inf)
    f0 = f0 / np.sum(f0)
    f1 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=2.5, b=2.0), 1e-6, np.inf)
    f1 = f1 / np.sum(f1)  # Make sure sums to 1

    # Create WaldFriedman class
    wf = WaldFriedman(c, L0, L1, f0, f1, m=m)

    # Solve via VFI
    # Solve using qe's `compute_fixed_point` function
    J = qe.compute_fixed_point(wf.bellman_operator, np.zeros(m),
                               error_tol=1e-7, verbose=False,
                               max_iter=1000)

    lb, ub = wf.find_cutoff_rule(J)

    # Plot
    fig, ax = plt.subplots(figsize=(8, 6))

    fig.suptitle("Value function", size=18)
    ax.set_xlabel("Probability of Model 0")
    ax.set_ylabel("Value Function")

    ax.set_xlim(0, 1.0)
    ax.set_ylim(0, 0.5 * max(L0, L1))
    ax.plot(wf.pgrid, J)

    ax.annotate(r"$\beta$", xy=(ub+0.025, 0.5), size=14)
    ax.annotate(r"$\alpha$", xy=(lb+0.025, 0.5), size=14)
    ax.vlines(lb, 0.0, wf.payoff_choose_f1(lb), linestyle="--")
    ax.vlines(ub, 0.0, wf.payoff_choose_f0(ub), linestyle="--")

    fig.show()
Example #27
def compute_value_function_cached(grid, beta, alpha, shocks):
    """
    Compute the value function by iterating on the Bellman operator.
    The work is done by QuantEcon's compute_fixed_point function.
    """
    Tw = np.empty(len(grid))
    initial_w = 5 * np.log(grid) - 25

    v_star = compute_fixed_point(bellman_operator,
                                 initial_w,
                                 1e-4,  # error_tol
                                 100,   # max_iter
                                 True,  # verbose
                                 5,     # print_skip
                                 grid,
                                 beta,
                                 np.log,
                                 lambda k: k**alpha,
                                 shocks,
                                 Tw=Tw,
                                 compute_policy=False)
    return v_star
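
Under the default method='iteration', compute_fixed_point amounts to
repeatedly applying the operator until successive iterates are within
error_tol; a simplified sketch of that loop (not quantecon's actual
implementation):

import numpy as np

def iterate_to_fixed_point(T, v, error_tol=1e-4, max_iter=100,
                           *args, **kwargs):
    # apply T at most max_iter times, stopping once the sup-norm distance
    # between successive iterates falls below error_tol
    for _ in range(max_iter):
        new_v = T(v, *args, **kwargs)
        error = np.max(np.abs(new_v - v))
        v = new_v
        if error < error_tol:
            break
    return v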
"""
Origin: QE by John Stachurski and Thomas J. Sargent
Filename: career_vf_plot.py
Authors: John Stachurski and Thomas Sargent
LastModified: 11/08/2013

"""

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
from matplotlib import cm
import quantecon as qe
from career import CareerWorkerProblem

# === solve for the value function === #
wp = CareerWorkerProblem()
v_init = np.ones((wp.N, wp.N)) * 100
v = qe.compute_fixed_point(wp.bellman_operator, v_init)

# === plot value function === #
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d")
tg, eg = np.meshgrid(wp.theta, wp.epsilon)
ax.plot_surface(tg, eg, v.T, rstride=2, cstride=2, cmap=cm.jet, alpha=0.5, linewidth=0.25)
ax.set_zlim(150, 200)
ax.set_xlabel("theta", fontsize=14)
ax.set_ylabel("epsilon", fontsize=14)
plt.show()
"""
Origin: QE by John Stachurski and Thomas J. Sargent
Filename: ifp_savings_plots.py
Authors: John Stachurski, Thomas J. Sargent
LastModified: 11/08/2013

"""

from matplotlib import pyplot as plt
from quantecon import compute_fixed_point
from quantecon.models import ConsumerProblem

# === solve for optimal consumption === #
m = ConsumerProblem(r=0.03, grid_max=4)
v_init, c_init = m.initialize()

# The Coleman operator maps a consumption policy c into an updated policy
c = compute_fixed_point(m.coleman_operator, c_init)
a = m.asset_grid
R, z_vals = m.R, m.z_vals

# === generate savings plot === #
fig, ax = plt.subplots()
ax.plot(a, R * a + z_vals[0] - c[:, 0], label='low income')
ax.plot(a, R * a + z_vals[1] - c[:, 1], label='high income')
ax.plot(a, a, 'k--')
ax.set_xlabel('current assets')
ax.set_ylabel('next period assets')
ax.legend(loc='upper left')
plt.show()
@raises(ValueError)  # assumes nose.tools.raises is imported; max_iter must be positive
def test_raises_value_error_nonpositive_max_iter():
    f = lambda x: 0.5*x
    init = 1.
    max_iter = 0
    fp = compute_fixed_point(f, init, max_iter=max_iter)
        V_upd[ik] = np.nanmax(M_X[ik, :, :])
        # use nanargmax for both policies, consistent with nanmax above
        g_k[ik] = K[np.unravel_index(np.nanargmax(M_X[ik, :, :], axis=None),
                                     M_X[ik, :, :].shape)[0]]
        g_h[ik] = H[np.unravel_index(np.nanargmax(M_X[ik, :, :], axis=None),
                                     M_X[ik, :, :].shape)[1]]

    if return_policies:
        return V_upd, g_k, g_h
    else:
        return V_upd


qe.tic()
V = qe.compute_fixed_point(Bellman,
                           V0,
                           max_iter=2000,
                           error_tol=0.001,
                           print_skip=20)
V, g_k, g_h = Bellman(V, return_policies=True)
qe.toc()

## Plotting the value function:

plt.plot(K, V)
plt.xlabel('k')
plt.ylabel('v(k)')
plt.title('Value function')
plt.show()

L0 = 25
L1 = 25
a0, b0 = 2.5, 2.0
a1, b1 = 2.0, 2.5
m = 25

f0 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=a0, b=b0), 1e-6, np.inf)
f0 = f0 / np.sum(f0)
f1 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=a1, b=b1), 1e-6, np.inf)
f1 = f1 / np.sum(f1)  # Make sure sums to 1

# Create an instance of our WaldFriedman class
wf = WaldFriedman(c, L0, L1, f0, f1, m=m)
# Solve using qe's `compute_fixed_point` function
J = qe.compute_fixed_point(wf.bellman_operator, np.zeros(m),
                           error_tol=1e-7, verbose=False,
                           print_skip=10, max_iter=500)
lb, ub = wf.find_cutoff_rule(J)

# Get draws
ndraws = 500
cdist, tdist = wf.stopping_dist(ndraws=ndraws)

fig, ax = plt.subplots(2, 2, figsize=(12, 9))

ax[0, 0].plot(f0, label=r"$f_0$")
ax[0, 0].plot(f1, label=r"$f_1$")
ax[0, 0].set_ylabel(r"probability of $z_k$", size=14)
ax[0, 0].set_xlabel(r"$k$", size=14)
ax[0, 0].set_title("Distributions", size=14)
ax[0, 0].legend(fontsize=14)
        # Payoffs of choosing model 0, choosing model 1, and continuing
        p_c_0 = expect_loss_choose_0(p, L0)
        p_c_1 = expect_loss_choose_1(p, L1)
        p_con = expect_loss_cont(p, c, f0, f1, J_interp)

        J_out[p_ind] = min(p_c_0, p_c_1, p_con)

    return J_out


#  == Now run at given parameters == #

#  First set up distributions 
p_m1 = np.linspace(0, 1, 50)
f0 = np.clip(st.beta.pdf(p_m1, a=1, b=1), 1e-8, np.inf)
f0 = f0 / np.sum(f0)
f1 = np.clip(st.beta.pdf(p_m1, a=9, b=9), 1e-8, np.inf)
f1 = f1 / np.sum(f1)

# Build a grid
pg = np.linspace(0, 1, 251)
# Turn the Bellman operator into a function with one argument
bell_op = lambda vf: bellman_operator(pg, 0.5, f0, f1, 5.0, 5.0, vf)
# Pass it to qe's built-in iteration routine
J = qe.compute_fixed_point(bell_op,
                           np.zeros(pg.size),  # Initial guess
                           error_tol=1e-6,
                           verbose=True,
                           print_skip=5)
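
The lambda turns bellman_operator into the one-argument map in vf that
compute_fixed_point expects. The same freezing pattern on a toy operator, as
a self-contained sketch (toy_operator and its parameters are illustrative):

import numpy as np
import quantecon as qe

def toy_operator(grid, beta, vf):
    # a contraction in vf for 0 < beta < 1; fixed point is the constant 1
    return beta * vf + (1 - beta) * np.ones_like(grid)

grid = np.linspace(0, 1, 11)
T = lambda vf: toy_operator(grid, 0.5, vf)  # freeze grid and beta
v_star = qe.compute_fixed_point(T, np.zeros(grid.size), error_tol=1e-8,
                                max_iter=200, verbose=0)
# np.allclose(v_star, 1.0) holds up to error_tol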

Example #34
Authors: John Stachurski and Thomas Sargent
LastModified: 11/08/2013

"""

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
from matplotlib import cm
import quantecon as qe
from quantecon.models import CareerWorkerProblem

# === solve for the value function === #
wp = CareerWorkerProblem()
v_init = np.ones((wp.N, wp.N))*100
v = qe.compute_fixed_point(wp.bellman, v_init)

# === plot value function === #
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111, projection='3d')
tg, eg = np.meshgrid(wp.theta, wp.epsilon)
ax.plot_surface(tg,
                eg,
                v.T,
                rstride=2, cstride=2,
                cmap=cm.jet,
                alpha=0.5,
                linewidth=0.25)
ax.set_zlim(150, 200)
ax.set_xlabel('theta', fontsize=14)
ax.set_ylabel('epsilon', fontsize=14)
Example #35
# STEP 6: We create a loop to iterate the Bellman equation until the distance
# between two consecutive values of V is small enough, meaning that we have
# reached the steady state.

start = timeit.default_timer()

def Bellman_Labor(V0):  # a leaner version, since the full loop takes too much time
    V1 = np.zeros(dim)
    X = np.empty([dim, dim, dim])
    for i in range(dim):
        for j in range(dim):
            X[i, j, :] = M[i, j, :] + beta * V0

        V1[i] = np.nanmax(X[i, :, :])
    return V1

# Use quantecon's built-in routine to find the fixed point
V = qe.compute_fixed_point(Bellman_Labor, V0, error_tol=0.05, max_iter=500,
                           print_skip=50)
V_labor = Bellman_Labor(V)

stop = timeit.default_timer()
time_labor = stop - start
print('Time - Endogenous Labor: '+str(time_labor))

# We plot our results for the value & policy functions

plt.plot(k, V_labor, label='value function')
plt.legend()
plt.title('Endogenous Labor', size=15)
plt.xlabel('k')
plt.ylabel('v(k)')
plt.show()
    def solve_model(self, tol=1e-7):
        J = qe.compute_fixed_point(self.bellman_operator, np.zeros(self.m),
                                   error_tol=tol, verbose=False)

        self.J = J
        return J
Filename: odu_vfi_plots.py
Authors: John Stachurski and Thomas Sargent
"""

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
from scipy.interpolate import LinearNDInterpolator
import numpy as np
from quantecon import compute_fixed_point
from quantecon.models import SearchProblem


sp = SearchProblem(w_grid_size=100, pi_grid_size=100)
v_init = np.zeros(len(sp.grid_points)) + sp.c / (1 - sp.beta)
v = compute_fixed_point(sp.bellman_operator, v_init)
policy = sp.get_greedy(v)

# Make functions from these arrays by interpolation
vf = LinearNDInterpolator(sp.grid_points, v)
pf = LinearNDInterpolator(sp.grid_points, policy)

pi_plot_grid_size, w_plot_grid_size = 100, 100
pi_plot_grid = np.linspace(0.001, 0.99, pi_plot_grid_size)
w_plot_grid = np.linspace(0, sp.w_max, w_plot_grid_size)

#plot_choice = 'value_function'
plot_choice = 'policy_function'

if plot_choice == 'value_function':
    Z = np.empty((w_plot_grid_size, pi_plot_grid_size))
"""
Origin: QE by John Stachurski and Thomas J. Sargent
Filename: ifp_savings_plots.py
Authors: John Stachurski, Thomas J. Sargent
LastModified: 11/08/2013

"""

from matplotlib import pyplot as plt
from quantecon import compute_fixed_point
from quantecon.models import ConsumerProblem

# === solve for optimal consumption === #
m = ConsumerProblem(r=0.03, grid_max=4)
v_init, c_init = m.initialize()
c = compute_fixed_point(m.coleman_operator, c_init) #Coleman Operator takes in (c)?
a = m.asset_grid
R, z_vals = m.R, m.z_vals

# === generate savings plot === #
fig, ax = plt.subplots()
ax.plot(a, R * a + z_vals[0] - c[:, 0], label='low income')
ax.plot(a, R * a + z_vals[1] - c[:, 1], label='high income')
ax.plot(a, a, 'k--')
ax.set_xlabel('current assets')
ax.set_ylabel('next period assets')
ax.legend(loc='upper left')
plt.show()
Example #39
"""
Origin: QE by John Stachurski and Thomas J. Sargent
Filename: jv_test.py
Authors: John Stachurski and Thomas Sargent
LastModified: 11/08/2013

Tests jv.py with a particular parameterization.

"""
import matplotlib.pyplot as plt
from quantecon import compute_fixed_point, JvWorker

# === solve for optimal policy === #
wp = JvWorker(grid_size=25)
v_init = wp.x_grid * 0.5
V = compute_fixed_point(wp.bellman_operator, v_init, max_iter=40)
s_policy, phi_policy = wp.bellman_operator(V, return_policies=True)

# === plot policies === #
fig, ax = plt.subplots()
ax.set_xlim(0, max(wp.x_grid))
ax.set_ylim(-0.1, 1.1)
ax.plot(wp.x_grid, phi_policy, 'b-', label='phi')
ax.plot(wp.x_grid, s_policy, 'g-', label='s')
ax.legend()
plt.show()

Example #40
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
from matplotlib import cm
import quantecon as qe
from career import CareerWorkerProblem

# === set matplotlib parameters === #
plt.rcParams['axes.xmargin'] = 0
plt.rcParams['axes.ymargin'] = 0
plt.rcParams['patch.force_edgecolor'] = True

# === solve for the value function === #
wp = CareerWorkerProblem()
v_init = np.ones((wp.N, wp.N)) * 100
v = qe.compute_fixed_point(wp.bellman_operator,
                           v_init,
                           max_iter=200,
                           print_skip=25)

# === plot value function === #
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection='3d')
tg, eg = np.meshgrid(wp.theta, wp.epsilon)
ax.plot_surface(tg,
                eg,
                v.T,
                rstride=2,
                cstride=2,
                cmap=cm.jet,
                alpha=0.5,
                linewidth=0.25)
ax.set_zlim(150, 200)