Code example #1
    def A(self, g, Ag, std_range=3, shock_state_size=20):
        """
        Apply the operator A to g, writing the result into Ag.  The argument
        g is a vector, which is converted to a function by linear interpolation.
        The expectation over the N(0, 1) shock is taken with respect to a
        discretized distribution obtained via Tauchen's method.  Returns the
        norm of Ag under the stationary distribution.

        """

        # Unpack parameters
        β, γ, ρ, σ, x0, α = self.β, self.γ, self.ρ, self.σ, self.x0, self.α 
        b, k0, k1 = self.b, self.k0, self.k1

        # Extract state and probs for N(0, 1) shocks
        mc = qe.tauchen(0, 1, std_range, shock_state_size)
        w_vec = mc.state_values
        p_vec = mc.P[0, :]  # Any row, all columns

        # Interpolate g and allocate memory for new g
        g_func = lambda x: np.interp(x, self.x_grid, g)

        # Apply the operator A to g, computing Ag
        for (i, x) in enumerate(self.x_grid):
            mf = k0 * exp(k1 * x)
            Ag[i] = mf * np.dot(g_func(ρ * x + b + w_vec), p_vec)

        # Calculate the norm of Ag
        Ag_func = lambda x: np.interp(x, self.x_grid, Ag)
        r = np.sqrt(np.dot(Ag_func(self.sx_vec)**2, self.sp_vec))

        return r
Code example #2
    def __init__(self, β=0.99,
                       γ=2.5,
                       ρ=0.9,
                       σ=0.002,
                       x0=0.1,
                       α=1,
                       grid_size=60):

        self.β, self.γ, self.ρ, self.σ = β, γ, ρ, σ
        self.α, self.x0 = α, x0

        # derived constants
        self.b = x0 + σ**2 * (1 - γ)
        self.k0 = β * exp(self.b * (1 - γ) + σ**2 * (1 - γ)**2 / 2)
        self.k1 = (ρ - α) * (1 - γ)

        # Parameters in the stationary distribution
        self.svar = σ**2 / (1 - ρ**2)
        self.ssd = sqrt(self.svar)
        self.smean = self.b / (1 - ρ)

        # A discrete approximation of the stationary dist 
        std_range, n = 3, 20
        mc = qe.tauchen(0, 1, std_range, n)
        w_vec = mc.state_values
        self.sx_vec = self.smean + self.ssd * w_vec
        self.sp_vec = mc.P[0, :]  # Any row

        # An increasing grid of points for interpolation
        # (np.interp requires an increasing interpolation grid)
        a, b = self.smean - 3 * self.ssd, self.smean + 3 * self.ssd
        self.x_grid = np.linspace(a, b, grid_size)
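The A operator in example #1 and the __init__ above appear to be methods of the same model class. As a minimal sketch of how the two fragments fit together (assuming `model` is an instance of that class; the function name below is ours), the dominant eigenvalue of A can be estimated by normalized power iteration, reusing the norm that A returns:

import numpy as np

def estimate_spectral_radius(model, num_iter=50):
    # Power iteration: g_{k+1} = A g_k / ||A g_k||.  For a positive operator,
    # the norm ||A g_k|| converges to the spectral radius of A.  Recall that
    # model.A writes A g into its second argument and returns the norm of the
    # result under the stationary distribution.
    g = np.ones_like(model.x_grid)
    Ag = np.empty_like(g)
    r = np.nan
    for _ in range(num_iter):
        r = model.A(g, Ag)
        g = Ag / r          # renormalize to avoid overflow/underflow
    return r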
Code example #3
    def __init__(self, β=0.96, mc=None, γ=2.0, g=np.exp):
        self.β, self.γ = β, γ
        self.g = g

        # == A default process for the Markov chain == #
        if mc is None:
            self.ρ = 0.9
            self.σ = 0.02
            self.mc = qe.tauchen(self.ρ, self.σ, n=25)
        else:
            self.mc = mc

        self.n = self.mc.P.shape[0]
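A class like this is typically paired with a routine that solves for the price-dividend ratio of a Lucas tree under CRRA preferences. The sketch below shows that standard linear-algebra solution; it is an illustration under that assumption, not necessarily the solver that accompanies this __init__ in its source, and it takes `ap` to be an instance of the class above:

import numpy as np
from numpy.linalg import solve, eigvals

def tree_price(ap):
    # The price-dividend ratio v solves v = J(1 + v), where
    # J[i, j] = beta * P[i, j] * g(x[j])**(1 - gamma)
    beta, gamma = ap.β, ap.γ
    P, x = ap.mc.P, ap.mc.state_values
    J = beta * P * ap.g(x)**(1 - gamma)
    # A unique solution requires the spectral radius of J to be below one
    assert np.max(np.abs(eigvals(J))) < 1, "Spectral radius condition fails"
    I = np.identity(ap.n)
    return solve(I - J, J @ np.ones(ap.n))   # v = (I - J)^{-1} J 1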
Code example #4
    def __init__(self, beta=0.96, mc=None, gamma=2.0, g=np.exp):
        self.beta, self.gamma = beta, gamma
        self.g = g

        # == A default process for the Markov chain == #
        if mc is None:
            self.rho = 0.9
            self.sigma = 0.02
            self.mc = qe.tauchen(self.rho, self.sigma, n=25)
        else:
            self.mc = mc

        self.n = self.mc.P.shape[0]
Code example #5
    def __init__(self, beta=0.96, mc=None, gamma=2.0, g=np.exp):
        self.beta, self.gamma = beta, gamma
        self.g = g

        # == A default process for the Markov chain == #
        if mc is None:
            self.rho = 0.9
            self.sigma = 0.02
            self.mc = qe.tauchen(self.rho, self.sigma, n=25)
        else:
            self.mc = mc

        self.n = self.mc.P.shape[0]
Code example #6
import numpy as np
import matplotlib.pyplot as plt
import quantecon as qe

mc = qe.tauchen(0.96, 0.25, n=25)  
sim_length = 80

x_series = mc.simulate(sim_length, init=np.median(mc.state_values))
lambda_series = np.exp(x_series)
d_series = np.cumprod(lambda_series) # assumes d_0 = 1

fig, axes = plt.subplots(2, 2)
axes[0, 0].plot(x_series, 'b-', lw=2, label=r'$X_t$')
axes[0, 1].plot(lambda_series, 'b-', lw=2, label=r'$g_t$')
axes[1, 0].plot(d_series, 'b-', lw=2, label=r'$d_t$')
axes[1, 1].plot(np.log(d_series), 'b-', lw=2, label=r'$\log \, d_t$')
for ax in axes.flatten():
    ax.legend(loc='upper left', frameon=False)
plt.tight_layout()
plt.show()
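The cumulative product line encodes the dividend recursion. With $d_0 = 1$ and growth factor $e^{X_t}$,

$$
d_t = \prod_{s=1}^{t} e^{X_s},
\qquad
\log d_t = \sum_{s=1}^{t} X_s ,
$$

so the last panel simply plots the running sum of the simulated $X_t$ series.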
Code example #7
import numpy as np
import matplotlib.pyplot as plt
import quantecon as qe
from scipy.linalg import solve, eigvals

n = 25  # size of state space
beta = 0.9
mc = qe.tauchen(0.96, 0.02, n=n)  

K = mc.P * np.exp(mc.state_values)

warning_message = "Spectral radius condition fails"
assert np.max(np.abs(eigvals(K))) < 1 / beta, warning_message

I = np.identity(n)
v = solve(I - beta * K, beta * K @ np.ones(n))

fig, ax = plt.subplots()
ax.plot(mc.state_values, v, 'g-o', lw=2, alpha=0.7, label=r'$v$')
ax.set_ylabel("price-dividend ratio")
ax.set_xlabel("state")
ax.legend(loc='upper left')
plt.show()
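For reference, the linear solve implements the present-value relation behind the price-dividend ratio. With $K$ as defined above,

$$
v = \beta K (\mathbb{1} + v)
\quad\Longrightarrow\quad
v = (I - \beta K)^{-1} \beta K \mathbb{1},
$$

which has a unique solution precisely when the spectral radius of $K$ is strictly less than $1/\beta$, the condition checked by the assert.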
Code example #8
import numpy as np
import matplotlib.pyplot as plt
import quantecon as qe
from scipy.linalg import solve, eigvals

n = 25  # size of state space
beta = 0.9
mc = qe.tauchen(0.96, 0.1, n=n)  

K = mc.P * mc.state_values

warning_message = "Spectral radius condition fails"
assert np.max(np.abs(eigvals(K))) < 1 / beta, warning_message

I = np.identity(n)
v = solve(I - beta * K, beta * K @ np.ones(n))

fig, ax = plt.subplots()
ax.plot(mc.state_values, v, 'g-o', lw=2, alpha=0.7, label=r'$v$')
ax.set_ylabel("price-dividend ratio")
ax.set_xlabel("state")
ax.legend(loc='upper left')
plt.show()
Code example #9
    def __init__(
            self,
            sigma=2,  # inverse intertemporal elasticity
            r=0.03,  # rate on assets
            beta=0.95,  # discount rate
            rmmin=0.05,  # mortgage rate min
            rmmax=0.06,  # mortgage rate max
            rmsize=2,  # number of rms
            gamma=0.8,  # ltv ratio
            mu=0.025,  # house price growth (which is equal to income growth)
            xmin=-1.025,  # inverse accumulated equity min
            xmax=0.45,
            xsize=60,
            amin=0,
            amax=1,
            asize=64,
            ymin=-0.5,
            ymax=0.5,
            ysize=46,
            sigmay=0.1,
            sigmap=0.065,
            yshocksize=3,
            pshocksize=3,
            Fnodes=np.array([0.105, .052]),
            probF=np.array([0.875, 0.125]),
    ):

        # == assigning parameters to "self" == #

        (self.sigma, self.r, self.beta, self.rmmin, self.rmmax, self.rmsize,
         self.gamma, self.mu, self.xmin, self.xmax, self.xsize, self.amin,
         self.amax, self.asize, self.ymin, self.ymax, self.ysize, self.sigmay,
         self.sigmap, self.yshocksize, self.pshocksize, self.Fnodes,
         self.probF) = (sigma, r, beta, rmmin, rmmax, rmsize, gamma, mu, xmin,
                        xmax, xsize, amin, amax, asize, ymin, ymax, ysize,
                        sigmay, sigmap, yshocksize, pshocksize, Fnodes, probF)

        # == getting grids == #

        rmnodes = self.rmnodes = np.linspace(rmmin, rmmax, rmsize)
        ynodes = np.linspace(ymin, ymax, ysize)

        # xgrid
        xnodes = np.linspace(xmin, xmax, xsize)

        # agrid
        # this is not evenly spaced. More nodes at the lower a values
        anodes = np.empty(asize)
        for i in range(asize):
            anodes[i] = (1.0 / (asize - 1)) * (1.0 * i - 1.0)
        for i in range(asize):
            anodes[i] = np.exp(
                np.log(amax - amin + 1) * anodes[i]) + amin - 1.0

        self.anodes = anodes

        # == getting grids and probabilities for shocks == #

        mc_y = qe.tauchen(0, sigmay, n=yshocksize)
        self.probyshock = probyshock = mc_y.P[0, :]
        self.yshocknodes = yshocknodes = mc_y.state_values
        self.probyshock_cum = probyshock_cum = np.cumsum(self.probyshock)

        mc_p = qe.tauchen(0, sigmap, n=pshocksize)
        self.probpshock = probpshock = mc_p.P[0, :]
        self.pshocknodes = pshocknodes = mc_p.state_values
        self.probpshock_cum = probpshock_cum = np.cumsum(self.probpshock)

        # defining the location of the x value closest to 0
        # (used when constructing refinance value function)
        self.xreset = np.argmin(np.abs(xnodes))

        # == index tables giving the closest grid match after a shock, for a, x and y == #
        # For each grid value and each shock realisation, these store the index of the
        # grid point closest to the post-shock value implied by the law of motion.  For
        # example, for a given asset level and shock, anearest records the index of the
        # closest asset grid point to the resulting asset value.

        xnearest = np.empty((xsize, pshocksize), dtype=int)
        for i in range(xsize):
            for j in range(pshocksize):
                xnearest[i, j] = int(
                    np.argmin(
                        np.abs((xnodes[i] - mu - pshocknodes[j]) - xnodes)))

        anearest = np.empty((asize, pshocksize), dtype=int)
        for i in range(asize):
            for j in range(pshocksize):
                anearest[i, j] = int(
                    np.argmin(
                        np.abs((anodes[i] * np.exp(-mu - pshocknodes[j])) -
                               anodes)))

        ynearest = np.empty((ysize, pshocksize, yshocksize), dtype=int)
        for i in range(ysize):
            for j in range(pshocksize):
                for k in range(yshocksize):
                    ynearest[i, j, k] = int(
                        np.argmin(
                            np.abs((ynodes[i] + yshocknodes[k] -
                                    pshocknodes[j]) - ynodes)))

        self.xnearest, self.anearest, self.ynearest = xnearest, anearest, ynearest

        # "unlogging" x and y nodes
        self.xnodes = np.exp(xnodes)
        self.ynodes = np.exp(ynodes)
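The three nearest-index tables above are built with explicit loops. Purely as an illustration (not part of the source), the same "closest grid point" lookup can be written as a small vectorized helper, which is convenient for sanity-checking the loops:

import numpy as np

def nearest_indices(values, grid):
    # Index of the closest point in `grid` for each entry of `values`
    values = np.asarray(values)
    return np.abs(values[:, None] - grid[None, :]).argmin(axis=1)

# For instance, row i of the xnearest table above equals
#   nearest_indices(xnodes[i] - mu - pshocknodes, xnodes)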
Code example #10
import numpy as np
import matplotlib.pyplot as plt
import quantecon as qe
from scipy.linalg import solve, eigvals

n = 25  # size of state space
beta = 0.9
mc = qe.tauchen(0.96, 0.02, n=n)

K = mc.P * np.exp(mc.state_values)

warning_message = "Spectral radius condition fails"
assert np.max(np.abs(eigvals(K))) < 1 / beta, warning_message

I = np.identity(n)
v = solve(I - beta * K, beta * K @ np.ones(n))

fig, ax = plt.subplots()
ax.plot(mc.state_values, v, 'g-o', lw=2, alpha=0.7, label=r'$v$')
ax.set_ylabel("price-dividend ratio")
ax.set_xlabel("state")
ax.legend(loc='upper left')
plt.show()
Code example #11
"""
Plot the dividend process and the state process for the Markov asset pricing
lecture.

"""
import numpy as np
import matplotlib.pyplot as plt
import quantecon as qe

mc = qe.tauchen(0.96, 0.25, n=25)  
sim_length = 80

x_series = mc.simulate(sim_length, init=np.median(mc.state_values))
lambda_series = np.exp(x_series)
d_series = np.cumprod(lambda_series) # assumes d_0 = 1

fig, axes = plt.subplots(2, 2)
axes[0, 0].plot(x_series, 'b-', lw=2, label=r'$X_t$')
axes[0, 1].plot(lambda_series, 'b-', lw=2, label=r'$g_t$')
axes[1, 0].plot(d_series, 'b-', lw=2, label=r'$d_t$')
axes[1, 1].plot(np.log(d_series), 'b-', lw=2, label=r'$\log \, d_t$')
for ax in axes.flatten():
    ax.legend(loc='upper left', frameon=False)
plt.tight_layout()
plt.show()
Code example #12
        def __init__(self, config):
            parameters = config['parameters']

            # Labor shock process
            self.labour_mc = tauchen(parameters['phi_w'],
                                     parameters['sigma_w'],
                                     n=int(parameters['grid_size_W']))
            self.E = self.labour_mc.state_values
            self.P_E = self.labour_mc.P
            self.P_stat = self.labour_mc.stationary_distributions[0]

            #  beta and alpha processes
            #  Recall that beta_hat are values of ln beta_t - ln beta_bar and
            #  alpha_hat are values of ln alpha_t - ln alpha_bar
            self.beta_mc = tauchen(parameters['rho_beta'],
                                   parameters['sigma_beta'],
                                   n=int(parameters['grid_size_beta']))
            self.beta_hat = self.beta_mc.state_values
            self.P_beta = self.beta_mc.P
            self.beta_stat = self.beta_mc.stationary_distributions[0]
            self.alpha_mc = tauchen(parameters['rho_alpha'],
                                    parameters['sigma_alpha'],
                                    n=int(parameters['grid_size_alpha']))
            self.alpha_hat, self.P_alpha = self.alpha_mc.state_values, \
                self.alpha_mc.P
            self.alpha_stat = self.alpha_mc.stationary_distributions[0]

            self.beta = np.inner(
                np.exp(self.beta_hat + np.log(parameters['beta_bar'])),
                self.beta_stat)
            self.alpha_housing = np.inner(
                np.exp(self.alpha_hat + np.log(parameters['alpha_bar'])),
                self.alpha_stat)

            # Pension asset returns shock processes
            lnrh_sd = parameters['sigma_d'] * (parameters['h']**2)
            lnrh_mc = tauchen(0, lnrh_sd, n=int(parameters['grid_size_DCR']))
            X_rh, P_rh = lnrh_mc.state_values, lnrh_mc.P[0]
            self.X_rh = np.exp(np.log(parameters['r_h']) + X_rh)
            self.P_rh = P_rh
            lnrl_sd = parameters['sigma_d'] * (parameters['l']**2)

            lnrl_mc = tauchen(0, lnrl_sd, n=int(parameters['grid_size_DCR']))
            X_rl, P_rl = lnrl_mc.state_values, lnrl_mc.P[0]
            self.X_rl = np.exp(np.log(parameters['r_l']) + X_rl)
            self.P_rl = P_rl

            # Cartesian grid of realisations from the high and low risk assets
            self.X_r = cartesian([self.X_rl, self.X_rh])
            P_tmp = cartesian([self.P_rl, self.P_rh])

            # Joint probability array of high/low return realisations
            self.P_r = np.zeros(len(self.X_r))
            for i in range(len(self.P_r)):
                self.P_r[i] = P_tmp[i][0] * P_tmp[i][1]

            # housing return shocks
            self.Q_shocks_mc = tauchen(0,
                                       parameters['sigma_r_H'],
                                       n=int(parameters['grid_size_Q_s']))
            self.Q_shocks_r = self.Q_shocks_mc.state_values
            self.Q_shocks_P = self.Q_shocks_mc.P[0]
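For reference, the fragment reads the keys listed below from config['parameters']. The dictionary is only a placeholder showing the expected structure; every value is hypothetical and not taken from the source:

# Hypothetical config skeleton -- placeholder values only
config = {
    'parameters': {
        'phi_w': 0.9, 'sigma_w': 0.1, 'grid_size_W': 5,               # labour shock process
        'rho_beta': 0.9, 'sigma_beta': 0.05, 'grid_size_beta': 5,     # beta process
        'rho_alpha': 0.9, 'sigma_alpha': 0.05, 'grid_size_alpha': 5,  # alpha process
        'beta_bar': 0.95, 'alpha_bar': 0.3,
        'sigma_d': 0.1, 'h': 1.0, 'l': 0.5, 'grid_size_DCR': 3,       # pension asset returns
        'r_h': 1.05, 'r_l': 1.02,
        'sigma_r_H': 0.05, 'grid_size_Q_s': 3,                        # housing return shocks
    }
}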