# Example #1
# Example 1: evolution of the density of the capital stock k_t under a
# stochastic Solow-style law of motion, starting from four different
# initial distributions (one per subplot panel).
fig, axes = plt.subplots(2, 2)
axes = axes.flatten()
xmax = 6.5

# These are the same for every panel, so compute them once outside the
# loop instead of rebuilding them on each iteration.
ygrid = np.linspace(0.01, xmax, 150)
greys = [str(g) for g in np.linspace(0.0, 0.8, T)]
greys.reverse()  # later dates plotted in darker shades

for i in range(4):
    ax = axes[i]
    ax.set_xlim(0, xmax)
    # Initial distribution of k_0: Beta(5, 5) rescaled, shifted by panel
    psi_0 = beta(5, 5, scale=0.5, loc=i * 2)

    # == Generate matrix s.t. t-th column is n observations of k_t == #
    k = np.empty((n, T))
    A = phi.rvs((n, T))
    k[:, 0] = psi_0.rvs(n)
    for t in range(T - 1):
        # Law of motion: k_{t+1} = s * A_t * k_t^alpha + (1 - delta) * k_t
        k[:, t + 1] = s * A[:, t] * k[:, t]**alpha + (1 - delta) * k[:, t]

    # == One look-ahead density estimate per date t == #
    laes = [lae(p, k[:, t]) for t in range(T)]
    for psi, shade in zip(laes, greys):
        ax.plot(ygrid, psi(ygrid), color=shade, lw=2, alpha=0.6)

plt.show()

# Example #2
# == Primitives of the TAR example == #
phi = norm()    # standard normal shock distribution
n = 500         # sample size
theta = 0.8     # TAR coefficient, |theta| < 1

# == Frequently used constants derived from theta == #
d = np.sqrt(1 - theta * theta)
delta = theta / d

def psi_star(y):
    """True stationary density of the TAR model (a skew-normal form)."""
    return 2 * norm.cdf(delta * y) * norm.pdf(y)

def p(x, y):
    """Stochastic kernel for the TAR model.

    Returns the conditional density of X_{t+1} = y given X_t = x under
    X_{t+1} = theta * |X_t| + d * Z_{t+1}, Z standard normal.
    (Body re-indented to 4 spaces for consistency with the rest of the file.)
    """
    return phi.pdf((y - theta * np.abs(x)) / d) / d

# == Simulate a trajectory of the TAR process and build two estimators == #
Z = phi.rvs(n)
X = np.empty(n)
# BUG FIX: np.empty leaves X[0] as uninitialized memory, so the whole
# trajectory (and both density estimates) depended on garbage. Start the
# chain deterministically at 0; the chain is ergodic so the estimates
# still converge to the stationary density.
X[0] = 0.0
for t in range(n-1):
    X[t+1] = theta * np.abs(X[t]) + d * Z[t]
psi_est = lae(p, X)        # look-ahead estimator
k_est = gaussian_kde(X)    # kernel density estimator, for comparison

# Plot the true stationary density against the two estimates.
fig, ax = plt.subplots()
ys = np.linspace(-3, 3, 200)
curves = [
    (psi_star, 'b-', 'true'),
    (psi_est, 'g-', 'look ahead estimate'),
    (k_est, 'k-', 'kernel based estimate'),
]
for density, fmt, lab in curves:
    ax.plot(ys, density(ys), fmt, lw=2, alpha=0.6, label=lab)
ax.legend(loc='upper left')
plt.show()
theta = 0.8    # TAR coefficient, |theta| < 1
# == Frequently used constants derived from theta == #
d = np.sqrt(1.0 - theta ** 2)
delta = theta / d


def psi_star(y):
    """True stationary density of the TAR model."""
    pdf_part = norm.pdf(y)
    cdf_part = norm.cdf(delta * y)
    return 2 * pdf_part * cdf_part


def p(x, y):
    """Stochastic kernel for the TAR model."""
    z = (y - theta * np.abs(x)) / d
    return phi.pdf(z) / d


# == Simulate a trajectory of the TAR process and build two estimators == #
Z = phi.rvs(n)
X = np.empty(n)
# BUG FIX: np.empty leaves X[0] as uninitialized memory, making the
# simulated path nondeterministic. Initialize the chain at 0; ergodicity
# means the density estimates are unaffected asymptotically.
X[0] = 0.0
for t in range(n - 1):
    X[t + 1] = theta * np.abs(X[t]) + d * Z[t]
psi_est = lae(p, X)        # look-ahead estimator
k_est = gaussian_kde(X)    # kernel density estimator, for comparison

# Compare the true stationary density with both estimates on one axis.
fig, ax = plt.subplots()
ys = np.linspace(-3, 3, 200)
shared = dict(lw=2, alpha=0.6)
ax.plot(ys, psi_star(ys), 'b-', label='true', **shared)
ax.plot(ys, psi_est(ys), 'g-', label='look ahead estimate', **shared)
ax.plot(ys, k_est(ys), 'k-', label='kernel based estimate', **shared)
ax.legend(loc='upper left')
plt.show()