Example #1
def reward(s, x, i, j):
    s = s - sbar  # deviation from the target state sbar (broadcasts across nodes)
    f = np.zeros_like(s[0])
    for ii in range(2):
        for jj in range(2):
            f -= 0.5 * omega[ii, jj] * s[ii] * s[jj]  # quadratic form -0.5 s' omega s
    fx = np.zeros_like(x)   # reward does not depend on the action
    fxx = np.zeros_like(x)
    return f, fx, fxx
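
# The double loop above just evaluates the quadratic form f = -0.5 s' omega s
# node by node. A minimal self-check, assuming hypothetical sbar and omega
# values (the real ones come from the surrounding demo):
import numpy as np

sbar = np.array([[1.0], [2.0]])               # hypothetical target state (2 x 1)
omega = np.array([[2.0, 0.5], [0.5, 1.0]])    # hypothetical weight matrix

s = np.random.rand(2, 5)                      # 2 state variables at 5 nodes
d = s - sbar                                  # deviations, broadcast across nodes
f_loop = np.zeros_like(d[0])
for ii in range(2):
    for jj in range(2):
        f_loop -= 0.5 * omega[ii, jj] * d[ii] * d[jj]

f_vec = -0.5 * np.einsum('in,ij,jn->n', d, omega, d)   # same thing, vectorized
assert np.allclose(f_loop, f_vec)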
Example #6
def transition(s, q, i, j, in_, e):
    return q.copy(), np.ones_like(q), np.zeros_like(q)
Example #7
S = np.linspace(smin, smax, n)  # vector of states

# Action Space
xmin = 0  # minimum action
xmax = 6  # maximum action
m = 100  # number of actions
X = np.linspace(xmin, xmax, m)  # vector of actions

# Reward Function
f = np.full((m, n), -np.inf)
for k in range(m):
    f[k, S >= X[k]] = (X[k]**(1 - gamma)) / (1 - gamma) - cost * X[k]
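
# Initializing f to -np.inf and overwriting only the feasible entries (S >= X[k])
# guarantees that harvesting more than the available stock is never chosen:
# the max in the Bellman equation skips -inf automatically. A toy illustration
# with hypothetical grids:
demo_S = np.array([0.0, 2.0, 4.0])        # hypothetical stock grid
demo_X = np.array([0.0, 1.0, 3.0])        # hypothetical harvest grid
fd = np.full((3, 3), -np.inf)
for k, xk in enumerate(demo_X):
    fd[k, demo_S >= xk] = np.sqrt(xk)     # any finite reward where feasible
print(fd.argmax(axis=0))                  # -> [0 1 2]: -inf rows never win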

# State Transition Function

g = np.zeros_like(f)
for k in range(m):
    snext = alpha * (S - X[k]) - 0.5 * beta * (S - X[k])**2
    g[k] = getindex(snext, S)
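
# getindex comes from the compecon toolbox; the essential idea is to discretize
# the continuous next state back onto the grid S. A plausible stand-in, assuming
# getindex returns the index of the nearest grid point to each value:
def nearest_index(values, grid):
    # index of the closest grid point for each entry of values (assumed behavior)
    return np.abs(values[:, None] - grid[None, :]).argmin(axis=1)

# e.g. nearest_index(np.array([0.3, 3.9, 7.6]), np.linspace(0, 8, 5)) -> [0 2 4]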

# Model Structure
model = DDPmodel(f, g, delta)
model.solve()
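
# DDPmodel is the compecon discrete dynamic programming solver. As a rough
# illustration of what solving this model entails, here is a minimal
# value-iteration sketch under the same conventions (f is m-by-n rewards,
# g maps each state-action pair to the index of the next state) -- an
# assumption-laden stand-in, not compecon's implementation:
import numpy as np

def value_iteration(f, g, delta, tol=1e-8, maxit=10_000):
    g = np.asarray(g, dtype=int)      # ensure integer next-state indices
    m, n = f.shape
    v = np.zeros(n)
    for _ in range(maxit):
        Q = f + delta * v[g]          # value of each action at each state
        v_new = Q.max(axis=0)         # Bellman maximization over actions
        if np.abs(v_new - v).max() < tol:
            break
        v = v_new
    return v_new, Q.argmax(axis=0)    # value function and optimal policy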

## Analysis

# Plot Optimal Policy
demo.figure('Optimal Harvest Policy', 'Stock', 'Harvest')
plt.plot(S, X[model.policy])

# Plot Value Function
Example #8
def bounds(s, i, j):
    return np.zeros_like(s), s.copy()
Example #9
def transition(s, q, i, j, in_, e):
    g = alpha * (s - q) - 0.5 * beta * (s - q)**2
    gx = -alpha + beta * (s - q)       # dg/dq
    gxx = -beta * np.ones_like(s)      # d2g/dq2 is the constant -beta
    return g, gx, gxx
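
# The analytic derivatives above are easy to spot-check with a central
# difference. A minimal sketch with hypothetical parameter values:
import numpy as np

a_, b_ = 4.0, 1.0                              # hypothetical stand-ins for alpha, beta
s0, q0, h = 6.0, 2.0, 1e-6
g_of = lambda q: a_ * (s0 - q) - 0.5 * b_ * (s0 - q)**2
fd = (g_of(q0 + h) - g_of(q0 - h)) / (2 * h)   # numerical dg/dq
assert abs(fd - (-a_ + b_ * (s0 - q0))) < 1e-6
# and d2g/dq2 is the constant -b_, matching gxx above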
Example #10
def bounds(s, i, j):
    return np.zeros_like(s), s.copy()  # important: return a copy of s, not a view
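
# Why the copy matters: on a NumPy array, s[:] returns a *view* that shares
# memory with s, so a solver that mutates the upper bound in place would
# silently corrupt the state vector. s.copy() is the safe choice:
demo_s = np.array([1.0, 2.0, 3.0])
ub_view, ub_copy = demo_s[:], demo_s.copy()
ub_view[0] = -1.0       # also changes demo_s[0]
ub_copy[1] = -1.0       # demo_s[1] is untouched
print(demo_s)           # -> [-1.  2.  3.]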
Example #11
def transition(s, x, i, j, in_, e):
    g = alpha + beta @ s + gamma @ x + e     # linear law of motion
    gx = np.tile(gamma, (1, x.size))         # constant slope gamma at every node
    gxx = np.zeros_like(s)                   # linear in x, so zero curvature
    return g, gx, gxx
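
# Because the law of motion is linear in x, the derivative gx is just the
# constant gamma replicated across nodes. A one-dimensional numeric check,
# with hypothetical stand-ins for alpha, beta, gamma:
a0 = 0.5
b0 = np.array([[0.9]])
c0 = np.array([[0.2]])
s0, x0, h = np.array([1.0]), np.array([3.0]), 1e-6
g0 = a0 + b0 @ s0 + c0 @ x0
fd = ((a0 + b0 @ s0 + c0 @ (x0 + h)) - g0) / h
assert np.allclose(fd, c0.ravel())           # slope equals gamma, as claimed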
Example #12
def bounds(s, i, j):
    lb = np.zeros_like(s[0])
    ub = np.full(lb.shape, np.inf)
    return lb, ub
Example #13
def bounds(s, i, j):
    return np.zeros_like(s), 0.99 * s
Example #14
def transition(s, x, i, j, in_, e):
    g = alpha + beta @ s + gamma @ x + e
    gx = np.tile(gamma, (1, x.size))
    gxx = np.zeros_like(s)
    return g, gx, gxx
Example #15
def bounds(s, i, j):
    return np.zeros_like(s), s.copy()
Example #17
def transition(s, q, i, j, in_, e):
    g = s - q
    gx = -np.ones_like(s)
    gxx = np.zeros_like(s)
    return g, gx, gxx
Example #18
# Define function and derivative
f1 = lambda x: np.exp(-2 * x)
d1 = lambda x: -2 * np.exp(-2 * x)

# Fit approximant
n, a, b = 10, -1, 1
f1fit = BasisChebyshev(n, a, b, f=f1)

# Graph approximation error for function and derivative
axopts = {'xlabel': 'x', 'ylabel': 'Error', 'xticks': [-1, 0, 1]}
x = np.linspace(a, b, 1001)
fig = plt.figure(figsize=[12, 6])

ax1 = fig.add_subplot(121, title='Function approximation error', **axopts)
ax1.axhline(linestyle='--', color='gray', linewidth=2)
ax1.plot(f1fit.nodes, np.zeros_like(f1fit.nodes), 'ro', markersize=12)
ax1.plot(x, f1fit(x) - f1(x))

ax2 = fig.add_subplot(122, title='Derivative approximation error', **axopts)
ax2.plot(x, np.zeros_like(x), '--', color='gray', linewidth=2)
ax2.plot(f1fit.nodes, np.zeros_like(f1fit.nodes), 'ro', markersize=12)
ax2.plot(x, f1fit(x, 1) - d1(x))
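
# BasisChebyshev is from the compecon toolbox. For intuition, a comparable
# interpolant can be built with numpy alone: sample f1 at the n Chebyshev
# nodes of [a, b] and fit a degree n-1 Chebyshev polynomial through them
# (a sketch of the idea, not compecon's implementation):
from numpy.polynomial.chebyshev import Chebyshev

kk = np.arange(n)
cheb_nodes = 0.5 * (a + b) + 0.5 * (b - a) * np.cos((2 * kk + 1) * np.pi / (2 * n))
cheb = Chebyshev.fit(cheb_nodes, f1(cheb_nodes), n - 1, domain=[a, b])
print(np.abs(cheb(x) - f1(x)).max())             # max function error
print(np.abs(cheb.deriv()(x) - d1(x)).max())     # max derivative error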
''' Bivariate Interpolation '''
# Define function
f2 = lambda x: np.cos(x[0]) / np.exp(x[1])

# Set degree and domain interpolation
n, a, b = 7, 0.0, 1.0
f2fit = BasisChebyshev([n, n], a, b, f=f2)

# Nice plot of function approximation error
Example #19
def transition(s, x, i, j, in_, e):
    g = s - x + e
    gx = -np.ones_like(s)
    gxx = np.zeros_like(s)
    return g, gx, gxx
Example #20
def bounds(s, i, j):
    return np.zeros_like(s), s.copy()  # important: return a copy of s, not a view
Example #21
def bounds(s, i, j):
    return np.zeros_like(s), np.full(s.shape, np.inf)
Example #22
def transition(s, q, i, j, in_, e):
    g = alpha * (s - q) - 0.5 * beta * (s - q) ** 2
    gx = -alpha + beta * (s - q)       # dg/dq
    gxx = -beta * np.ones_like(s)      # d2g/dq2 is the constant -beta
    return g, gx, gxx
Example #23
def bounds(s, i, j):
    return np.zeros_like(s), np.full(s.shape, np.inf)
Example #24
demo.annotate(sstar, vstar, '$s^*$ = %.2f\n$V^*$ = %.2f' % (sstar, vstar), 'bo', (10, -7))
plt.legend(['Chebychev Collocation', 'L-Q Approximation'], loc='upper left')



# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Wealth', 'Shadow Price')
plt.plot(Wealth, np.c_[pr, plq])
demo.annotate(sstar, pstar, '$s^*$ = %.2f\n$\\lambda^*$ = %.2f' % (sstar, pstar), 'bo', (10, 7))
plt.legend(['Chebychev Collocation', 'L-Q Approximation'])


# Plot Chebychev Collocation Residual and Approximation Error
plt.figure(figsize=[12, 6])
demo.subplot(1, 2, 1, 'Chebychev Collocation Residual\nand Approximation Error', 'Wealth', 'Residual/Error')
plt.plot(Wealth, np.c_[S.resid, v - vtrue], Wealth, np.zeros_like(Wealth), 'k--')
plt.legend(['Residual','Error'], loc='lower right')

# Plot Linear-Quadratic Approximation Error
demo.subplot(1, 2, 2, 'Linear-Quadratic Approximation Error', 'Wealth', 'Error')
plt.plot(Wealth, vlq - vtrue)


# Plot State and Policy Paths
opts = dict(spec='r*', offset=(-5, -5), fs=11, ha='right')

data[['Wealth', 'Investment']].plot()
plt.title('State and Policy Paths')
demo.annotate(T, sstar, 'steady-state wealth\n = %.2f' % sstar, **opts)
demo.annotate(T, kstar, 'steady-state investment\n = %.2f' % kstar, **opts)
plt.xlabel('Period')
Example #25
def bounds(s, i, j):
    return np.zeros_like(s), 0.99 * s
Example #26
def bounds(s, i, j):
    lb = np.zeros_like(s[0])
    ub = np.full(lb.shape, np.inf)
    return lb, ub