Example no. 1
pr = np.linspace(pmin, pmax, 10 * n)

# Plot Action-Contingent Value Functions

pp = demo.qplot('unit profit', 'value_j', 'i',
      data=S,
      main='Action-Contingent Value Functions',
      xlab='Net Unit Profit',
      ylab='Value')


print(pp)

# Plot with matplotlib
demo.figure('Action-Contingent Value Functions', 'Net Unit Profit', 'Value')
plt.plot(pr, model.Value_j[:, 'keep'](pr).T)
plt.legend(model.labels.i, loc='upper left')


# Compute and Plot Critical Unit Profit Contributions
vr = model.Value_j(pr)

print('Critical Replacement Profit\n')
for a in range(A-1):
    pcrit = np.interp(0.0, vr[a, 1] - vr[a, 0], pr, np.nan, np.nan)  # np.interp requires increasing x-data
    vcrit = np.interp(pcrit, pr, vr[a, 0])
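
# Aside: np.interp quietly returns nonsense when its x-data is decreasing. A
# minimal guard (a sketch; interp_root is not part of the original demo) flips
# the arrays so the data is increasing:
def interp_root(fvals, x):
    """Return the x at which fvals crosses zero; np.interp needs increasing data."""
    if fvals[0] > fvals[-1]:
        fvals, x = fvals[::-1], x[::-1]
    return np.interp(0.0, fvals, x, left=np.nan, right=np.nan)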
Example no. 2
# Check Model Derivatives
# dpcheck(model,smax,0)

## SOLUTION

# Solve Bellman Equation
model.solve()
resid, s, v, q = model.residuals()

# Compute and print abandonment point
sstar = (b[0] - a[0]) / b[1]
print('Abandonment Point = %5.2f' % sstar)

# Plot Optimal Policy
demo.figure('Optimal Extraction', 'Ore Stock', 'Ore Extracted')
plt.plot(s, q.T)

# Plot Value Function
demo.figure('Value Function', 'Ore Stock', 'Value')
plt.plot(s, v.T)

# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Ore Stock', 'Shadow Price')
plt.plot(s, model.Value(s, 1))

# Plot Residual
demo.figure('Bellman Equation Residual', 'Ore Stock', 'Residual')
plt.plot(s, resid.T)
plt.hlines(0, 0, smax, 'k', '--')
Example no. 3
    else:
        return s + gamma * (smax - s)


model = DPmodel(basis,
                reward,
                transition,
                discount=delta,
                j=['keep', 'clear cut'])

model.solve()
resid, sr, vr = model.residuals()

# Plot Action-Contingent Value Functions

demo.figure('Action-Contingent Value Functions', 'Biomass', 'Value of Stand')
plt.plot(sr, vr.T)
plt.legend(model.labels.j, loc='upper center')

# Compute and Plot Optimal Harvesting Stock Level
scrit = np.interp(0.0, vr[1] - vr[0], sr)
vcrit = np.interp(scrit, sr, vr[0])
demo.annotate(scrit,
              vcrit,
              '$s^* = {:.2f}$'.format(scrit),
              'wo', (-5, 5),
              fs=12)

print('Optimal Biomass Harvesting Level = {:5.2f}'.format(scrit))

# Plot Residual

Example no. 4

# Reward Function
for k in range(m):
    f[k] = ((S * (1 - X[k]))**(1 - alpha)) / (1 - alpha)

# State Transition Function
g = np.empty_like(f)
for k in range(m):
    snext = gamma * X[k] * S + (X[k] * S)**beta
    g[k] = getindex(snext, S)
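# (getindex maps each next-period wealth snext to the index of the nearest
#  grid point in S, keeping the simulated state on the discrete grid)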

# Model Structure
model = DDPmodel(f, g, delta).solve()

## Analysis

# Plot Optimal Policy
demo.figure('Optimal Investment', 'Wealth', 'Investment')
plt.plot(S, X[model.policy] * S)

# Plot Optimal Consumption
demo.figure('Optimal Consumption', 'Wealth', 'Consumption')
plt.plot(S, S - X[model.policy] * S)

# Plot Value Function
demo.figure('Optimal Value Function', 'Wealth', 'Value')
plt.plot(S, model.value)

# Simulate Model
nyrs = 20
t = np.arange(0, nyrs + 1)
st, xt = model.simulate(smin, nyrs)
Example no. 5
# ============  Compute Linear-Quadratic Approximation
growth_model.lqapprox(sstar, kstar)
vlq, plq = growth_model.Value(S.Wealth, order)
klq = growth_model.Policy(S.Wealth)

# ============   Compute Analytic Solution
vtrue = vstar + b * (np.log(S.Wealth) - np.log(sstar))


# ==============   Make plots:
Wealth = S.Wealth.T


# Plot Optimal Policy
demo.figure('Optimal Investment Policy', 'Wealth', 'Investment')
plt.plot(Wealth, np.c_[k, klq])
demo.annotate(sstar, kstar,'$s^*$ = %.2f\n$k^*$ = %.2f' % (sstar, kstar), 'bo', (10, -7))
plt.legend(['Chebychev Collocation', 'L-Q Approximation'], loc='upper left')


# Plot Value Function
demo.figure('Value Function', 'Wealth', 'Value')
plt.plot(Wealth, np.c_[v, vlq])
demo.annotate(sstar, vstar,'$s^*$ = %.2f\n$V^*$ = %.2f' % (sstar, vstar),'bo', (10, -7))
plt.legend(['Chebychev Collocation', 'L-Q Approximation'], loc='upper left')



# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Wealth', 'Shadow Price')
Example no. 6
# Print Steady-States
print('Steady States')
print('\tStock   = %5.2f' % sstar)
print('\tHarvest = %5.2f' % qstar)

# Check Model Derivatives
# dpcheck(model,sstar,qstar)

## SOLUTION

# Solve Bellman Equation
model.solve()
resid, s, v, q = model.residuals()

# Plot Optimal Policy
demo.figure('Optimal Harvest Policy', 'Stock', 'Harvest')
plt.plot(s, q.T)

# Plot Value Function
demo.figure('Value Function', 'Stock', 'Value')
plt.plot(s, v.T)

# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Stock', 'Shadow Price')
plt.plot(s, model.Value(s, 1).T)

# Plot Residual
demo.figure('Bellman Equation Residual', 'Stock', 'Residual')
plt.plot(s, resid.T)
plt.hlines(0, smin, smax, 'k', '--')
Example no. 7

    # survives predation, finds food
    snext = S[i] - 1 + e[k]
    j = getindex(snext, S)
    P[k, i, j] += p[k] * q[k]
    # survives predation, finds no food
    snext = S[i] - 1
    j = getindex(snext, S)
    P[k, i, j] += p[k] * (1 - q[k])


# Terminal Value Function
vterm = np.ones(n)            # terminal value: survive
vterm[0] = 0                 # terminal value: death

# Model Structure
model = DDPmodel(f, P, 1, T, vterm=vterm)
model.solve()

## Analysis

lims = [-0.5, emax + 0.5], [0, 1]

# Plot Survival Probabilities, Period 0
demo.figure('Survival Probability (Period 0)', 'Stock of Energy', 'Probability', *lims)
plt.bar(S, model.value[0], 1)

# Plot Survival Probabilities, Period 5
demo.figure('Survival Probability (Period 5)', 'Stock of Energy', 'Probability', *lims)
plt.bar(S, model.value[5], 1)

plt.show()
Example no. 8
# Deterministic Steady-State
kstar = ((1 - delta * gamma) / (delta * beta)) ** (1 / (beta - 1))  # deterministic steady-state capital investment
sstar = gamma * kstar + kstar ** beta  # deterministic steady-state wealth

# Check Model Derivatives
# dpcheck(model,sstar,kstar)


## SOLUTION

# Solve Bellman Equation
growth.solve()
resid, s, v, k = growth.solution()

# Plot Optimal Policy
demo.figure('Optimal Investment Policy',  'Wealth', 'Investment')
plt.plot(s, k.T)

# Plot Value Function
demo.figure('Value Function', 'Wealth', 'Value')
plt.plot(s, v.T)

# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Wealth', 'Shadow Price')
plt.plot(s, growth.Value(s, order=1).T)



# Plot Residual
demo.figure('Bellman Equation Residual', 'Wealth', 'Residual')
plt.plot(s, resid.T)
Example no. 9
    f[k] = ((S * (1 - X[k])) ** (1-alpha)) / (1 - alpha)


# State Transition Function
g = np.empty_like(f)
for k in range(m):
    snext = gamma * X[k] * S + (X[k] * S) ** beta
    g[k] = getindex(snext, S)

# Model Structure
model = DDPmodel(f, g, delta).solve()

## Analysis

# Plot Optimal Policy
demo.figure('Optimal Investment', 'Wealth', 'Investment')
plt.plot(S, X[model.policy] * S)

# Plot Optimal Consumption
demo.figure('Optimal Consumption', 'Wealth', 'Consumption')
plt.plot(S, S - X[model.policy] * S)

# Plot Value Function
demo.figure('Optimal Value Function', 'Wealth', 'Value')
plt.plot(S, model.value)


# Simulate Model
nyrs = 20
t = np.arange(0, nyrs + 1)
st, xt = model.simulate(smin, nyrs)
Example no. 10
# Check Model Derivatives
# dpcheck(model,smax,0)


## SOLUTION

# Solve Bellman Equation
model.solve()
resid, s, v, q = model.solution()

# Compute and print abandonment point
sstar = (b[0] - a[0]) / b[1]
print('Abandonment Point = %5.2f' % sstar)

# Plot Optimal Policy
demo.figure('Optimal Extraction', 'Ore Stock', 'Ore Extracted')
plt.plot(s, q.T)

# Plot Value Function
demo.figure('Value Function', 'Ore Stock', 'Value')
plt.plot(s, v.T)

# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Ore Stock', 'Shadow Price')
plt.plot(s, model.Value(s, 1))

# Plot Residual
demo.figure('Bellman Equation Residual', 'Ore Stock', 'Residual')
plt.plot(s, resid.T)
plt.hlines(0, 0, smax, 'k', '--')
Example no. 11
    q = Q(p)
    # residual is marginal revenue minus marginal cost: MR = p + q/D'(p) with
    # demand D(p) = p**-3.5, and MC = sqrt(q) + q**2
    return p + q / (-3.5 * p**(-4.5)) - np.sqrt(q) - q**2


# Approximation structure
n, a, b = 21, 0.5, 2.5
Q = BasisChebyshev(n, a, b)
c0 = np.zeros(n)
c0[0] = 2
p = Q.nodes
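# (the first Chebychev basis function is the constant 1, so c0 = [2, 0, ..., 0]
#  starts the solver below from the flat initial guess Q(p) = 2)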

# Solve for effective supply function
monopoly = NLP(resid)
Q.c = monopoly.broyden(c0)

# Setup plot
nplot = 1000
p = nodeunif(nplot, a, b)
rplot = resid(Q.c)

# Plot effective supply
demo.figure("Monopolist's Effective Supply Curve", 'Quantity', 'Price')
plt.plot(Q(p), p)

# Plot residual
demo.figure('Functional Equation Residual', 'Price', 'Residual')
plt.hlines(0, a, b, 'k', '--')
plt.plot(p, rplot)

plt.show()
Example no. 12
    # Chebychev-node Chebychev-basis approximant
    yapprox = BasisChebyshev(n[i], a, b, f=runge)
    yfit = yapprox(x)
    phi = yapprox.Phi()
    errcheb[i] = yfit - y
    nrmcheb[i] = np.log10(norm(yfit - y, np.inf))
    concheb[i] = np.log10(cond(phi, 2))

# Plot Chebychev- and uniform node polynomial approximation errors


figs = []
figs.append(demo.figure("Runge's Function", '', 'y'))
plt.plot(x, y)
plt.text(-0.8, 0.8, r'$y = \frac{1}{1+25x^2}$', fontsize=18)
plt.xticks([])


figs.append(
    demo.figure(
        "Runge's Function $11^{th}$-Degree\nPolynomial Approximation Error.",
        'x', 'Error'))
plt.hlines(0, a, b, 'gray', '--')
plt.plot(x, errcheb[4], label='Chebychev Nodes')
plt.plot(x, errunif[4], label='Uniform Nodes')
plt.legend(loc='upper center')
Example no. 13
m = len(X)               	# number of actions

# Reward Function
f = np.zeros((m,n))
f[1] = strike - price

# State Transition Probability Matrix
P = np.zeros((m, n, n))
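
# The underlying asset price follows a discrete random walk: from node i it
# moves up one node with probability q and down one node with probability
# 1 - q, with moves truncated at the edges of the price grid.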

for i in range(n):
    P[0, i, min(i + 1, n - 1)] = q
    P[0, i, max(i - 1, 0)] = 1 - q

# Model Structure
model = DDPmodel(f, P, delta, horizon=N)
model.solve()
   
## Analysis

# Plot Optimal Exercise Boundary
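# (model.policy is indexed by period and price node; np.diff along the price
#  axis is nonzero where the decision flips from hold to exercise, so (i, j)
#  trace out the boundary in (period, price-node) pairs)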
i, j = np.where(np.diff(model.policy[:-1], 1))
temp = (i * tau)[::-1]
demo.figure('Put Option Optimal Exercise Boundary', 'Time to Maturity', 'Asset Price')
plt.plot(temp, price[j])

# Plot Option Premium vs. Asset Price
demo.figure('Put Option Value', 'Asset Price', 'Premium', [0, 2 * strike])
plt.plot([0, strike],[strike, 0], 'k--', lw=2)
plt.plot(price, model.value[0], lw=3)

plt.show()
Example no. 14

# State Transition Function
g = np.empty_like(f)
g[0] = getindex(np.c_[S1 + 1, S2], S)
g[1] = getindex(np.c_[S1 + 1, S2 + 1], S)
g[2] = getindex(np.c_[1, 0], S)

# Model Structure
model = DDPmodel(f, g, delta)
model.solve()
   


## Analysis

# Simulate Model
sinit = 0
nyrs = 12
t = np.arange(nyrs + 1)
spath, xpath = model.simulate(sinit, nyrs)

# Plot State Path (Age)
demo.figure('Optimal State Path', 'Year', 'Age of Asset', [0, 12])
plt.plot(t, S1[spath])

# Plot State Path (Servicings)
demo.figure('Optimal State Path', 'Year', 'Number of Servicings', [0, 12], [0, 2.25])
plt.plot(t, S2[spath])

plt.show()
Example no. 15
#resid, sr, vr = model.residuals(10)

# Compute and Print Critical Action Wages

wcrit0 = np.interp(0, vr[0, 1] - vr[0, 0], sr)
vcrit0 = np.interp(wcrit0, sr, vr[0, 0])
print('Critical Search Wage = {:5.1f}'.format(wcrit0))

wcrit1 = np.interp(0, vr[1, 1] - vr[1, 0], sr)
vcrit1 = np.interp(wcrit1, sr, vr[1, 0])
print('Critical Quit Wage   = {:5.1f}'.format(wcrit1))

# Plot Action-Contingent Value Function - Unemployed

demo.figure('Action-Contingent Value, Unemployed', 'Wage', 'Value')
plt.plot(sr, vr[0].T)
demo.annotate(wcrit0,
              vcrit0,
              '$w^*_0 = {:.1f}$'.format(wcrit0),
              'wo', (5, -5),
              fs=12)
plt.legend(['Do Not Search', 'Search'], loc='upper left')

# Plot Action-Contingent Value Function - Employed

demo.figure('Action-Contingent Value, Employed', 'Wage', 'Value')
plt.plot(sr, vr[1].T)
demo.annotate(wcrit1,
              vcrit1,
              '$w^*_1 = {:.1f}$'.format(wcrit1),
Example no. 16
X = ['Keep', 'Replace']      # keep or replace
m = len(X)                   # number of actions

# Reward Function
f = np.empty((m, n))
y = -0.2 * S ** 2 + 2 * S + 8  # yield per lactation
f[0] = price * y
f[1] = f[0] - cost
f[0, -1] = -np.inf               # force replace at lactation 10

# State Transition Function
g = np.ones_like(f)
g[0] = np.minimum(np.arange(n) + 1, n - 1)  # Raise lactation number by 1, if keep


# Model Structure

model = DDPmodel(f, g, delta)
model.solve()

# Plot Optimal Policy
demo.figure('Optimal Replacement', 'Age', 'Optimal Decision', [0, n + 1], [-0.5, 1.5])
plt.plot(S, model.policy, '*', markersize=15)
plt.yticks((0, 1), X)

# Plot Value Function
demo.figure('Optimal Value in Cow Replacement', 'Age', 'Value (thousands)')
plt.plot(S, model.value / 1000)

plt.show()
Example no. 17
    # Chebychev-node Chebychev-basis approximant
    yapprox = BasisChebyshev(n[i], a, b, f=runge)
    yfit = yapprox(x)
    phi = yapprox.Phi()
    errcheb[i] = yfit - y
    nrmcheb[i] = np.log10(norm(yfit - y, np.inf))
    concheb[i] = np.log10(cond(phi, 2))


# Plot Chebychev- and uniform node polynomial approximation errors



figs = []
figs.append(demo.figure("Runge's Function", '', 'y'))
plt.plot(x, y)
plt.text(-0.8, 0.8, r'$y = \frac{1}{1+25x^2}$', fontsize=18)
plt.xticks([])




figs.append(demo.figure("Runge's Function $11^{th}$-Degree\nPolynomial Approximation Error.",'x', 'Error'))
plt.hlines(0, a, b, 'gray', '--')
plt.plot(x, errcheb[4], label='Chebychev Nodes')
plt.plot(x, errunif[4], label='Uniform Nodes')
plt.legend(loc='upper center')

Example no. 18
# Reward Function
f = np.zeros((m, n))
f[0] = 50 - 2.5 * S - 2.5 * S**2
f[1] = 50 - repcost
f[0, -1] = -np.inf

# State Transition Function
g = np.zeros_like(f)
g[0] = np.arange(1, n + 1)
g[0, -1] = n - 1  # adjust last state so it doesn't go out of bounds

# Model Structure
model = DDPmodel(f, g, delta)
model.solve()

## Analysis

# Simulate Model
sinit, nyrs = S.min() - 1, 12
t = np.arange(1 + nyrs)
spath, xpath = model.simulate(sinit, nyrs)

# Plot Optimal Value
demo.figure('Optimal Value Function', 'Age of Machine', 'Value')
plt.plot(S, model.value)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Age of Machine', [0, 12])
plt.plot(t, S[spath])

plt.show()
Example no. 19
for r, x in enumerate(X):
    snext = S - x
    g[r] = getindex(snext, S)


# Model Structure
model = DDPmodel(f, g, delta)
model.solve()

# Analysis

# Simulate Model
sinit = S.max()
nyrs = 15
t = np.arange(nyrs + 1)
spath, xpath = model.simulate(sinit, nyrs)

# Plot Optimal Policy
demo.figure('Optimal Extraction Policy', 'Stock', 'Extraction')
plt.plot(S, X[model.policy])

# Plot Value Function
demo.figure('Optimal Value Function', 'Stock', 'Value')
plt.plot(S, model.value)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Stock')
plt.plot(t, S[spath])

plt.show()
Example no. 20
# ... and one may even evaluate the approximant's definite integral between the left endpoint a and x:
Fint = F(x, -1)  # order -1 evaluates the approximant's antiderivative from the left endpoint
print('\nThe approximate integral of exp(-x) between x=-1 and x=0 is {:.15f}'.
      format(Fint))
print("The  'exact'  integral  of  exp(-x) between x=-1 and x=0 is {:.15f}".
      format(np.exp(1) - 1))

# One may evaluate the accuracy of the Chebychev polynomial approximant by
# computing the approximation error on a highly refined grid of points:
ngrid = 5001  # number of grid nodes
xgrid = np.linspace(a, b, ngrid)  # generate refined grid for plotting
yapp = F(xgrid)  # approximant values at grid nodes
yact = f(xgrid)  # actual function values at grid points

demo.figure('Chebychev Approximation Error for exp(-x)', 'x', 'Error')
plt.plot(xgrid, yapp - yact)
plt.plot(xgrid, np.zeros(ngrid), 'k--', linewidth=2)

# The plot indicates that an order-10 Chebychev approximation scheme produces approximation errors
# no bigger in magnitude than 6x10^-10. The approximation error exhibits the "Chebychev equioscillation
# property", oscillating relatively uniformly throughout the approximation domain.
#
# This commonly occurs when the function being approximated is very smooth, as is the case here, but it should
# not be expected when the function is not smooth. Further notice how the approximation error is exactly 0 at
# the approximation nodes --- which is true by construction.
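
# A quick numerical check of these claims (a sketch, reusing xgrid, yapp, yact,
# the basis F, and the function f from above):
err = yapp - yact
print('Max |error| on the refined grid: {:.1e}'.format(np.abs(err).max()))
print('Max |error| at the approximation nodes: {:.1e}'.format(
    np.abs(F(F.nodes) - f(F.nodes)).max()))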

# Let us repeat the approximation exercise, this time constructing a
# 21-function cubic spline approximant:
n = 21  # order of approximation
S = BasisSpline(n, a, b, f=f)  # define basis
Example no. 21
print('Steady States')
print('\tStock   = %5.2f' % sstar)
print('\tHarvest = %5.2f' % qstar)

# Check Model Derivatives
# dpcheck(model,sstar,qstar)


## SOLUTION

# Solve Bellman Equation
model.solve()
resid, s, v, q = model.solution()

# Plot Optimal Policy
demo.figure('Optimal Harvest Policy', 'Stock', 'Harvest')
plt.plot(s, q.T)

# Plot Value Function
demo.figure('Value Function', 'Stock', 'Value')
plt.plot(s, v.T)

# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Stock', 'Shadow Price')
plt.plot(s, model.Value(s, 1).T)

# Plot Residual
demo.figure('Bellman Equation Residual', 'Stock', 'Residual')
plt.plot(s, resid.T)
plt.hlines(0, smin, smax, 'k', '--')
Example no. 22
m = len(X)  # number of actions

# Reward Function
f = np.empty((m, n))
y = -0.2 * S**2 + 2 * S + 8  # yield per lactation
f[0] = price * y
f[1] = f[0] - cost
f[0, -1] = -np.inf  # force replace at lactation 10

# State Transition Function
g = np.ones_like(f)
g[0] = np.minimum(np.arange(n) + 1,
                  n - 1)  # Raise lactation number by 1, if keep

# Model Structure

model = DDPmodel(f, g, delta)
model.solve()

# Plot Optimal Policy
demo.figure('Optimal Replacement', 'Age', 'Optimal Decision', [0, n + 1],
            [-0.5, 1.5])
plt.plot(S, model.policy, '*', markersize=15)
plt.yticks((0, 1), X)

# Plot Value Function
demo.figure('Optimal Value in Cow Replacement', 'Age', 'Value (thousands)')
plt.plot(S, model.value / 1000)

plt.show()
Example no. 23
# deterministic steady-state stock

# Check Model Derivatives
# dpcheck(model,sstar,xstar)

## SOLUTION

# Compute Linear-Quadratic Approximation at Collocation Nodes
model.lqapprox(sstar, xstar)

# Solve Bellman Equation
model.solve()  # no need to pass LQ to model, it's already there
resid, s, v, x = model.solution()

# Plot Optimal Policy
demo.figure('Optimal Irrigation Policy', 'Reservoir Level', 'Irrigation')
plt.plot(s, x.T)

# Plot Value Function
demo.figure('Value Function', 'Reservoir Level', 'Value')
plt.plot(s, v.T)

# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Reservoir Level', 'Shadow Price')
plt.plot(s, model.Value(s, 1).T)

# Plot Residual
demo.figure('Bellman Equation Residual', 'Reservoir Level', 'Residual')
plt.plot(s, resid.T)
plt.hlines(0, smin, smax, 'k', '--')
Example no. 24
for k in range(m):
    for i in range(n):
        for j in range(r.size):
            snext = min(S[i] - X[k] + r[j], maxcap)
            inext = getindex(snext, S)
            P[k, i, inext] = P[k, i, inext] + p[j]

# Model Structure
model = DDPmodel(f, P, delta)
model.solve()

## Analysis

# Plot Optimal Policy
demo.figure('Optimal Irrigation Policy', 'Water Level', 'Irrigation', [-1, 31],
            [0, 6])
plt.plot(S, X[model.policy], '*')

# Plot Value Function
demo.figure('Optimal Value Function', 'Water Level', 'Value')
plt.plot(S, model.value)

# Simulate Model
sinit = np.zeros(10000)
nyrs = 30
t = np.arange(1 + nyrs)
spath, xpath = model.simulate(sinit, nyrs)
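# (sinit is a vector of 10,000 zeros, so simulate() runs 10,000 parallel sample
#  paths from an empty reservoir; the plot below shows the cross-sectional mean
#  water level in each year)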

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Water Level')
plt.plot(t, S[spath].mean(1))
Example no. 25
g = np.zeros_like(f)
for k in range(m):
    snext = alpha * (S - X[k]) - 0.5 * beta * (S - X[k]) ** 2
    g[k] = getindex(snext, S)



# Model Structure
model = DDPmodel(f, g, delta)
model.solve()
   

## Analysis

# Plot Optimal Policy
demo.figure('Optimal Harvest Policy', 'Stock', 'Harvest')
plt.plot(S, X[model.policy])

# Plot Value Function
demo.figure('Optimal Value Function', 'Stock', 'Value')
plt.plot(S, model.value)


# Simulate Model
nyrs = 20
t = np.arange(nyrs + 1)
spath, xpath = model.simulate(n - 1, nyrs)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Stock')
Example no. 26
# Initialize Value Function

# Solve Bellman Equation and Compute Critical Exercise Prices
f = NLP(lambda p: K - np.exp(p) - delta * Value(p))  # exercise payoff minus discounted continuation value at log-price p

pcrit = np.empty(N + 1)

pcrit[0] = f.zero(0.0)

for t in range(N):
    v = np.zeros((1, n))
    for k in range(m):
        pnext = Value.nodes + e[k]
        v += w[k] * np.maximum(K - np.exp(pnext), delta * Value(pnext))

    Value[:] = v
    pcrit[t + 1] = f.broyden(pcrit[t])
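
# Each pass of the loop pushes the value function one period further from
# expiration; pcrit[t] is then the log-price at which the payoff from immediate
# exercise, K - exp(p), equals the discounted continuation value.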

# Print Critical Exercise Price 300 Periods to Expiration

print('Critical Exercise Price 300 Periods to Expiration')
print('   Critical Price  = {:5.2f}'.format(np.exp(pcrit[-1])))

# Plot Critical Exercise Prices
demo.figure('American Put Option Optimal Exercise Boundary',
            'Periods Remaining Until Expiration', 'Exercise Price')
plt.plot(np.exp(pcrit))
plt.show()
Example no. 27
# Check Model Derivatives
# dpcheck(model,sstar,xstar)


## SOLUTION

# Compute Linear-Quadratic Approximation at Collocation Nodes
model.lqapprox(sstar, xstar)

# Solve Bellman Equation
model.solve() # no need to pass LQ to model, it's already there
resid, s, v, x = model.solution()

# Plot Optimal Policy
demo.figure('Optimal Irrigation Policy', 'Reservoir Level', 'Irrigation')
plt.plot(s, x.T)

# Plot Value Function
demo.figure('Value Function', 'Reservoir Level', 'Value')
plt.plot(s, v.T)

# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Reservoir Level', 'Shadow Price')
plt.plot(s, model.Value(s, 1).T)

# Plot Residual
demo.figure('Bellman Equation Residual', 'Reservoir Level', 'Residual')
plt.plot(s, resid.T)
plt.hlines(0, smin, smax, 'k', '--')
Example no. 28
model = DDPmodel(f, P, delta).solve(print=True)


## Analysis

# Display Optimal Policy
xtemp = model.policy.reshape((n1, n2))
header = '{:^8s} {:^8s}  {:^8s}  {:^8s}'.format('Age', 'Lo', 'Med', 'Hi')

print('Optimal Policy')
print(header)
print(*('{:^8d} {:^8s}  {:^8s}  {:^8s}\n'.format(s, *X[x]) for s, x in zip(s1, xtemp)))


# Plot Value Function
demo.figure('Optimal Replacement Value', 'Age', 'Optimal Value (thousands)')
plt.plot(s1, model.value.reshape((n1,n2)) / 1000)
plt.legend(['Low', 'Med', 'Hi'])



# Compute Steady-State distribution
pi = model.markov().reshape((n1, n2))

# Display Steady-State distribution
print('          Invariant Distribution     ')
print(header)
print(*('{:^8d} {:8.3f}  {:8.3f}  {:8.3f}\n'.format(s, *x) for s, x in zip(s1, pi)))


# Compute Steady-State Mean Cow Age and Productivity
Example no. 29
# State Transition Function

g = np.zeros_like(f)
for k in range(m):
    snext = alpha * (S - X[k]) - 0.5 * beta * (S - X[k])**2
    g[k] = getindex(snext, S)

# Model Structure
model = DDPmodel(f, g, delta)
model.solve()

## Analysis

# Plot Optimal Policy
demo.figure('Optimal Harvest Policy', 'Stock', 'Harvest')
plt.plot(S, X[model.policy])

# Plot Value Function
demo.figure('Optimal Value Function', 'Stock', 'Value')
plt.plot(S, model.value)

# Simulate Model
nyrs = 20
t = np.arange(nyrs + 1)
spath, xpath = model.simulate(n - 1, nyrs)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Stock')
plt.plot(t, S[spath])
Example no. 30
# Model Structure
model = DDPmodel(f, P, delta).solve(print=True)

## Analysis

# Display Optimal Policy
xtemp = model.policy.reshape((n1, n2))
header = '{:^8s} {:^8s}  {:^8s}  {:^8s}'.format('Age', 'Lo', 'Med', 'Hi')

print('Optimal Policy')
print(header)
print(*('{:^8d} {:^8s}  {:^8s}  {:^8s}\n'.format(s, *X[x])
        for s, x in zip(s1, xtemp)))

# Plot Value Function
demo.figure('Optimal Replacement Value', 'Age', 'Optimal Value (thousands)')
plt.plot(s1, model.value.reshape((n1, n2)) / 1000)
plt.legend(['Low', 'Med', 'Hi'])

# Compute Steady-State distribution
pi = model.markov().reshape((n1, n2))

# Display Steady-State distribution
print('          Invariant Distribution     ')
print(header)
print(*('{:^8d} {:8.3f}  {:8.3f}  {:8.3f}\n'.format(s, *x)
        for s, x in zip(s1, pi)))

# Compute Steady-State Mean Cow Age and Productivity
pi = pi.flatten()
avgage = np.dot(pi.T, S1)
Example no. 31
# Deterministic Steady-State
kstar = ((1 - delta * gamma) / (delta * beta))**(
    1 / (beta - 1))  # deterministic steady-state capital investment
sstar = gamma * kstar + kstar**beta  # deterministic steady-state wealth

# Check Model Derivatives
# dpcheck(model,sstar,kstar)

## SOLUTION

# Solve Bellman Equation
growth.solve()
resid, s, v, k = growth.solution()

# Plot Optimal Policy
demo.figure('Optimal Investment Policy', 'Wealth', 'Investment')
plt.plot(s, k.T)

# Plot Value Function
demo.figure('Value Function', 'Wealth', 'Value')
plt.plot(s, v.T)

# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Wealth', 'Shadow Price')
plt.plot(s, growth.Value(s, order=1).T)

# Plot Residual
demo.figure('Bellman Equation Residual', 'Wealth', 'Residual')
plt.plot(s, resid.T)
plt.hlines(0, smin, smax, 'k', '--')
Example no. 32
f = np.zeros((m, n))
f[0] = 50 - 2.5 * S - 2.5 * S ** 2
f[1] = 50 - repcost
f[0, -1] = -np.inf

# State Transition Function
g = np.zeros_like(f)
g[0] = np.arange(1, n + 1)
g[0, -1] = n - 1  # adjust last state so it doesn't go out of bounds

# Model Structure
model = DDPmodel(f, g, delta)
model.solve()


## Analysis

# Simulate Model
sinit, nyrs = S.min() - 1, 12
t = np.arange(1 + nyrs)
spath, xpath = model.simulate(sinit, nyrs)

# Plot Optimal Value
demo.figure('Optimal Value Function', 'Age of Machine', 'Value')
plt.plot(S, model.value)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Age of Machine', [0, 12])
plt.plot(t, S[spath])

plt.show()