Example 1
    # Fragment: tail of the loop that builds the state-transition indices; for each
    # action k, g[k] stores the index in the state grid S of next period's stock snext.
    g[k] = getindex(snext, S)

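The helper getindex is not defined in this snippet; a minimal sketch of its assumed behaviour (return, for each value, the index of the nearest node of the state grid) is:

import numpy as np

def getindex(values, grid):
    # Hypothetical stand-in for the CompEcon getindex helper: for each entry of
    # `values`, return the index of the closest node in `grid`.
    values = np.atleast_1d(values)
    return np.abs(values[:, None] - grid[None, :]).argmin(axis=1)
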
# Model Structure
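# The arguments are assumed to be: f, the reward matrix over (action, state)
# pairs; g, the matching next-state indices built above; delta, the discount factor.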
model = DDPmodel(f, g, delta)
model.solve()

## Analysis

# Plot Optimal Policy
demo.figure('Optimal Harvest Policy', 'Stock', 'Harvest')
plt.plot(S, X[model.policy])
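
demo.figure is a small plotting helper from the CompEcon demo utilities; a rough sketch of its assumed behaviour (open a new matplotlib figure and set its title and axis labels) is:

import matplotlib.pyplot as plt

def demo_figure(title, xlabel, ylabel):
    # Hypothetical stand-in for demo.figure, shown only for orientation.
    plt.figure()
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)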

# Plot Value Function
demo.figure('Optimal Value Function', 'Stock', 'Value')
plt.plot(S, model.value)

# Simulate Model
nyrs = 20
t = np.arange(nyrs + 1)
spath, xpath = model.simulate(n - 1, nyrs)
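
For a deterministic model such as this one, simulate is assumed to iterate the optimal next-state index forward from the initial state index (here n - 1); a sketch under that assumption:

import numpy as np

def simulate_path(next_index, policy, s0, nyrs):
    # Hypothetical helper mirroring the assumed behaviour of model.simulate.
    # next_index[i] is the index of the state reached from state i under the
    # optimal policy (e.g. model.transition.argmax(axis=1) for a 0/1 matrix).
    spath = np.empty(nyrs + 1, dtype=int)
    spath[0] = s0
    for t in range(nyrs):
        spath[t + 1] = next_index[spath[t]]
    xpath = policy[spath]  # index of the action chosen in each visited state
    return spath, xpath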

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Stock')
plt.plot(t, S[spath])

# Plot Optimal Transition Function
demo.figure('Optimal State Transition Function', 'S(t)', 'S(t+1)')
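# model.transition is assumed to hold the state-transition matrix under the
# optimal policy; np.where picks out its nonzero (S(t), S(t+1)) index pairs.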
ii, jj = np.where(model.transition)
plt.plot(S[ii], S[jj], S, S, '--')

plt.show()
Example 2
model = DDPmodel(f, g, delta)
model.solve()

## Analysis

# Plot Optimal Policy
demo.figure('Optimal Harvest Policy', 'Stock', 'Harvest')
plt.plot(S, X[model.policy])


# Plot Value Function
demo.figure('Optimal Value Function', 'Stock', 'Value')
plt.plot(S, model.value)


# Simulate Model
nyrs = 20
t = np.arange(nyrs + 1)
spath, xpath = model.simulate(n - 1, nyrs)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Stock')
plt.plot(t, S[spath])

# Plot Optimal Transition Function
demo.figure('Optimal State Transition Function', 'S(t)', 'S(t+1)')
ii, jj = np.where(model.transition)
plt.plot(S[ii], S[jj], S, S, '--')

plt.show()
Example 3
for r, x in enumerate(X):
    snext = S - x
    g[r] = getindex(snext, S)


# Model Structure
model = DDPmodel(f, g, delta)
model.solve()

## Analysis

# Simulate Model
sinit = S.max()
nyrs = 15
t = np.arange(nyrs + 1)
spath, xpath = model.simulate(sinit, nyrs)

# Plot Optimal Policy
demo.figure('Optimal Extraction Policy', 'Stock', 'Extraction')
plt.plot(S, X[model.policy])

# Plot Value Function
demo.figure('Optimal Value Function', 'Stock', 'Value')
plt.plot(S, model.value)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Stock')
plt.plot(t, S[spath])

plt.show()
Example 4
model = DDPmodel(f, g, delta).solve()

## Analysis

# Plot Optimal Investment Policy
demo.figure('Optimal Investment', 'Wealth', 'Investment')
plt.plot(S, X[model.policy] * S)

# Plot Optimal Consumption Policy
demo.figure('Optimal Consumption', 'Wealth', 'Consumption')
plt.plot(S, S - X[model.policy] * S)

# Plot Value Function
demo.figure('Optimal Value Function', 'Wealth', 'Value')
plt.plot(S, model.value)

# Simulate Model
nyrs = 20
t = np.arange(0, nyrs + 1)
st, xt = model.simulate(smin, nyrs)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Wealth')
plt.plot(t, S[st])

# Compute Steady State Distribution and Mean
pi = model.markov()
avgstock = np.dot(S, pi)
print('Steady-state Wealth     {:8.2f}'.format(avgstock))
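
A sketch of how the steady-state distribution could be obtained from the transition matrix P under the optimal policy, assuming model.markov() solves pi = pi P with the probabilities summing to one:

import numpy as np

def stationary_distribution(P):
    # Hypothetical helper, shown only to make the step above concrete:
    # take the left eigenvector of P for eigenvalue 1 and normalise it.
    eigvals, eigvecs = np.linalg.eig(P.T)
    v = np.real(eigvecs[:, np.argmin(np.abs(eigvals - 1.0))])
    return v / v.sum()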

plt.show()
Example 5
## Analysis

# Plot Optimal Investment Policy
demo.figure('Optimal Investment', 'Wealth', 'Investment')
plt.plot(S, X[model.policy] * S)

# Plot Optimal Consumption Policy
demo.figure('Optimal Consumption', 'Wealth', 'Consumption')
plt.plot(S, S - X[model.policy] * S)

# Plot Value Function
demo.figure('Optimal Value Function', 'Wealth', 'Value')
plt.plot(S, model.value)


# Simulate Model
nyrs = 20
t = np.arange(0, nyrs + 1)
st, xt = model.simulate(smin, nyrs)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Wealth')
plt.plot(t, S[st])

# Compute Steady State Distribution and Mean
pi = model.markov()
avgstock = np.dot(S, pi)
print('Steady-state Wealth     {:8.2f}'.format(avgstock))

plt.show()