Example #1
__author__ = 'Randall'


from demos.setup import np, plt, demo
from compecon import DDPmodel
from compecon.tools import getindex

# DEMDDP01 Mine management model

# Model Parameters
price = 1                    # price of ore
sbar  = 100                  # initial ore stock
delta = 0.9                  # discount factor

# State Space
S = np.arange(sbar + 1)      # vector of states
n = S.size                   # number of states

# Action Space
X = np.arange(sbar + 1)      # vector of actions
m = X.size                   # number of actions

# Reward Function
f = np.full((m, n), -np.inf)
for c, s in enumerate(S):
    for r, x in enumerate(X):
        if x <= s:
            f[r, c] = price * x - (x ** 2) / (1 + s)

# State Transition Function
g = np.empty_like(f)
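
The excerpt ends just after allocating the transition array. A sketch of how it could be completed and the model solved, following the pattern of the other examples here (the rule "next stock = current stock minus extraction" is an assumption about the mine model, not part of the excerpt):

for k in range(m):
    snext = S - X[k]              # assumed transition: ore remaining after extracting X[k]
    g[k] = getindex(snext, S)     # index of nearest grid state; infeasible actions already carry reward -inf

# Model Structure
model = DDPmodel(f, g, delta)
model.solve()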
Example #2
from demos.setup import np, plt, demo
from compecon import DDPmodel
from compecon.tools import gridmake, getindex



## DEMDDP06 Bioeconomic model

# Model Parameters
T     = 10                   # foraging periods
emax  =  8                   # energy capacity
e     = [2,  4,  4]            # energy from foraging
p     = [1.0, 0.7, 0.8]        # predation survival probabilities
q     = [0.5, 0.8, 0.7]        # foraging success probabilities

# State Space
S = np.arange(emax + 1)                # energy levels
n = S.size                # number of states

# Action Space
X = np.arange(3) + 1        # vector of actions
m = X.size               	# number of actions

# Reward Function
f = np.zeros((m, n))

# State Transition Probability Matrix
P = np.zeros((m, n, n))
for k in range(m):
    P[k, 0, 0] = 1
    i = range(1, n)
    # does not survive predation
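    P[k, i, 0] += 1 - p[k]        # (assumed completion; the rest of this loop is not in the original excerpt)
    # survives predation, forages successfully: spend 1 unit of energy, gain e[k]
    j = getindex(np.minimum(S[i] - 1 + e[k], emax), S)
    P[k, i, j] += p[k] * q[k]
    # survives predation, forages unsuccessfully: spend 1 unit of energy
    j = getindex(S[i] - 1, S)
    P[k, i, j] += p[k] * (1 - q[k])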
Example #3
# In[4]:


nplot = 1001
x = np.linspace(a, b, nplot)
y = runge(x)


# Plot Runge's Function

# Initialize data matrices

# In[5]:


n = np.arange(3, 33, 2)
nn = n.size
errunif, errcheb = (np.zeros([nn, nplot]) for k in range(2))
nrmunif, nrmcheb, conunif, concheb = (np.zeros(nn) for k in range(4))


# Compute approximation errors on refined grid and interpolation matrix condition numbers

# In[6]:


for i in range(nn):
    # Uniform-node monomial-basis approximant
    xnodes = np.linspace(a, b, n[i])
    c = np.polyfit(xnodes, runge(xnodes), n[i])
    yfit = np.polyval(c, x)
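
This excerpt (and Example #4 below, which repeats it together with the plotting code) relies on objects defined in earlier notebook cells. A minimal setup consistent with the Runge-function label shown in Example #4; the interval [-1, 1] and the use of numpy.linalg for norm and cond are assumptions:

from demos.setup import np, plt
from numpy.linalg import norm, cond

a, b = -1, 1                        # approximation interval (assumed)

def runge(x):
    return 1 / (1 + 25 * x ** 2)    # Runge's function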
Example #4
nplot = 1001
x = np.linspace(a, b, nplot)
y = runge(x)

# Plot Runge's Function
fig1 = plt.figure(figsize=[6, 9])
ax1 = fig1.add_subplot(211,
                       title="Runge's Function",
                       xlabel='',
                       ylabel='y',
                       xticks=[])
ax1.plot(x, y)
ax1.text(-0.8, 0.8, r'$y = \frac{1}{1+25x^2}$', fontsize=18)

# Initialize data matrices
n = np.arange(3, 33, 2)
nn = n.size
errunif, errcheb = (np.zeros([nn, nplot]) for k in range(2))
nrmunif, nrmcheb, conunif, concheb = (np.zeros(nn) for k in range(4))

# Compute approximation errors on refined grid and interpolation matrix condition numbers
for i in range(nn):
    # Uniform-node monomial-basis approximant
    xnodes = np.linspace(a, b, n[i])
    c = np.polyfit(xnodes, runge(xnodes), n[i])
    yfit = np.polyval(c, x)
    phi = xnodes.reshape(-1, 1)**np.arange(n[i])

    errunif[i] = yfit - y
    nrmunif[i] = np.log10(norm(yfit - y, np.inf))
    conunif[i] = np.log10(cond(phi, 2))
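
The loop above shows only the uniform-node monomial branch; the full demo also measures a Chebyshev-node approximant. A sketch of that branch using NumPy's Chebyshev utilities (an illustration only; it assumes the setup sketch after Example #3, and the source demo itself works with the compecon basis classes):

from numpy.polynomial import chebyshev as cheb

for i in range(nn):
    # Chebyshev nodes on [a, b]
    k = np.arange(n[i])
    xnodes = (a + b) / 2 + (b - a) / 2 * np.cos((n[i] - k - 0.5) * np.pi / n[i])
    c = cheb.chebfit(xnodes, runge(xnodes), n[i] - 1)   # interpolating Chebyshev coefficients
    yfit = cheb.chebval(x, c)
    phi = cheb.chebvander(xnodes, n[i] - 1)             # interpolation matrix

    errcheb[i] = yfit - y
    nrmcheb[i] = np.log10(norm(yfit - y, np.inf))
    concheb[i] = np.log10(cond(phi, 2))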
Example #5
    g[k] = getindex(snext, S)

# Model Structure
model = DDPmodel(f, g, delta)
model.solve()

## Analysis

# Plot Optimal Policy
demo.figure('Optimal Harvest Policy', 'Stock', 'Harvest')
plt.plot(S, X[model.policy])

# Plot Value Function
demo.figure('Optimal Value Function', 'Stock', 'Value')
plt.plot(S, model.value)

# Simulate Model
nyrs = 20
t = np.arange(nyrs + 1)
spath, xpath = model.simulate(n - 1, nyrs)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Stock')
plt.plot(t, S[spath])

# Plot Optimal Transition Function
demo.figure('Optimal State Transition Function', 'S(t)', 'S(t+1)')
ii, jj = np.where(model.transition)
plt.plot(S[ii], S[jj], S, S, '--')

plt.show()
Example #6
__author__ = 'Randall'

from demos.setup import np, plt, demo
from compecon import DDPmodel
from compecon.tools import gridmake, getindex

# DEMDDP03 Asset replacement model with maintenance

# Model Parameters
maxage  = 5                  # maximum asset age
repcost = 75                 # replacement cost
mancost = 10               	# maintenance cost
delta   = 0.9                # discount factor

# State Space
s1 = np.arange(1, 1 + maxage)   # asset age
s2 = s1 - 1   	                # servicings
S  = gridmake(s1,s2)     	# combined state grid
S1, S2 = S
n = S1.size                  	# total number of states

# Action Space
X = ['no action', 'service', 'replace']	 # vector of actions
m = len(X)              	# number of actions

# Reward Function
f = np.zeros((m, n))
q = 50 - 2.5 * S1 - 2.5 * S1 ** 2
f[0] = q * np.minimum(1, 1 - (S1 - S2) / maxage)
f[1] = q * np.minimum(1, 1 - (S1 - S2 - 1) / maxage) - mancost
f[2] = 50 - repcost
Example #7
__author__ = 'Randall'

from demos.setup import np, plt, demo
from compecon import DDPmodel

## DEMDDP09 Deterministic cow replacement model

# Model Parameters
delta = 0.9  # discount factor
cost = 500  # replacement cost
price = 150  # milk price

# State Space
S = np.arange(1, 11)  # lactation states
n = S.size  # number of states

# Action Space (keep='K', replace='R')
X = ['Keep', 'Replace']  # keep or replace
m = len(X)  # number of actions

# Reward Function
f = np.empty((m, n))
y = -0.2 * S**2 + 2 * S + 8  # yield per lactation
f[0] = price * y
f[1] = f[0] - cost
f[0, -1] = -np.inf  # force replace at lactation 10

# State Transition Function
g = np.ones_like(f)
g[0] = np.minimum(np.arange(n) + 1,
                  n - 1)  # Raise lactation number by 1, if keep
Example #8
model = DDPmodel(f, g, delta)
model.solve()
   

## Analysis

# Plot Optimal Policy
demo.figure('Optimal Harvest Policy', 'Stock', 'Harvest')
plt.plot(S, X[model.policy])


# Plot Value Function
demo.figure('Optimal Value Function', 'Stock', 'Value')
plt.plot(S, model.value)


# Simulate Model
nyrs = 20
t = np.arange(nyrs + 1)
spath, xpath = model.simulate(n - 1, nyrs)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Stock')
plt.plot(t, S[spath])

# Plot Optimal Transition Function
demo.figure('Optimal State Transition Function', 'S(t)', 'S(t+1)')
ii, jj = np.where(model.transition)
plt.plot(S[ii], S[jj], S, S, '--')

plt.show()
Example #9
__author__ = 'Randall'

from demos.setup import np, plt, demo
from compecon import DDPmodel

## DEMDDP02 Asset replacement model

# Model Parameters
maxage = 5  # maximum machine age
repcost = 75  # replacement cost
delta = 0.9  # discount factor

# State Space
S = np.arange(1, 1 + maxage)  # machine age
n = S.size  # number of states

# Action Space (keep=1, replace=2)
X = ['keep', 'replace']  # vector of actions
m = len(X)  # number of actions

# Reward Function
f = np.zeros((m, n))
f[0] = 50 - 2.5 * S - 2.5 * S**2
f[1] = 50 - repcost
f[0, -1] = -np.inf

# State Transition Function
g = np.zeros_like(f)
g[0] = np.arange(1, n + 1)
g[0, -1] = n - 1  # adjust last state so it doesn't go out of bounds
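
The excerpt stops before the replacement transition and the solution step. Because g was initialized with zeros, the 'replace' row already points to state index 0, i.e. a brand-new machine of age 1. A sketch of the remaining steps, following the pattern of the other examples here:

g[1] = 0                     # replacement yields a new machine of age 1 (state index 0)

# Model Structure
model = DDPmodel(f, g, delta)
model.solve()

# Plot Optimal Value Function
demo.figure('Optimal Value Function', 'Age of Machine', 'Value')
plt.plot(S, model.value)
plt.show()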
Example #10
from demos.setup import np, plt, tic, toc
from numpy.linalg import solve
from scipy.sparse.linalg import spsolve
from scipy.sparse import csc_matrix

""" Sparse linear equations"""
AA = np.random.rand(1000, 1000)
bb = np.random.rand(1000, 1)
for i in range(1000):
    for j in range(1000):
        if abs(i - j) > 1:
            AA[i,j] = 0

n = np.hstack((np.arange(50, 250, 50), np.arange(300, 1100, 100)))
ratio = np.empty(n.size)
for k in range(n.size):
    A = AA[:n[k], :n[k]]
    b = bb[:n[k]]
    tt = tic()
    for i in range(100):
        x = solve(A, b)

    toc1 = toc(tt)

    S = csc_matrix(A)
    tt = tic()
    for i in range(100):
        x = spsolve(S, b)

    toc2 = toc(tt)
    ratio[k] = toc2 / toc1
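
The excerpt ends once the timing ratios are collected; a simple way to visualize them (not part of the original):

plt.figure()
plt.plot(n, ratio)
plt.xlabel('n')
plt.ylabel('sparse / dense solution time')
plt.title('Relative effort to solve a banded linear system')
plt.show()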
Example #11
__author__ = 'Randall'

from demos.setup import np, plt, demo
from compecon import DDPmodel


## DEMDDP09 Deterministic cow replacement model


# Model Parameters
delta = 0.9                  # discount factor
cost  = 500                  # replacement cost
price = 150                  # milk price

# State Space
S = np.arange(1, 11)         # lactation states
n = S.size                   # number of states

# Action Space (keep='K', replace='R')
X = ['Keep', 'Replace']      # keep or replace
m = len(X)                   # number of actions

# Reward Function
f = np.empty((m, n))
y = -0.2 * S ** 2 + 2 * S + 8  # yield per lactation
f[0] = price * y
f[1] = f[0] - cost
f[0, -1] = -np.inf               # force replace at lactation 10

# State Transition Function
g = np.ones_like(f)
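
The excerpt stops after allocating g. Example #7 above, which excerpts the same demo, continues with the 'keep' transition:

g[0] = np.minimum(np.arange(n) + 1, n - 1)   # raise lactation number by 1, if keep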
Example #12
## Analysis

# Plot Optimal Policy
demo.figure('Optimal Investment', 'Wealth', 'Investment')
plt.plot(S, X[model.policy] * S)

# Plot Optimal Policy
demo.figure('Optimal Consumption', 'Wealth', 'Consumption')
plt.plot(S, S - X[model.policy] * S)

# Plot Value Function
demo.figure('Optimal Value Function', 'Wealth', 'Value')
plt.plot(S, model.value)


# Simulate Model
nyrs = 20
t = np.arange(0, nyrs + 1)
st, xt = model.simulate(smin, nyrs)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Wealth')
plt.plot(t, S[st])

# Compute Steady State Distribution and Mean
pi = model.markov()
avgstock = np.dot(S, pi)
print('Steady-state Wealth     {:8.2f},  {:8.2f}'.format(*avgstock))

plt.show()
Example #13
from demos.setup import np          # import assumed; it is not shown in the excerpt

# Model Parameters
T = 0.5                 # years to expiration
sigma = 0.2                 # annual volatility
r = 0.05                # annual interest rate
strike = 2.1                 # option strike price
p0 = 2.0                 # current asset price

# Discretization Parameters
N = 100                 # number of time intervals
tau = T / N              	# length of time intervals
delta = np.exp(-r * tau)   	# discount factor
u = np.exp(sigma * np.sqrt(tau))	# up jump factor
q = 0.5 + np.sqrt(tau) * (r - (sigma**2) / 2) / (2 * sigma) # up jump probability

# State Space
price = p0 * u ** np.arange(-N, N+1)      # asset prices
n = price.size        # number of states

# Action Space (hold=1, exercise=2)
X = ['hold', 'exercise']   	# vector of actions
m = len(X)               	# number of actions

# Reward Function
f = np.zeros((m,n))
f[1] = strike - price

# State Transition Probability Matrix
P = np.zeros((m, n, n))

for i in range(n):
    P[0, i, min(i + 1, n - 1)] = q      # price moves up while holding
    P[0, i, max(i - 1, 0)] = 1 - q      # price moves down while holding (assumed completion; not in the excerpt)
Example #14
# State Transition Probability Matrix
P = np.zeros((m, n, n))
P[0, :, 0] = 1                 # remains unemployed
P[1, 0, 0] = 1 - pfind           # finds no job
P[1, 0, 1] = pfind             # finds job
P[1, 1, 0] = pfire             # gets fired
P[1, 1, 1] = 1 - pfire           # keeps job

# Model Structure
model = DDPmodel(f, P, delta)

## Solution

# Solve Bellman Equation
wage = np.arange(55, 66)
xtable = np.zeros((wage.size, 2), dtype=int)
for i, w in enumerate(wage):
    model.reward[1, 1] = w  # vary wage
    xtable[i] = model.solve().policy  # solve via policy iteration

## Analysis

# Display Optimal Policy
print('  Optimal Job Search Strategy')
print('   (1=inactive, 2=active)   ')
print('  Wage  Unemployed  Employed ')


print(*['{:4d}  {:10s}{:10s}\n'.format(w, X[u], X[e]) for w, (u, e) in zip(wage, xtable)])
Example #15
from demos.setup import np, plt, demo
from compecon import DDPmodel
from compecon.tools import gridmake, getindex

# DEMDDP05 Water management model

# Model Parameters
alpha1 = 14  # producer benefit function parameter
beta1 = 0.8  # producer benefit function parameter
alpha2 = 10  # recreational user benefit function parameter
beta2 = 0.4  # recreational user benefit function parameter
maxcap = 30  # maximum dam capacity
r = np.array([0, 1, 2, 3, 4])  # rain levels
p = np.array([0.1, 0.2, 0.4, 0.2, 0.1])  # rain probabilities
delta = 0.9  # discount factor

# State Space
S = np.arange(1 + maxcap)  # vector of states
n = S.size  # number of states

# Action Space
X = np.arange(1 + maxcap)  # vector of actions
m = X.size  # number of actions

# Reward Function
f = np.full((m, n), -np.inf)
for k in range(m):
    f[k, k:] = alpha1 * X[k]**beta1 + alpha2 * (S[k:] - X[k])**beta2

# State Transition Probability Matrix
P = np.zeros((m, n, n))

for k in range(m):
    # Assumed completion (not in the original excerpt): next-period stock is the current
    # stock minus the release plus random rainfall, capped at dam capacity.
    for i in range(n):
        if X[k] > S[i]:
            continue                                   # release cannot exceed stored water; reward is -inf there
        snext = np.minimum(S[i] - X[k] + r, maxcap)    # one next-period stock per rain level
        inext = getindex(snext, S)
        for j, prob in zip(inext, p):
            P[k, i, j] += prob
Example #16
model = DDPmodel(f, g, delta).solve()

## Analysis

# Plot Optimal Policy
demo.figure('Optimal Investment', 'Wealth', 'Investment')
plt.plot(S, X[model.policy] * S)

# Plot Optimal Policy
demo.figure('Optimal Consumption', 'Wealth', 'Consumption')
plt.plot(S, S - X[model.policy] * S)

# Plot Value Function
demo.figure('Optimal Value Function', 'Wealth', 'Value')
plt.plot(S, model.value)

# Simulate Model
nyrs = 20
t = np.arange(0, nyrs + 1)
st, xt = model.simulate(smin, nyrs)

# Plot State Path
demo.figure('Optimal State Path', 'Year', 'Wealth')
plt.plot(t, S[st])

# Compute Steady State Distribution and Mean
pi = model.markov()
avgstock = np.dot(S, pi)
print('Steady-state Wealth     {:8.2f},  {:8.2f}'.format(*avgstock))

plt.show()
Example #17
__author__ = 'Randall'

from demos.setup import np, plt, demo
from compecon import DDPmodel
from compecon.tools import gridmake


## DEMDDP10 Stochastic cow replacement model

# Model Parameters
delta = 0.9                  # discount factor
cost = 500                  # replacement cost
price = 150                  # milk price

# State Space
s1 = np.arange(10) + 1     # lactation states
s2 = np.array([0.8, 1.0, 1.2])           # productivity states
n1 = s1.size              # number of lactation states
n2 = s2.size              # number of productivity states
S1, S2 = gridmake(s1, s2)      # combined state grid
n = n1 * n2                    # total number of states

# Action Space (keep='K', replace='R')
X = np.array(['Keep', 'Replace'])          # keep or replace
m = X.size                     # number of actions

# Reward Function
f = np.empty((m, n))
y = (-0.2 * S1 ** 2 + 2 * S1 + 8) * S2  # yield per lactation
f[0] = price * y
f[1] = f[0] - cost
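
The excerpt is cut just before the terminal-lactation adjustment; Example #19 below, which excerpts the same demo, includes it:

f[0, S1 == 10] = -np.inf  # force replace at lactation 10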
Example #18
# State Transition Probability Matrix
P = np.zeros((m, n, n))
P[0, :, 0] = 1  # remains unemployed
P[1, 0, 0] = 1 - pfind  # finds no job
P[1, 0, 1] = pfind  # finds job
P[1, 1, 0] = pfire  # gets fired
P[1, 1, 1] = 1 - pfire  # keeps job

# Model Structure
model = DDPmodel(f, P, delta)

## Solution

# Solve Bellman Equation
wage = np.arange(55, 66)
xtable = np.zeros((wage.size, 2), dtype=int)
for i, w in enumerate(wage):
    model.reward[1, 1] = w  # vary wage
    xtable[i] = model.solve().policy  # solve via policy iteration

## Analysis

# Display Optimal Policy
print('  Optimal Job Search Strategy')
print('   (1=inactive, 2=active)   ')
print('  Wage  Unemployed  Employed ')

print(*[
    '{:4d}  {:10s}{:10s}\n'.format(w, X[u], X[e])
    for w, (u, e) in zip(wage, xtable)
])
Example #19
__author__ = 'Randall'

from demos.setup import np, plt, demo
from compecon import DDPmodel
from compecon.tools import gridmake

## DEMDDP10 Stochastic cow replacement model

# Model Parameters
delta = 0.9  # discount factor
cost = 500  # replacement cost
price = 150  # milk price

# State Space
s1 = np.arange(10) + 1  # lactation states
s2 = np.array([0.8, 1.0, 1.2])  # productivity states
n1 = s1.size  # number of lactation states
n2 = s2.size  # number of productivity states
S1, S2 = gridmake(s1, s2)  # combined state grid
n = n1 * n2  # total number of states

# Action Space (keep='K', replace='R')
X = np.array(['Keep', 'Replace'])  # keep or replace
m = X.size  # number of actions

# Reward Function
f = np.empty((m, n))
y = (-0.2 * S1**2 + 2 * S1 + 8) * S2  # yield per lactation
f[0] = price * y
f[1] = f[0] - cost
f[0, S1 == 10] = -np.inf  # force replace at lactation 10
Example #20
# Imports assumed for this excerpt (they are not shown in the original)
from demos.setup import np
from compecon import BasisSpline
from compecon.quad import qnwnorm

# Model Parameters
A       = 6                                # maximum asset age 
alpha   = np.array([50, -2.5, -2.5])       # production function coefficients
kappa   = 40                               # net replacement cost
pbar    = 1                                # long-run mean unit profit
gamma   = 0.5                              # unit profit autoregression coefficient
sigma   = 0.15                             # standard deviation of unit profit shock
delta   = 0.9                              # discount factor 

# Continuous State Shock Distribution
m = 5                                      # number of unit profit shocks
e, w = qnwnorm(m, 0, sigma ** 2)           # unit profit shocks and probabilities

# Deterministic Discrete State Transitions
h = np.zeros((2, A))
h[0, :-1] = np.arange(1, A)

# Approximation Structure
n  = 200                                   # number of collocation nodes
pmin = 0                                   # minimum unit profit
pmax = 2                                   # maximum unit profit
basis = BasisSpline(n, pmin, pmax, labels=['unit profit'])        # basis functions

# Model Structure

def profit(p, x, i, j):
    a = i + 1
    if j or a == A:
        return p * 50 - kappa
    else:
        return p * (alpha[0] + alpha[1] * a + alpha[2] * a ** 2 )
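
A quick sanity check on the discretized shock (not part of the original excerpt): the qnwnorm weights are probabilities and the nodes are centered on the specified mean of zero.

assert np.isclose(np.ravel(w).sum(), 1.0)          # quadrature weights sum to one
assert np.isclose(np.ravel(w) @ np.ravel(e), 0.0)  # weighted mean of the shocks is (essentially) zero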
Example #21
__author__ = 'Randall'


from demos.setup import np, plt, demo
from compecon import DDPmodel


## DEMDDP02 Asset replacement model

# Model Parameters
maxage  = 5                  # maximum machine age
repcost = 75               	# replacement cost
delta   = 0.9                # discount factor

# State Space
S = np.arange(1, 1 + maxage)  # machine age
n = S.size                	  # number of states

# Action Space (keep=1, replace=2)
X = ['keep', 'replace']     	# vector of actions
m = len(X)                  	# number of actions


# Reward Function
f = np.zeros((m, n))
f[0] = 50 - 2.5 * S - 2.5 * S ** 2
f[1] = 50 - repcost
f[0, -1] = -np.inf

# State Transition Function
g = np.zeros_like(f)
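
The excerpt ends as the transition array is allocated; Example #9 above, which covers the same model, fills in the 'keep' transition:

g[0] = np.arange(1, n + 1)
g[0, -1] = n - 1  # adjust last state so it doesn't go out of bounds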