Example #1
from demos.setup import np, plt        # toolbox imports, as in the other demos below
from numpy.linalg import norm, cond    # used for the error norms and condition numbers


def runge(x):
    return 1 / (1 + 25 * x ** 2)  # Runge's function (matches the plot label below)


a, b = -1, 1       # approximation interval (the standard Runge domain is assumed here)
nplot = 1001       # number of evaluation points on the refined grid
x = np.linspace(a, b, nplot)
y = runge(x)

# Plot Runge's Function
fig1 = plt.figure(figsize=[6, 9])
ax1 = fig1.add_subplot(211,
                       title="Runge's Function",
                       xlabel='',
                       ylabel='y',
                       xticks=[])
ax1.plot(x, y)
ax1.text(-0.8, 0.8, r'$y = \frac{1}{1+25x^2}$', fontsize=18)

# Initialize data matrices
n = np.arange(3, 33, 2)
nn = n.size
errunif, errcheb = (np.zeros([nn, nplot]) for k in range(2))
nrmunif, nrmcheb, conunif, concheb = (np.zeros(nn) for k in range(4))

# Compute approximation errors on refined grid and interpolation matrix condition numbers
for i in range(nn):
    # Uniform-node monomial-basis approximant
    xnodes = np.linspace(a, b, n[i])
    c = np.polyfit(xnodes, runge(xnodes), n[i] - 1)  # degree n[i]-1 polynomial interpolates n[i] nodes
    yfit = np.polyval(c, x)
    phi = xnodes.reshape(-1, 1)**np.arange(n[i])

    errunif[i] = yfit - y
    nrmunif[i] = np.log10(norm(yfit - y, np.inf))
    conunif[i] = np.log10(cond(phi, 2))

    # Chebychev-node Chebychev-basis approximant
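    # A hedged sketch of the missing Chebychev step (assumption: the original demo
    # likely uses the toolbox's BasisChebyshev; numpy's Chebyshev utilities stand in here).
    xcheb = (a + b) / 2 + (b - a) / 2 * np.cos(np.pi * (np.arange(n[i]) + 0.5) / n[i])
    ccheb = np.polynomial.chebyshev.chebfit(xcheb, runge(xcheb), n[i] - 1)
    ycheb = np.polynomial.chebyshev.chebval(x, ccheb)
    phicheb = np.polynomial.chebyshev.chebvander(xcheb, n[i] - 1)

    errcheb[i] = ycheb - y
    nrmcheb[i] = np.log10(norm(ycheb - y, np.inf))
    concheb[i] = np.log10(cond(phicheb, 2))
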
__author__ = 'Randall'

from demos.setup import np, plt
# from quantecon import MarkovChain
""" Simulate Simple Markov Chain """
''' FORMULATION '''

# Model Parameters
gamma = 0.07  # aggregate unemployment rate
eta = 2.0  # expected duration of unemployment
y = [0.51, 1.0]  # income per employment state
delta = 0.90  # discount factor

# Employment Transition Probabilities
q = np.zeros([2, 2])
q[0, 0] = 1 - 1 / eta
q[1, 0] = gamma * (1 - q[0, 0]) / (1 - gamma)
q[0, 1] = 1 - q[0, 0]
q[1, 1] = 1 - q[1, 0]
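# (Interpretation, not stated in the original:) q[0, 0] follows from an expected
# unemployment spell of eta periods (1 / (1 - q[0, 0]) = eta), and q[1, 0] is set
# so that the implied stationary unemployment rate equals gamma.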

# Compute Expected Lifetime Income
e = np.linalg.solve(np.identity(2) - delta * q, y)
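# (Added note:) e solves the recursion e = y + delta * q @ e, i.e. expected
# discounted lifetime income conditional on the current employment state.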

# Compute Stationary Distribution and Expected Employment State Durations
# p = MarkovChain(q)  # not exactly what markov is in matlab

# TODO: Finish this demo!!  Markov chain not imported yet from quantecon
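# A minimal sketch of the missing step (assumption: plain numpy stands in for the
# quantecon MarkovChain that has not been imported yet). The stationary distribution
# pi solves pi @ q = pi subject to pi.sum() == 1.
def stationary_distribution(Q):
    k = Q.shape[0]
    A = np.vstack([Q.T - np.identity(k), np.ones(k)])  # stationarity equations plus adding-up constraint
    rhs = np.zeros(k + 1)
    rhs[-1] = 1.0
    pi, *_ = np.linalg.lstsq(A, rhs, rcond=None)
    return pi

# Example usage with the transition matrix q defined above:
# pi = stationary_distribution(q)
# print('Stationary unemployment rate: {:.4f}'.format(pi[0]))
# Expected state durations for the 2-state chain: 1 / (1 - np.diag(q))
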
S1, S2 = gridmake(s1, s2)  # combined state grid
n = n1 * n2  # total number of states

# Action Space (keep='K', replace='R')
X = np.array(['Keep', 'Replace'])  # keep or replace
m = X.size  # number of actions

# Reward Function
f = np.empty((m, n))
y = (-0.2 * S1**2 + 2 * S1 + 8) * S2  # yield per lactation
f[0] = price * y
f[1] = f[0] - cost
f[0, S1 == 10] = -np.inf  # force replace at lactation 10

# State Transition Probability Matrix
P = np.zeros((2, n1, n2, n1, n2))
for i in range(n1):
    for j in range(n2):
        if i < 9:
            P[0, i, j, i + 1, j] = 1  # Raise lactation number by 1, if keep
        else:
            P[0, i, j, 0] = 0.2, 0.6, 0.2

        P[1, i, j, 0] = 0.2, 0.6, 0.2  # Optional replacement

P.shape = 2, n, n

# Model Structure
model = DDPmodel(f, P, delta).solve(print=True)

## Analysis
Example #7
def f(x):
    g = np.zeros((3, x.size))
    g[0], g[1], g[2] = np.exp(-x), -np.exp(-x), np.exp(-x)
    return g
u     =  50                  # weekly unemp. benefit
v     =  60                  # weekly value of leisure
pfind = 0.90                 # prob. of finding job
pfire = 0.10                 # prob. of being fired
delta = 0.99                 # discount factor

# State Space
S = np.array([1, 2])         # vector of states
n = S.size                   # number of states

# Action Space (idle=1, active=2)
X = ['idle', 'active']       # vector of actions
m = len(X)                   # number of actions

# Reward Function
f = np.zeros((m, n))
f[0] = v                   # gets leisure
f[1, 0] = u                   # gets benefit


# State Transition Probability Matrix
P = np.zeros((m, n, n))
P[0, :, 0] = 1                 # remains unemployed
P[1, 0, 0] = 1 - pfind           # finds no job
P[1, 0, 1] = pfind             # finds job
P[1, 1, 0] = pfire             # gets fired
P[1, 1, 1] = 1 - pfire           # keeps job

# Model Structure
model = DDPmodel(f, P, delta)
Example #9
integral = F(x, -1)  # avoid shadowing the built-in name 'int'
print('\nThe approximate integral of exp(-x) between x=-1 and x=0 is {:.15f}'.
      format(integral))
print("The  'exact'  integral  of  exp(-x) between x=-1 and x=0 is {:.15f}".
      format(np.exp(1) - 1))

# One may evaluate the accuracy of the Chebychev polynomial approximant by
# computing the approximation error on a highly refined grid of points:
ngrid = 5001  # number of grid nodes
xgrid = np.linspace(a, b, ngrid)  # generate refined grid for plotting
yapp = F(xgrid)  # approximant values at grid nodes
yact = f(xgrid)  # actual function values at grid points

demo.figure('Chebychev Approximation Error for exp(-x)', 'x', 'Error')
plt.plot(xgrid, yapp - yact)
plt.plot(xgrid, np.zeros(ngrid), 'k--', linewidth=2)

# The plot indicates that an order-10 Chebychev approximation scheme produces approximation errors
# no bigger in magnitude than 6x10^-10. The approximation error exhibits the "Chebychev equioscillation
# property", oscillating relatively uniformly throughout the approximation domain.
#
# This commonly occurs when the function being approximated is very smooth, as is the case here, but should
# not be expected when the function is not smooth.  Further notice how the approximation error is exactly 0 at
# the approximation nodes --- which is true by construction.
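# A small check, not in the original demo: the approximant should reproduce f exactly
# (up to rounding) at its own interpolation nodes. This assumes F exposes those nodes
# through the 'nodes' attribute, as the other BasisChebyshev objects in these demos do.
xnodes = F.nodes
print('Max error at the approximation nodes: {:.2e}'.format(np.max(np.abs(F(xnodes) - f(xnodes)))))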

# Let us repeat the approximation exercise, this time constructing a
# 21-function cubic spline approximant:
n = 21  # order of approximation
S = BasisSpline(n, a, b, f=f)  # define basis
yapp = S(xgrid)  # approximant values at grid nodes
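# A plausible continuation (not in the excerpt): plot the cubic spline approximation
# error on the same refined grid, mirroring the Chebychev error plot above.
demo.figure('Cubic Spline Approximation Error for exp(-x)', 'x', 'Error')
plt.plot(xgrid, yapp - yact)
plt.plot(xgrid, np.zeros(ngrid), 'k--', linewidth=2)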
Example #10
    return fval


# In[3]:

A = np.array  # shorthand for building the parameter arrays below

# Market parameters (interpretation; the original labels are not in this excerpt):
# regional supply intercepts/slopes, demand intercepts/slopes, and interregional transport costs
as_ = A([9, 3, 18])
bs = A([1, 2, 1])
ad = A([42, 54, 51])
bd = A([3, 2, 1])
c = A([[0, 3, 9], [3, 0, 3], [6, 3, 0]])
params = (as_, bs, ad, bd, c)

# In[4]:

a = np.zeros(9)
b = np.full(9, np.inf)
x0 = np.zeros(9)
Market = MCP(market, a, b, x0, *params)

# In[5]:

x = Market.zero(print=True)

print('Function value at solution\n\t', Market.original(x))
print('Lower bound is binding\n\t', Market.a_is_binding)
print('Upper bound is binding\n\t', Market.b_is_binding)
# print(Market.ssmooth(x))
# print(Market.minmax(x))

# In[6]:
Example #11
# Model Parameters
maxage = 5  # maximum machine age
repcost = 75  # replacement cost
delta = 0.9  # discount factor

# State Space
S = np.arange(1, 1 + maxage)  # machine age
n = S.size  # number of states

# Action Space (keep=1, replace=2)
X = ['keep', 'replace']  # vector of actions
m = len(X)  # number of actions

# Reward Function
f = np.zeros((m, n))
f[0] = 50 - 2.5 * S - 2.5 * S**2
f[1] = 50 - repcost
f[0, -1] = -np.inf

# State Transition Function
g = np.zeros_like(f)
g[0] = np.arange(1, n + 1)
g[0, -1] = n - 1  # adjust last state so it doesn't go out of bounds

# Model Structure
model = DDPmodel(f, g, delta)
model.solve()

## Analysis
Example #12
__author__ = 'Randall'

# DEMAPP10 Monopolist's Effective Supply Function


# Residual Function
def resid(c):
    Q.c = c
    q = Q(p)
    return p + q / (-3.5 * p**(-4.5)) - np.sqrt(q) - q**2
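# (Interpretation, not stated in this excerpt:) the residual is the monopolist's
# first-order condition p + q / D'(p) - MC(q), where demand appears to be
# D(p) = p**-3.5 (so D'(p) = -3.5 * p**-4.5) and marginal cost MC(q) = sqrt(q) + q**2;
# the Chebychev coefficients c are chosen to zero this residual at the nodes.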


# Approximation structure
n, a, b = 21, 0.5, 2.5
Q = BasisChebyshev(n, a, b)
c0 = np.zeros(n)
c0[0] = 2
p = Q.nodes

# Solve for effective supply function
monopoly = NLP(resid)
Q.c = monopoly.broyden(c0)

# Setup plot
nplot = 1000
p = nodeunif(nplot, a, b)
rplot = resid(Q.c)

# Plot effective supply
demo.figure("Monopolist's Effective Supply Curve", 'Quantity', 'Price')
plt.plot(Q(p), p)
Example #13
mancost = 10                 # maintenance cost
delta = 0.9                  # discount factor

# State Space
s1 = np.arange(1, 1 + maxage)   # asset age
s2 = s1 - 1                     # servicings
S = gridmake(s1, s2)            # combined state grid
S1, S2 = S
n = S1.size                     # total number of states

# Action Space
X = ['no action', 'service', 'replace']  # vector of actions
m = len(X)                               # number of actions

# Reward Function
f = np.zeros((m, n))
q = 50 - 2.5 * S1 - 2.5 * S1 ** 2
f[0] = q * np.minimum(1, 1 - (S1 - S2) / maxage)
f[1] = q * np.minimum(1, 1 - (S1 - S2 - 1) / maxage) - mancost
f[2] = 50 - repcost


# State Transition Function
g = np.empty_like(f)
g[0] = getindex(np.c_[S1 + 1, S2], S)
g[1] = getindex(np.c_[S1 + 1, S2 + 1], S)
g[2] = getindex(np.c_[1, 0], S)

# Model Structure
model = DDPmodel(f, g, delta)
model.solve()
Example #14
T = 10                   # foraging periods
emax = 8                 # energy capacity
e = [2, 4, 4]            # energy from foraging
p = [1.0, 0.7, 0.8]      # predation survival probabilities
q = [0.5, 0.8, 0.7]      # foraging success probabilities

# State Space
S = np.arange(emax + 1)  # energy levels
n = S.size               # number of states

# Action Space
X = np.arange(3) + 1     # vector of actions
m = X.size               # number of actions

# Reward Function
f = np.zeros((m, n))

# State Transition Probability Matrix
P = np.zeros((m, n, n))
for k in range(m):
    P[k, 0, 0] = 1
    i = np.arange(1, n)  # states with positive energy
    # does not survive predation
    snext = 0
    j = getindex(snext, S)
    P[k, i, j] += 1 - p[k]
    # survives predation, finds food
    snext = S[i] - 1 + e[k]
    j = getindex(snext, S)
    P[k, i, j] += p[k] * q[k]
    # survives predation, finds no food
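    # Hedged completion of the truncated branch (mirrors the two cases above):
    # the forager survives predation but finds no food, so energy falls by one unit.
    snext = S[i] - 1
    j = getindex(snext, S)
    P[k, i, j] += p[k] * (1 - q[k])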
Example #15
# State Space
S = np.arange(1 + maxcap)  # vector of states
n = S.size  # number of states

# Action Space
X = np.arange(1 + maxcap)  # vector of actions
m = X.size  # number of actions

# Reward Function
f = np.full((m, n), -np.inf)
for k in range(m):
    f[k, k:] = alpha1 * X[k]**beta1 + alpha2 * (S[k:] - X[k])**beta2

# State Transition Probability Matrix
P = np.zeros((m, n, n))

for k in range(m):
    for i in range(n):
        for j in range(r.size):
            snext = min(S[i] - X[k] + r[j], maxcap)
            inext = getindex(snext, S)
            P[k, i, inext] = P[k, i, inext] + p[j]

# Model Structure
model = DDPmodel(f, P, delta)
model.solve()

## Analysis

# Plot Optimal Policy
Example #16
print('Interpolation coeff =\n ', c)

# Alternatively, one may compute the standard approximation nodes x and corresponding function values y and use these
# values to create a BasisChebyshev object with the keyword argument y:
x = basis.nodes
y = f(x)
fa = BasisChebyshev([n, n], a, b, y=y)
print('Interpolation coeff =\n ', fa.c)  # attribute c returns the coefficients

# ... or one may simply pass the function directly to BasisChebyshev using keyword 'f', which by default
# will evaluate it at the basis nodes
F = BasisChebyshev([n, n], a, b, f=f)
print('Interpolation coeff =\n ', F.c)

# Having created a BasisChebyshev object, one may now evaluate the approximant at any point x by calling the object:
x = np.zeros([2, 1])  # first dimension should match the basis dimension
y = F(x)
print('The exact and approximate value of f at x=[0 0] are')
print('{:4.0f}  {:20.15f}\n'.format(0, y))

# ... one may also evaluate the approximant's first partial derivatives at x:
d1 = F(x, [1, 0])
d2 = F(x, [0, 1])
print(
    'The exact and approximate partial derivatives of f w.r.t. x1 at x=[0 0] are'
)
print('{:4.0f}  {:20.15f}\n'.format(1, d1))
print(
    'The exact and approximate partial derivatives of f w.r.t. x2 at x=[0 0] are'
)
print('{:4.0f}  {:20.15f}\n'.format(0, d2))
Example #17
# Phi   = funbase(basis)                 # interpolation matrix

## SOLUTION

# Initialize Value Function
# c = zeros(n,1)                         # conditional value function basis coefficients

# Solve Bellman Equation and Compute Critical Exercise Prices
f = NLP(lambda p: K - np.exp(p) - delta * Value(p))

pcrit = np.empty(N + 1)

pcrit[0] = f.zero(0.0)

for t in range(N):
    v = np.zeros((1, n))
    for k in range(m):
        pnext = Value.nodes + e[k]
        v += w[k] * np.maximum(K - np.exp(pnext), delta * Value(pnext))

    Value[:] = v
    pcrit[t + 1] = f.broyden(pcrit[t])

# Print Critical Exercise Price 300 Periods to Expiration

print('Critical Exercise Price 300 Periods to Expiration')
print('   Critical Price  = {:5.2f}'.format(np.exp(pcrit[-1])))

# Plot Critical Exercise Prices
demo.figure('American Put Option Optimal Exercise Boundary',
            'Periods Remaining Until Expiration', 'Exercise Price')
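# A plausible continuation (not in the excerpt; assumes plt is available from
# demos.setup as in the other demos): plot the critical exercise price against
# the number of periods remaining until expiration.
plt.plot(np.arange(N + 1), np.exp(pcrit))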
Example #18
u = 50  # weekly unemp. benefit
v = 60  # weekly value of leisure
pfind = 0.90  # prob. of finding job
pfire = 0.10  # prob. of being fired
delta = 0.99  # discount factor

# State Space
S = np.array([1, 2])  # vector of states
n = S.size  # number of states

# Action Space (idle=1, active=2)
X = ['idle', 'active']  # vector of actions
m = len(X)  # number of actions

# Reward Function
f = np.zeros((m, n))
f[0] = v  # gets leisure
f[1, 0] = u  # gets benefit

# State Transition Probability Matrix
P = np.zeros((m, n, n))
P[0, :, 0] = 1  # remains unemployed
P[1, 0, 0] = 1 - pfind  # finds no job
P[1, 0, 1] = pfind  # finds job
P[1, 1, 0] = pfire  # gets fired
P[1, 1, 1] = 1 - pfire  # keeps job

# Model Structure
model = DDPmodel(f, P, delta)

## Solution
Example #19
S1, S2 = gridmake(s1, s2)      # combined state grid
n = n1 * n2                    # total number of states

# Action Space (keep='K', replace='R')
X = np.array(['Keep', 'Replace'])  # keep or replace
m = X.size                         # number of actions

# Reward Function
f = np.empty((m, n))
y = (-0.2 * S1 ** 2 + 2 * S1 + 8) * S2  # yield per lactation
f[0] = price * y
f[1] = f[0] - cost
f[0, S1 == 10] = -np.inf           # force replace at lactation 10

# State Transition Probability Matrix
P = np.zeros((2, n1, n2, n1, n2))
for i in range(n1):
    for j in range(n2):
        if i < 9:
            P[0, i, j, i+1, j] = 1     # Raise lactation number by 1, if keep
        else:
            P[0, i, j, 0] = 0.2, 0.6, 0.2

        P[1, i, j, 0] = 0.2, 0.6, 0.2       # Optional replacement

P.shape = 2, n, n


# Model Structure
model = DDPmodel(f, P, delta).solve(print=True)
Example #20
# Model Parameters
A       = 6                                # maximum asset age 
alpha   = np.array([50, -2.5, -2.5])       # production function coefficients
kappa   = 40                               # net replacement cost
pbar    = 1                                # long-run mean unit profit
gamma   = 0.5                              # unit profit autoregression coefficient
sigma   = 0.15                             # standard deviation of unit profit shock
delta   = 0.9                              # discount factor 

# Continuous State Shock Distribution
m = 5                                      # number of unit profit shocks
e, w = qnwnorm(m, 0, sigma ** 2)           # unit profit shocks and probabilities

# Deterministic Discrete State Transitions
h = np.zeros((2, A))
h[0, :-1] = np.arange(1, A)

# Approximation Structure
n  = 200                                   # number of collocation nodes
pmin = 0                                   # minimum unit profit
pmax = 2                                   # maximum unit profit
basis = BasisSpline(n, pmin, pmax, labels=['unit profit'])        # basis functions

# Model Structure

def profit(p, x, i, j):
    a = i + 1
    if j or a == A:
        return p * 50 - kappa
    else:
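        # Hedged completion of the truncated branch (assumption, not in the excerpt):
        # profit from keeping an age-a asset, using the quadratic production
        # coefficients alpha defined above.
        return p * (alpha[0] + alpha[1] * a + alpha[2] * a ** 2)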
Example #21
# Model Parameters
u = 90  # unemployment benefit
v = 95  # benefit of pure leisure
wbar = 100  # long-run mean wage
gamma = 0.40  # wage reversion rate
p0 = 0.20  # probability of finding job
p1 = 0.90  # probability of keeping job
sigma = 5  # standard deviation of wage shock
delta = 0.95  # discount factor

# Continuous State Shock Distribution
m = 15  # number of wage shocks
e, w = qnwnorm(m, 0, sigma**2)  # wage shocks

# Stochastic Discrete State Transition Probabilities
q = np.zeros((2, 2, 2))
q[1, 0, 1] = p0
q[1, 1, 1] = p1
q[:, :, 0] = 1 - q[:, :, 1]
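# (Interpretation, not stated in the excerpt:) conditional on being active, an
# unemployed worker finds a job with probability p0 and an employed worker keeps
# it with probability p1; the remaining column holds the unemployment probabilities.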

# Model Structure

# Approximation Structure
n = 150  # number of collocation nodes
wmin = 0  # minimum wage
wmax = 200  # maximum wage
basis = BasisSpline(n, wmin, wmax, labels=['wage'])  # basis functions


def reward(w, x, employed, active):
    if active:
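        # Hedged completion of the truncated reward (assumption, not in the excerpt):
        # an active agent earns the wage w if employed and the benefit u otherwise,
        # while an idle agent enjoys the value of leisure v.
        return w.copy() if employed else np.full_like(w, u)
    return np.full_like(w, v)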