Code Example #1
def bounds(s, i, j):
    # Continuous action is bounded below by 0 and unbounded above.
    return np.zeros_like(s), np.full(s.shape, np.inf)
Code Example #2
# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Wealth', 'Shadow Price')
plt.plot(s, growth.Value(s, order=1).T)

# Plot Residual
demo.figure('Bellman Equation Residual', 'Wealth', 'Residual')
plt.plot(s, resid.T)
plt.hlines(0, smin, smax, 'k', '--')

## SIMULATION

# Simulate Model
T = 20
nrep = 50000
sinit = np.full((1, nrep), smin)

data = growth.simulate(T, sinit)

# Plot Simulated State Path

subdata = data[data['_rep'] < 3][['time', 'wealth', '_rep']]
subdata.pivot(index='time', columns='_rep', values='wealth').plot(legend=False,
                                                                  lw=1)
data.groupby('time')['wealth'].mean().plot(color='k', linestyle='--')
plt.title('Simulated and Expected Wealth')
plt.xlabel('Period')
plt.ylabel('Wealth')

# Plot Simulated Policy Path
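# A minimal sketch of the policy-path plot, mirroring the state-path block
# above; the policy column name 'investment' is an assumption about how the
# action was labeled when the model was defined, not something shown here.
subdata = data[data['_rep'] < 3][['time', 'investment', '_rep']]
subdata.pivot(index='time', columns='_rep', values='investment').plot(legend=False, lw=1)
data.groupby('time')['investment'].mean().plot(color='k', linestyle='--')
plt.title('Simulated and Expected Investment')
plt.xlabel('Period')
plt.ylabel('Investment')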
Code Example #3
File: demdp02.py  Project: lnsongxf/CompEcon-python

'''
demo.figure('Bellman Equation Residual', 'Net Unit Profit', 'Percent Residual')
plt.plot(pr, 100 * (resid / model.Value(pr)).T)
# plot(pr,0*pr,'k--')
plt.legend(model.labels.i, loc='upper right')
'''

## SIMULATION

# Simulate Model

T = 50
nrep = 10000
sinit = np.full(nrep, pbar)
iinit = 0
data = model.simulate(T, sinit, iinit, seed=945)

# Print Ergodic Moments
frm = '\t{:<10s} = {:5.2f}'

print('\nErgodic Means')
print(frm.format('Price', data['unit profit'].mean()))
print(frm.format('Age', data.i.mean()))
print('\nErgodic Standard Deviations')
print(frm.format('Price', data['unit profit'].std()))
print(frm.format('Age', data.i.std()))


# Plot Simulated and Expected Continuous State Path
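# A minimal sketch of this plot, following the pattern used by the other
# demos in this collection; it assumes simulate() returned 'time' and '_rep'
# columns alongside the 'unit profit' column printed above.
subdata = data[data['_rep'] < 3][['time', 'unit profit', '_rep']]
subdata.pivot(index='time', columns='_rep', values='unit profit').plot(legend=False, lw=1)
data.groupby('time')['unit profit'].mean().plot(color='k', linestyle='--')
plt.title('Simulated and Expected Unit Profit')
plt.xlabel('Period')
plt.ylabel('Unit Profit')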
Code Example #4
# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Reservoir Level', 'Shadow Price')
plt.plot(s, model.Value(s, 1).T)

# Plot Residual
demo.figure('Bellman Equation Residual', 'Reservoir Level', 'Residual')
plt.plot(s, resid.T)
plt.hlines(0, smin, smax, 'k', '--')


## SIMULATION

# Simulate Model
T = 30
nrep = 100000
sinit = np.full((1, nrep), smin)
data = model.simulate(T, sinit, seed=945)

# Plot Simulated State Path
D = data[data['_rep'] < 3][['time', 'reservoir', '_rep']]
D.pivot(index='time', columns='_rep', values='reservoir').plot(legend=False, lw=1)
data.groupby('time')['reservoir'].mean().plot(color='k', linestyle='--')
plt.title('Simulated and Expected Reservoir Level')
plt.xlabel('Year')
plt.ylabel('Reservoir Level')

# Plot Simulated Policy Path
D = data[data['_rep'] < 3][['time', 'released', '_rep']]
D.pivot(index='time', columns='_rep', values='released').plot(legend=False, lw=1)
data.groupby('time')['released'].mean().plot(color='k', linestyle='--')
plt.title('Simulated and Expected Irrigation')
Code Example #5
cost = 0.2  # unit cost of harvest

# State Space
smin = 0  # minimum state
smax = 8  # maximum state
n = 200  # number of states
S = np.linspace(smin, smax, n)  # vector of states

# Action Space
xmin = 0  # minimum action
xmax = 6  # maximum action
m = 100  # number of actions
X = np.linspace(xmin, xmax, m)  # vector of actions

# Reward Function
# Harvesting x from stock s yields CRRA utility of harvest net of a linear
# harvest cost; infeasible actions (x > s) are marked -inf.
f = np.full((m, n), -np.inf)
for k in range(m):
    f[k, S >= X[k]] = (X[k]**(1 - gamma)) / (1 - gamma) - cost * X[k]

# State Transition Function
# Next period's stock is a concave growth function of escapement S - X[k];
# getindex maps each continuous next state to the nearest grid point in S.
g = np.zeros_like(f)
for k in range(m):
    snext = alpha * (S - X[k]) - 0.5 * beta * (S - X[k])**2
    g[k] = getindex(snext, S)

# Model Structure
model = DDPmodel(f, g, delta)
model.solve()

## Analysis
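# A minimal sketch of one analysis step: plotting the optimal harvest rule.
# It assumes the solved DDPmodel exposes the optimal action indices as
# model.policy (one index per state); that attribute name is an assumption
# about the CompEcon-python API, not something shown in this fragment.
plt.figure()
plt.plot(S, X[model.policy])
plt.title('Optimal Harvest Policy')
plt.xlabel('Stock')
plt.ylabel('Harvest')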
Code Example #6
File: demslv14.py  Project: lnsongxf/CompEcon-python

# In[3]:

A = np.array
as_ = A([9, 3, 18])   # supply intercepts ('as' is a Python keyword, hence as_)
bs = A([1, 2, 1])     # supply slopes
ad = A([42, 54, 51])  # demand intercepts
bd = A([3, 2, 1])     # demand slopes
c = A([[0, 3, 9], [3, 0, 3], [6, 3, 0]])  # transport costs between regions
params = (as_, bs, ad, bd, c)
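
# The market function handed to MCP below is not shown in this fragment. A
# plausible sketch under the usual spatial price equilibrium setup (linear
# supply and demand in each of three regions, x holding the 3x3 shipment
# quantities); this is an illustrative assumption, not the file's definition.
def market(x, as_, bs, ad, bd, c):
    q = x.reshape(3, 3)            # q[i, j]: shipment from region i to region j
    ps = as_ + bs * q.sum(axis=1)  # supply price in each origin region
    pd = ad - bd * q.sum(axis=0)   # demand price in each destination region
    # arbitrage profit per route; zero on routes used in equilibrium
    return (pd[np.newaxis, :] - ps[:, np.newaxis] - c).ravel()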

# In[4]:

a = np.zeros(9)
b = np.full(9, np.inf)
x0 = np.zeros(9)
Market = MCP(market, a, b, x0, *params)

# In[5]:

x = Market.zero(print=True)

print('Function value at solution\n\t', Market.original(x))
print('Lower bound is binding\n\t', Market.a_is_binding)
print('Upper bound is binding\n\t', Market.b_is_binding)
# print(Market.ssmooth(x))
# print(Market.minmax(x))

Code Example #7
def bounds(s, i, j):
    # Lower bound 0 and upper bound +inf on the continuous action;
    # s[0] gives the shape of the first state dimension.
    lb = np.zeros_like(s[0])
    ub = np.full(lb.shape, np.inf)
    return lb, ub
Code Example #8
                 main='Bellman Equation Residual',
                 xlab='Wage',
                 ylab='Percent Residual')
fig.draw()

resid = S['resid2'].reshape(ni, nj, -1)[0].T
demo.figure('Bellman Equation Residual', 'Wage', 'Percent Residual')
plt.plot(sr, resid)

# SIMULATION

# Simulate Model

T = 40
nrep = 10000
sinit = np.full((1, nrep), wbar)
iinit = 0
data = model.simulate(T, sinit, iinit, seed=945)

# Print Ergodic Moments
ff = '\t{:12s} = {:5.2f}'

print('\nErgodic Means')
print(ff.format('Wage', data['wage'].mean()))
print(ff.format('Employment', (data['i'] == 'employed').mean()))
print('\nErgodic Standard Deviations')
print(ff.format('Wage', data['wage'].std()))
print(ff.format('Employment', (data['i'] == 'employed').std()))

# Plot Expected Discrete State Path
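# A minimal sketch of this plot, built only from columns already used above;
# it assumes the simulated DataFrame carries the same 'time' column as the
# other demos, and plots the fraction of replications employed each period.
(data['i'] == 'employed').groupby(data['time']).mean().plot()
plt.title('Probability of Employment')
plt.xlabel('Period')
plt.ylabel('Probability')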