# In[6]:

frm = '{:21} {:6.3f}  {:8.1e}     {:7.6f}'
prt = lambda d, t, n, x: print(frm.format(d, t, n, *x))

print('{:21} {:^6}  {:^8}     {:^7}\n{}'.format('Algorithm', 'Time', 'Norm', 'x', '-' * 51))
prt('Newton minmax', t1, n1, x1)
prt('Newton semismooth', t2, n2, x2)


# ### Plot results
# Here we use the methods *ssmooth* and *minmax* from class **MCP** to compute the semismooth and minimax transformations.
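# For reference, the standard Miranda–Fackler definitions (stated here as a gloss, not taken from this demo's text): for an MCP with bounds $a \le x \le b$, the minimax transformation is $\hat{f}(x) = \min(\max(f(x),\, a - x),\, b - x)$, and the semismooth transformation is $\tilde{f}(x) = \phi^-(\phi^+(f(x),\, a - x),\, b - x)$, where $\phi^\pm(u, v) = u + v \pm \sqrt{u^2 + v^2}$ is applied componentwise.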

# In[7]:

fig = plt.figure()
original = {'label':'Original', 'alpha':0.5, 'color':'gray'}
x = np.linspace(-0.5, 2.5, 500)

ax1 = fig.add_subplot(121, title='Difficult NCP', aspect=1,
                      xlabel='x', xlim=[-0.5, 2.5], ylim=[-1, 1.5])
ax1.axhline(ls='--', color='gray')
ax1.plot(x, billups(x)[0], **original)
ax1.plot(x, Billups.ssmooth(x), label='Semismooth')
ax1.plot(x, Billups.minmax(x), label='Minmax')
ax1.legend(loc='best')

x = np.linspace(-0.03, 0.03, 500)
ax2 = fig.add_subplot(122, title='Difficult NCP Magnified', aspect=1,
                      xlabel='x', xlim=[-0.035, 0.035], ylim=[-0.01, 0.06])
ax2.axhline(ls='--', color='gray')
# The snippet was cut off here; presumably the magnified panel repeats the
# three curves from the left panel:
ax2.plot(x, billups(x)[0], **original)
ax2.plot(x, Billups.ssmooth(x), label='Semismooth')
ax2.plot(x, Billups.minmax(x), label='Minmax')
ax2.legend(loc='best')
# Example #2
from demos.setup import np, plt

from compecon.quad import qnwlogn
from compecon.tools import nodeunif
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm

# Univariate Taylor approximation

x = np.linspace(-1, 1, 100)
y = (x + 1) * np.exp(2 * x)
y1 = 1 + 3 * x
y2 = 1 + 3 * x + 4 * x**2  # f''(0) = 8, so the quadratic Taylor term is (8/2) x^2 = 4 x^2

plt.figure(figsize=[6, 6])
plt.plot(x, y, 'k', linewidth=3, label='Function')
plt.plot(x, y1, 'b', linewidth=3, label='1st order approximation')
plt.plot(x, y2, 'r', linewidth=3, label='2nd order approximation')
plt.legend()
plt.xticks([-1, 0, 1])
plt.show()
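# Optional sanity check of the Taylor coefficients above (not part of the
# original demo; assumes sympy is available):
import sympy as sp
xs = sp.symbols('x')
print(sp.series((xs + 1) * sp.exp(2 * xs), xs, 0, 3))  # 1 + 3*x + 4*x**2 + O(x**3)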

# Bivariate Taylor approximation
nplot = [101, 101]
a = [0, -1]
b = [2, 1]
x1, x2 = nodeunif(nplot, a, b)
x1.shape = nplot
x2.shape = nplot

y = np.exp(x2) * x1**2
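# The bivariate example is truncated here. A sketch of how it presumably
# continued, plotting the function against its first- and second-order Taylor
# approximations; the expansion point (x1, x2) = (1, 0) is an assumption, and
# the coefficients below follow from differentiating exp(x2) * x1**2:
y1 = 2 * x1 + x2 - 1
y2 = y1 + (x1 - 1)**2 + 2 * (x1 - 1) * x2 + 0.5 * x2**2

fig = plt.figure(figsize=[12, 4])
for k, (yy, ttl) in enumerate(zip([y, y1, y2],
                                  ['Function', '1st order', '2nd order'])):
    ax = fig.add_subplot(1, 3, k + 1, projection='3d', title=ttl)
    ax.plot_surface(x1, x2, yy, cmap=cm.coolwarm, linewidth=0)
plt.show()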
# A different demo resumes here (value-function plots from a growth model);
# its setup -- Wealth, v, vlq, sstar, vstar -- was cut from this snippet.
from demos.setup import demo   # plotting helper used below

# Plot Value Function
demo.figure('Value Function', 'Wealth', 'Value')
plt.plot(Wealth, np.c_[v, vlq])
demo.annotate(sstar, vstar, '$s^*$ = %.2f\n$V^*$ = %.2f' % (sstar, vstar), 'bo', (10, -7))
plt.legend(['Chebychev Collocation', 'L-Q Approximation'], loc='upper left')



# Plot Shadow Price Function
demo.figure('Shadow Price Function', 'Wealth', 'Shadow Price')
plt.plot(Wealth, np.c_[pr, plq])
demo.annotate(sstar, pstar, '$s^*$ = %.2f\n$\\lambda^*$ = %.2f' % (sstar, pstar), 'bo', (10, 7))
plt.legend(['Chebychev Collocation','L-Q Approximation'])


# Plot Chebychev Collocation Residual and Approximation Error
plt.figure(figsize=[12, 6])
demo.subplot(1, 2, 1, 'Chebychev Collocation Residual\nand Approximation Error', 'Wealth', 'Residual/Error')
plt.plot(Wealth, np.c_[S.resid, v - vtrue], Wealth, np.zeros_like(Wealth), 'k--')
plt.legend(['Residual','Error'], loc='lower right')

# Plot Linear-Quadratic Approximation Error
demo.subplot(1, 2, 2, 'Linear-Quadratic Approximation Error', 'Wealth', 'Error')
plt.plot(Wealth, vlq - vtrue)


# Plot State and Policy Paths
opts = dict(spec='r*', offset=(-5, -5), fs=11, ha='right')

data[['Wealth', 'Investment']].plot()
plt.title('State and Policy Paths')
demo.annotate(T, sstar, 'steady-state wealth\n = %.2f' % sstar, **opts)
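# The approximation loop below arrives with no setup in this snippet: funcs,
# n, a, b, x and the subfig helper are all undefined. A minimal sketch of
# plausible definitions (these are assumptions, not the original code):
from compecon import BasisChebyshev, BasisSpline

n, a, b = 15, -1, 1                       # interpolation nodes and interval (assumed)
x = np.linspace(a, b, 1001)               # refined evaluation grid (assumed)
funcs = [lambda x: np.exp(-x),
         lambda x: 1 / (1 + 25 * x**2),
         lambda x: np.sqrt(np.abs(x))]    # example test functions (assumed)

def subfig(k, x, y, xlim, ylim, title):
    # hypothetical helper: draw panel k of a 2-by-2 figure
    plt.subplot(2, 2, k)
    plt.plot(x, y)
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.title(title)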
for ifunc, ff in enumerate(funcs):

    # Construct interpolants
    C = BasisChebyshev(n, a, b, f=ff)
    S = BasisSpline(n, a, b, f=ff)
    L = BasisSpline(n, a, b, k=1, f=ff)

    # Compute actual and fitted values on grid
    y = ff(x)  # actual
    yc = C(x)  # Chebychev approximant
    ys = S(x)  # cubic spline approximant
    yl = L(x)  # linear spline approximant

    # Plot function approximations
    plt.figure()
    ymin = np.floor(y.min())
    ymax = np.ceil(y.max())
    xlim = [a, b]
    ylim = [-0.2, 1.2] if ifunc == 2 else [ymin, ymax]

    subfig(1, x, y, xlim, ylim, 'Function')
    subfig(2, x, yc, xlim, ylim, 'Chebyshev')
    subfig(3, x, ys, xlim, ylim, 'Cubic Spline')
    subfig(4, x, yl, xlim, ylim, 'Linear Spline')

    # Plot function approximation error
    plt.figure()
    plt.plot(x, np.c_[yc - y, ys - y], linewidth=3)
    plt.xlabel('x')
    plt.ylabel('Approximation Error')
# Example #5

print(
    'The exact and approximate second cross partial derivatives of f at x=[0 0] are'
)
print('{:4.0f}  {:20.15f}\n'.format(-1, d12))  # d12 was computed in lines cut from this fragment

# One may evaluate the accuracy of the Chebychev polynomial approximant by computing the approximation error on a
# highly refined grid of points:
nplot = [101, 101]  # choose grid discretization
X = nodeunif(nplot, [a, a], [b, b])  # generate refined grid for plotting
yapp = F(X)  # approximant values at grid nodes
yact = f(X)  # actual function values at grid points
error = (yapp - yact).reshape(nplot)
X1, X2 = X
X1.shape = nplot
X2.shape = nplot

fig = plt.figure(figsize=[15, 6])
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax.plot_surface(X1,
                X2,
                error,
                rstride=1,
                cstride=1,
                cmap=cm.coolwarm,
                linewidth=0,
                antialiased=False)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('error')
plt.title('Chebychev Approximation Error')

# The plot indicates that an order 11 by 11 Chebychev approximation scheme
# yields very small approximation errors over the entire domain.


# A different snippet begins here: the residual function for a commodity
# storage boundary-value problem. Its setup (F, tnodes, T, r, k, eta, s0) was
# cut off, and the function header and first two lines below are a
# reconstruction consistent with the call to NLP further down:
from compecon import NLP

def resid(c, tnodes, T, n, F, r, k, eta, s0):
    F.c = np.reshape(c, (2, n))
    x, d = F(tnodes, [[0, 1]])   # values and first derivatives at the nodes
    p, s = x
    d[0] -= (r * p + k)
    d[1] += p ** -eta
    (p_0, p_T), (s_0, s_T) = F([0, T])
    return np.r_[d.flatten(), s_0 - s0, s_T]


storage = NLP(resid, F.c.flatten(), tnodes, T, n, F, r, k, eta, s0)
c = storage.broyden(print=True)
F.c = np.reshape(c, (2, n))

nplot = 501
t = np.linspace(0, T, nplot)
(p, s), (dp, ds) = F(t, [[0, 1]])
res_p = dp - r * p - k
res_s = ds + p ** -eta
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(t, res_p)
plt.title('Residuals')
plt.ylabel('d(price) residual')

plt.subplot(2, 1, 2)
plt.plot(t, res_s)
plt.xlabel('time')
plt.ylabel('d(storage) residual')


plt.figure()
plt.subplot(2, 1, 1)
plt.plot(t, p)
plt.title('Solution')
# Example #7
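# 'problem' and 'f' are used below without being defined in this fragment.
# A minimal assumed setup, consistent with the print statements (the root of
# f(x) = exp(-x) - 1 is sought, and f returns value and Jacobian, matching the
# f(xx)[0] indexing in the plotting code):
from compecon import NLP
from compecon.tools import tic, toc

f = lambda x: (np.exp(-x) - 1, -np.exp(-x))
problem = NLP(f)   # any solver option for recording the iterate path is omitted here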
''' Randomly generate starting point '''
problem.x0 = 10 * np.random.randn(1)
''' Compute root using Newton method '''
t0 = tic()
x1 = problem.newton()
t1 = 100 * toc(t0)
n1, x_newton = problem.fnorm, problem.x_sequence
''' Compute root using Broyden method '''
t0 = tic()
x2 = problem.broyden()
t2 = 100 * toc(t0)
n2, x_broyden = problem.fnorm, problem.x_sequence
''' Print results '''
print('Hundredths of seconds required to compute root of exp(-x)-1,')
print('via Newton and Broyden methods, starting at x = %4.2f.' % problem.x0)
print('\nMethod      Time   Norm of f   Final x')
print('Newton  %8.2f    %8.0e     %5.2f' % (t1, n1, x1))
print('Broyden %8.2f    %8.0e     %5.2f' % (t2, n2, x2))
''' View current options for solver '''
print(problem.opts)
''' Describe the options '''
print(problem.opts.__doc__)
''' Plot the convergence '''
a, b = -abs(problem.x0), abs(problem.x0)
xx = np.linspace(a, b, 100)

fig = plt.figure()
plt.plot(xx, f(xx)[0], 'b-')
plt.plot(x_newton, f(x_newton)[0], 'ro:')
plt.show()
# Example #8

"""Approximate Runge's function with polynomial interpolants through uniformly
spaced nodes and through Chebychev nodes, plot the approximation errors,
and compute condition numbers of associated interpolation matrices
"""

# Runge function
runge = lambda x: 1 / (1 + 25 * x**2)

# Set points of approximation interval
a, b = -1, 1

# Construct plotting grid
nplot = 1001
x = np.linspace(a, b, nplot)
y = runge(x)

# Plot Runge's Function
fig1 = plt.figure(figsize=[6, 9])
ax1 = fig1.add_subplot(211,
                       title="Runge's Function",
                       xlabel='',
                       ylabel='y',
                       xticks=[])
ax1.plot(x, y)
ax1.text(-0.8, 0.8, r'$y = \frac{1}{1+25x^2}$', fontsize=18)

# Initialize data matrices
n = np.arange(3, 33, 2)
nn = n.size
errunif, errcheb = (np.zeros([nn, nplot]) for k in range(2))
nrmunif, nrmcheb, conunif, concheb = (np.zeros(nn) for k in range(4))

# Compute approximation errors on refined grid and interpolation matrix condition numbers
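# The loop announced by the comment above was cut off. A sketch under stated
# assumptions: plain numpy polynomial fitting stands in for whatever toolbox
# calls the original used, and condition numbers are taken from the Vandermonde
# matrices of the two node families (expect RankWarning at high degrees).
for k, ni in enumerate(n):
    xunif = np.linspace(a, b, ni)                         # uniformly spaced nodes
    xcheb = np.cos(np.pi * (np.arange(ni) + 0.5) / ni)    # Chebychev nodes on [-1, 1]
    cunif = np.polyfit(xunif, runge(xunif), ni - 1)
    ccheb = np.polyfit(xcheb, runge(xcheb), ni - 1)
    errunif[k] = np.polyval(cunif, x) - y
    errcheb[k] = np.polyval(ccheb, x) - y
    nrmunif[k] = np.log10(np.abs(errunif[k]).max())
    nrmcheb[k] = np.log10(np.abs(errcheb[k]).max())
    conunif[k] = np.log10(np.linalg.cond(np.vander(xunif)))
    concheb[k] = np.log10(np.linalg.cond(np.vander(xcheb)))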
""" Approximating using the CompEcon toolbox """

'''Univariate approximation'''
# Define function and derivative
f1 = lambda x: np.exp(-2 * x)
d1 = lambda x: -2 * np.exp(-2 * x)

# Fit approximant
n, a, b = 10, -1, 1
f1fit = BasisChebyshev(n, a, b, f=f1)


# Graph approximation error for function and derivative
axopts = {'xlabel': 'x', 'ylabel': 'Error', 'xticks': [-1, 0, 1]}
x = np.linspace(a, b, 1001)
fig = plt.figure(figsize=[12, 6])

ax1 = fig.add_subplot(121, title='Function approximation error', **axopts)
ax1.axhline(linestyle='--', color='gray', linewidth=2)
ax1.plot(f1fit.nodes, np.zeros_like(f1fit.nodes), 'ro', markersize=12)
ax1.plot(x, f1fit(x) - f1(x))

ax2 = fig.add_subplot(122, title='Derivative approximation error', **axopts)
ax2.plot(x, np.zeros_like(x), '--', color='gray', linewidth=2)
ax2.plot(f1fit.nodes, np.zeros_like(f1fit.nodes), 'ro', markersize=12)
ax2.plot(x, f1fit(x, 1) - d1(x))


''' Bivariate Interpolation '''
# Define function
f2 = lambda x: np.cos(x[0]) / np.exp(x[1])
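# The snippet ends with just the function definition. A minimal sketch of the
# bivariate fit and an error check, mirroring the grid-based evaluation used
# earlier; the grid size and interval here are assumptions:
f2fit = BasisChebyshev([15, 15], [-1, -1], [1, 1], f=f2)
X2 = nodeunif([51, 51], [-1, -1], [1, 1])
print('max bivariate approximation error:', np.abs(f2fit(X2) - f2(X2)).max())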
# Example #10

# Construct a banded test matrix and right-hand side. The setup was cut off in
# this fragment; the reconstruction below keeps the surviving zeroing loop and
# adds the imports and arrays the timing code requires:
from scipy.linalg import solve
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve
from compecon.tools import tic, toc

m = 1000
AA = np.random.rand(m, m) + m * np.eye(m)   # diagonally dominant, hence solvable
bb = np.random.rand(m)
for i in range(m):
    for j in range(m):
        if abs(i - j) > 1:
            AA[i, j] = 0

n = np.hstack((np.arange(50, 250, 50), np.arange(300, 1100, 100)))
ratio = np.empty(n.size)
for k in range(n.size):
    A = AA[:n[k], :n[k]]
    b = bb[:n[k]]
    tt = tic()
    for i in range(100):
        x = solve(A, b)

    toc1 = toc(tt)

    S = csc_matrix(A)
    tt = tic()
    for i in range(100):
        x = spsolve(S, b)

    toc2 = toc(tt)
    ratio[k] = toc2 / toc1

# Plot effort ratio
plt.figure(figsize=[6, 6])
plt.plot(n, ratio)
plt.xlabel('n')
plt.ylabel('Ratio')
plt.title('Ratio of Sparse Solve Time to Full Solve Time')
plt.show()

# Example #11

from scipy.integrate import quad   # needed below; missing from this fragment

# Compute vector metrics (the computation feeding this print was cut off;
# x and y here are hypothetical example vectors)
x, y = np.array([1.0, 2.0, 3.0]), np.array([3.0, 2.0, 1.0])
q1 = np.linalg.norm(x - y, 1)
q2 = np.linalg.norm(x - y, 2)
print('Compute vector metrics')
print('\tnorm 1 = {:6.4f},   norm 2 = {:6.4f}'.format(q1, q2))


# Compute Function Metrics
a, b = 0, 2
f = lambda x: x**3 + x**2 + 1
g = lambda x: x**3 + 2
p1, p2 = 1, 2
q1 = quad(lambda x: np.abs(f(x)-g(x)) ** p1, a, b)[0] ** (1 / p1)
q2 = quad(lambda x: np.abs(f(x)-g(x)) ** p2, a, b)[0] ** (1 / p2)
print('\nCompute function metrics')
print('\tnorm 1 = {:6.4f},   norm 2 = {:6.4f}'.format(q1, q2))

# Illustrate function metrics
x = np.linspace(a, b, 200)
plt.figure(figsize=[12, 4])
plt.subplot(1, 2, 1)
plt.plot([0, 2], [0, 0], 'k:', linewidth=4)
plt.plot(x, f(x) - g(x), 'b', linewidth=4, label='f - g')
plt.xlabel('x')
plt.ylabel('y')
plt.xticks([0, 1, 2])
plt.yticks([-1, 0, 1, 2, 3])
plt.title('f - g')

plt.subplot(1, 2, 2)
plt.plot(x, np.abs(f(x) - g(x)), 'b', linewidth=4, label='|f - g|')
plt.xlabel('x')
plt.ylabel('y')
plt.xticks([0, 1, 2])
plt.yticks([0, 1, 2, 3])
plt.title('|f - g|')