from demos.setup import np, plt
from compecon.quad import qnwlogn
from compecon.tools import nodeunif
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm


# Univariate Taylor approximation
x = np.linspace(-1, 1, 100)
y = (x + 1) * np.exp(2 * x)
y1 = 1 + 3 * x
y2 = 1 + 3 * x + 4 * x**2   # second-order Taylor coefficient is f''(0)/2 = 4

plt.figure(figsize=[6, 6])
plt.plot(x, y, 'k', linewidth=3, label='Function')
plt.plot(x, y1, 'b', linewidth=3, label='1st order approximation')
plt.plot(x, y2, 'r', linewidth=3, label='2nd order approximation')
plt.legend()
plt.xticks([-1, 0, 1])
plt.show()


## Bivariate Taylor approximation
nplot = [101, 101]
a = [0, -1]
b = [2, 1]
x1, x2 = nodeunif(nplot, a, b)
x1.shape = nplot
x2.shape = nplot
y = np.exp(x2) * x1**2
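# The excerpt stops after tabulating the function on the grid.  A minimal
# sketch of the matching first- and second-order expansions follows, ASSUMING
# the expansion point is the grid midpoint (1, 0), which is not shown in this
# excerpt; for f(x1, x2) = x1**2 * exp(x2) the derivatives there are
# f = 1, f1 = 2, f2 = 1, f11 = 2, f12 = 2, f22 = 1.
dx1, dx2 = x1 - 1, x2
y1 = 1 + 2 * dx1 + dx2                           # first-order expansion
y2 = y1 + dx1**2 + 2 * dx1 * dx2 + 0.5 * dx2**2  # second-order expansion

fig = plt.figure(figsize=[12, 4])
for k, (z, ttl) in enumerate(zip([y, y1, y2], ['Function', '1st order', '2nd order'])):
    ax = fig.add_subplot(1, 3, k + 1, projection='3d', title=ttl)
    ax.plot_surface(x1, x2, z, cmap=cm.coolwarm)
plt.show()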
    # (continuation: closing line of the profit function passed to DPmodel below)
    return p * (alpha[0] + alpha[1] * a + alpha[2] * a ** 2)


def transition(p, x, i, j, in_, e):
    return pbar + gamma * (p - pbar) + e


model = DPmodel(basis, profit, transition,
                # i=['a={}'.format(a + 1) for a in range(A)],
                i=[a + 1 for a in range(A)],
                j=['keep', 'replace'],
                discount=delta, e=e, w=w, h=h)

# SOLUTION
S = model.solve()
pr = np.linspace(pmin, pmax, 10 * n)

# Plot Action-Contingent Value Functions
pp = demo.qplot('unit profit', 'value_j', 'i',
                data=S,
                main='Action-Contingent Value Functions',
                xlab='Net Unit Profit',
                ylab='Value')
print(pp)
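# The transition above is a mean-reverting (AR(1)) process for the unit profit,
# p' = pbar + gamma * (p - pbar) + e.  A standalone sketch of its dynamics; the
# parameter values below are PLACEHOLDERS for illustration only, since pbar,
# gamma, and the shock volatility are not given in this excerpt.
import numpy as np

pbar_, gamma_, sigma_, T_ = 1.0, 0.5, 0.2, 50   # hypothetical values
rng = np.random.default_rng(0)
p_path = np.empty(T_)
p_path[0] = pbar_
for s in range(1, T_):
    p_path[s] = pbar_ + gamma_ * (p_path[s - 1] - pbar_) + sigma_ * rng.standard_normal()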
frm = '{:21} {:6.3f} {:8.1e} {:7.6f}'
prt = lambda d, t, n, x: print(frm.format(d, t, n, *x))

print('{:21} {:^6} {:^8} {:^7}\n{}'.format('Algorithm', 'Time', 'Norm', 'x', '-' * 51))
prt('Newton minmax', t1, n1, x1)
prt('Newton semismooth', t2, n2, x2)

# ### Plot results
# Here we use the methods *ssmooth* and *minmax* from class **MCP** to compute
# the semismooth and minmax transformations.

# In[7]:

fig = plt.figure()
original = {'label': 'Original', 'alpha': 0.5, 'color': 'gray'}

x = np.linspace(-0.5, 2.5, 500)
ax1 = fig.add_subplot(121, title='Difficult NCP', aspect=1,
                      xlabel='x', xlim=[-0.5, 2.5], ylim=[-1, 1.5])
ax1.axhline(ls='--', color='gray')
ax1.plot(x, billups(x)[0], **original)
ax1.plot(x, Billups.ssmooth(x), label='Semismooth')
ax1.plot(x, Billups.minmax(x), label='Minmax')
ax1.legend(loc='best')

x = np.linspace(-0.03, 0.03, 500)
ax2 = fig.add_subplot(122, title='Difficult NCP Magnified', aspect=1,
                      xlabel='x', xlim=[-0.035, 0.035], ylim=[-0.01, 0.06])
ax2.axhline(ls='--', color='gray')
ax2.plot(x, Billups.original(x), **original)
ax2.plot(x, Billups.ssmooth(x), label='Semismooth')
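# For reference, a self-contained sketch of the two reformulations applied to
# Billups' test function f(x) = 1.01 - (1 - x)**2, using the standard minmax
# form min(x, f(x)) and the Fischer-Burmeister semismooth function.  Sign and
# bound conventions may differ from the MCP class's minmax/ssmooth methods.
import numpy as np

def billups_f(x):
    # Billups' difficult NCP test function
    return 1.01 - (1 - x) ** 2

def minmax_ncp(x):
    # minmax reformulation of the NCP: x >= 0, f(x) >= 0, x * f(x) = 0
    return np.minimum(x, billups_f(x))

def fischer_burmeister(x):
    # semismooth reformulation: phi(a, b) = a + b - sqrt(a**2 + b**2)
    fx = billups_f(x)
    return x + fx - np.sqrt(x ** 2 + fx ** 2)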
from demos.setup import np
from compecon.tools import gridmake, getindex


## DEMDDP07 Renewable resource model

# Model Parameters
delta = 0.9     # discount factor
alpha = 4.0     # growth function parameter
beta = 1.0      # growth function parameter
gamma = 0.5     # demand function parameter
cost = 0.2      # unit cost of harvest

# State Space
smin = 0        # minimum state
smax = 8        # maximum state
n = 200         # number of states
S = np.linspace(smin, smax, n)  # vector of states

# Action Space
xmin = 0        # minimum action
xmax = 6        # maximum action
m = 100         # number of actions
X = np.linspace(xmin, xmax, m)  # vector of actions

# Reward Function
f = np.full((m, n), -np.inf)
for k in range(m):
    f[k, S >= X[k]] = (X[k] ** (1 - gamma)) / (1 - gamma) - cost * X[k]

# State Transition Function
g = np.zeros_like(f)
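# The excerpt stops after allocating g.  A sketch of how the transition is
# typically filled in, ASSUMING the growth law s' = alpha*(s - x) - 0.5*beta*(s - x)**2
# (the actual law is not shown in this excerpt), with each next-period stock
# mapped to the nearest node on the state grid via getindex.
for k in range(m):
    snext = alpha * (S - X[k]) - 0.5 * beta * (S - X[k]) ** 2
    g[k] = getindex(snext, S)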
from demos.setup import np, plt
from compecon import BasisChebyshev
from compecon.tools import nodeunif
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm

""" Approximating using the CompEcon toolbox """

'''Univariate approximation'''

# Define function and derivative
f1 = lambda x: np.exp(-2 * x)
d1 = lambda x: -2 * np.exp(-2 * x)

# Fit approximant
n, a, b = 10, -1, 1
f1fit = BasisChebyshev(n, a, b, f=f1)

# Graph approximation error for function and derivative
axopts = {'xlabel': 'x', 'ylabel': 'Error', 'xticks': [-1, 0, 1]}
x = np.linspace(a, b, 1001)
fig = plt.figure(figsize=[12, 6])

ax1 = fig.add_subplot(121, title='Function approximation error', **axopts)
ax1.axhline(linestyle='--', color='gray', linewidth=2)
ax1.plot(f1fit.nodes, np.zeros_like(f1fit.nodes), 'ro', markersize=12)
ax1.plot(x, f1fit(x) - f1(x))

ax2 = fig.add_subplot(122, title='Derivative approximation error', **axopts)
ax2.plot(x, np.zeros_like(x), '--', color='gray', linewidth=2)
ax2.plot(f1fit.nodes, np.zeros_like(f1fit.nodes), 'ro', markersize=12)
ax2.plot(x, f1fit(x, 1) - d1(x))

''' Bivariate Interpolation '''

# Define function
f2 = lambda x: np.cos(x[0]) / np.exp(x[1])
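# A quick numeric summary of the univariate fit, using only objects already
# defined above: the worst-case errors of the approximant and its derivative
# on the evaluation grid.
print('max |f - fhat|   =', np.abs(f1fit(x) - f1(x)).max())
print("max |f' - fhat'| =", np.abs(f1fit(x, 1) - d1(x)).max())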
from demos.setup import np, plt
import warnings
warnings.simplefilter('ignore')

"""
Uniform-node and Chebyshev-node polynomial approximation of Runge's function,
and computation of condition numbers of the associated interpolation matrices
"""

# Runge function
runge = lambda x: 1 / (1 + 25 * x**2)

# Set endpoints of approximation interval
a, b = -1, 1

# Construct plotting grid
nplot = 1001
x = np.linspace(a, b, nplot)
y = runge(x)

# Plot Runge's Function
fig1 = plt.figure(figsize=[6, 9])
ax1 = fig1.add_subplot(211, title="Runge's Function",
                       xlabel='', ylabel='y', xticks=[])
ax1.plot(x, y)
ax1.text(-0.8, 0.8, r'$y = \frac{1}{1+25x^2}$', fontsize=18)

# Initialize data matrices
n = np.arange(3, 33, 2)
nn = n.size
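# For reference, the two node families being compared.  A minimal sketch using
# the standard Chebyshev-node formula x_j = (a+b)/2 + (b-a)/2 * cos((n-j-0.5)*pi/n).
def uniform_nodes(k, a, b):
    return np.linspace(a, b, k)

def chebyshev_nodes(k, a, b):
    # standard Chebyshev interpolation nodes on [a, b], in ascending order
    j = np.arange(k)
    return (a + b) / 2 + (b - a) / 2 * np.cos((k - j - 0.5) * np.pi / k)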
""" Approximating using the CompEcon toolbox """ '''Univariate approximation''' # Define function and derivative f1 = lambda x: np.exp(-2 * x) d1 = lambda x: -2 * np.exp(-2 * x) # Fit approximant n, a, b = 10, -1, 1 f1fit = BasisChebyshev(n, a, b, f=f1) # Graph approximation error for function and derivative axopts = {'xlabel': 'x', 'ylabel': 'Error', 'xticks': [-1, 0, 1]} x = np.linspace(a, b, 1001) fig = plt.figure(figsize=[12, 6]) ax1 = fig.add_subplot(121, title='Function approximation error', **axopts) ax1.axhline(linestyle='--', color='gray', linewidth=2) ax1.plot(f1fit.nodes, np.zeros_like(f1fit.nodes), 'ro', markersize=12) ax1.plot(x, f1fit(x) - f1(x)) ax2 = fig.add_subplot(122, title='Derivative approximation error', **axopts) ax2.plot(x, np.zeros_like(x), '--', color='gray', linewidth=2) ax2.plot(f1fit.nodes, np.zeros_like(f1fit.nodes), 'ro', markersize=12) ax2.plot(x, f1fit(x, 1) - d1(x)) ''' Bivariate Interpolation ''' # Define function
# Set endpoints of approximation interval

# In[3]:

a, b = -1, 1

# Construct plotting grid

# In[4]:

nplot = 1001
x = np.linspace(a, b, nplot)
y = runge(x)

# Plot Runge's Function

# Initialize data matrices

# In[5]:

n = np.arange(3, 33, 2)
nn = n.size
errunif, errcheb = (np.zeros([nn, nplot]) for k in range(2))
nrmunif, nrmcheb, conunif, concheb = (np.zeros(nn) for k in range(4))
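# The loop that fills these matrices is not shown in this excerpt.  A sketch of
# what it presumably computes, with ASSUMED details: power basis via np.vander,
# sup norm of the error, and the condition number of each interpolation matrix
# (the Chebyshev nodes below are for [-1, 1], which matches a, b here).
for i, deg in enumerate(n):
    for nodes, err, nrm, con in [
            (np.linspace(a, b, deg), errunif, nrmunif, conunif),
            (np.cos((np.arange(deg) + 0.5) * np.pi / deg), errcheb, nrmcheb, concheb)]:
        Phi = np.vander(nodes, deg)              # interpolation matrix
        c = np.linalg.solve(Phi, runge(nodes))   # interpolation coefficients
        err[i] = np.polyval(c, x) - y            # error on the fine grid
        nrm[i] = np.abs(err[i]).max()            # sup norm of the error
        con[i] = np.linalg.cond(Phi)             # condition number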
    '\nThe approximate second derivative of exp(-x) at x=0 is {:.15f}'.format(d2))
print("The 'exact' second derivative of exp(-x) at x=0 is {:.15f}".format(1))

# ... and one may even evaluate the approximant's definite integral between the
# left endpoint a and x:
fint = F(x, -1)
print('\nThe approximate integral of exp(-x) between x=-1 and x=0 is {:.15f}'.format(fint))
print("The 'exact' integral of exp(-x) between x=-1 and x=0 is {:.15f}".format(np.exp(1) - 1))

# One may evaluate the accuracy of the Chebychev polynomial approximant by
# computing the approximation error on a highly refined grid of points:
ngrid = 5001                        # number of grid nodes
xgrid = np.linspace(a, b, ngrid)    # generate refined grid for plotting
yapp = F(xgrid)                     # approximant values at grid nodes
yact = f(xgrid)                     # actual function values at grid points

demo.figure('Chebychev Approximation Error for exp(-x)', 'x', 'Error')
plt.plot(xgrid, yapp - yact)
plt.plot(xgrid, np.zeros(ngrid), 'k--', linewidth=2)

# The plot indicates that an order-10 Chebychev approximation scheme produces
# approximation errors no bigger in magnitude than 6x10^-10.  The approximation
# error exhibits the "Chebychev equioscillation property", oscillating
# relatively uniformly throughout the approximation domain.
#
# This commonly occurs when the function being approximated is very smooth, as
# is the case here, but should not be expected when the function is not smooth.
# Further notice how the approximation error is exactly 0 at the approximation
# nodes --- which is true by construction.
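# A one-line numeric check of the error bound claimed above, using only the
# grid values already computed:
print('max |error| =', np.abs(yapp - yact).max())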
print('\nCompute function norm')
print('\tnorm 1 = {:6.4f}, norm 2 = {:6.4f}'.format(q1, q2))

# Compute Function Metrics
a, b = 0, 2
f = lambda x: x**3 + x**2 + 1
g = lambda x: x**3 + 2
p1, p2 = 1, 2
q1 = quad(lambda x: np.abs(f(x) - g(x)) ** p1, a, b)[0] ** (1 / p1)
q2 = quad(lambda x: np.abs(f(x) - g(x)) ** p2, a, b)[0] ** (1 / p2)
print('\nCompute function metrics')
print('\tnorm 1 = {:6.4f}, norm 2 = {:6.4f}'.format(q1, q2))

# Illustrate function metrics
x = np.linspace(a, b, 200)
plt.figure(figsize=[12, 4])

plt.subplot(1, 2, 1)
plt.plot([0, 2], [0, 0], 'k:', linewidth=4)
plt.plot(x, f(x) - g(x), 'b', linewidth=4, label='f - g')
plt.xlabel('x')
plt.ylabel('y')
plt.xticks([0, 1, 2])
plt.yticks([-1, 0, 1, 2, 3])
plt.title('f - g')

plt.subplot(1, 2, 2)
plt.plot(x, np.abs(f(x) - g(x)), 'b', linewidth=4, label='f - g')
plt.xlabel('x')
plt.ylabel('y')
plt.xticks([0, 1, 2])
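# Since f(x) - g(x) = x**2 - 1 on [0, 2], both metrics have closed forms:
#   p = 1:  integral of |x**2 - 1| over [0, 2]            = 2
#   p = 2:  (integral of (x**2 - 1)**2 over [0, 2])**0.5  = sqrt(46/15)
# which gives a quick analytic check of the quadrature results above.
print('analytic: norm 1 = {:6.4f}, norm 2 = {:6.4f}'.format(2, np.sqrt(46 / 15)))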
def resid(c, tnodes, T, n, F, r, k, eta, s0):
    F.c = np.reshape(c[:], (2, n))
    (p, s), d = F(tnodes, [[0, 1]])
    d[0] -= (r * p + k)
    d[1] += p ** -eta
    (p_0, p_T), (s_0, s_T) = F([0, T])
    return np.r_[d.flatten(), s_0 - s0, s_T]


storage = NLP(resid, F.c.flatten(), tnodes, T, n, F, r, k, eta, s0)
c = storage.broyden(print=True)
F.c = np.reshape(c, (2, n))

nplot = 501
t = np.linspace(0, T, nplot)
(p, s), (dp, ds) = F(t, [[0, 1]])
res_p = dp - r * p - k
res_s = ds + p ** -eta

plt.figure()
plt.subplot(2, 1, 1)
plt.plot(t, res_p)
plt.title('Residuals')
plt.ylabel('d(price) residual')

plt.subplot(2, 1, 2)
plt.plot(t, res_s)
plt.xlabel('time')
plt.ylabel('d(storage) residual')

plt.figure()
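# Independent check of the computed price path: the collocation system imposes
# dp/dt = r*p + k, a linear ODE with the closed-form solution
# p(t) = (p(0) + k/r) * exp(r*t) - k/r   (a sketch; assumes r != 0).
p_exact = (p[0] + k / r) * np.exp(r * t) - k / r
print('max |p - p_exact| =', np.abs(p - p_exact).max())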