# The ```solve``` method returns a pandas ```DataFrame```.

# In[11]:

S = model.solve()
S.head()

# ## Analysis
#
# ### Plot Action-Contingent Value Functions

# In[12]:

# Compute and plot critical unit profit contributions
pcrit = [NLP(lambda s: model.Value_j(s)[i].dot([1, -1])).broyden(0.0)[0]
         for i in range(A)]
vcrit = [model.Value(s)[i] for i, s in enumerate(pcrit)]

# In[13]:

fig1 = demo.figure('Action-Contingent Value Functions', 'Net Unit Profit',
                   'Value', figsize=[10, 5])
cc = np.linspace(0.3, 0.9, model.dims.ni)
for a, i in enumerate(dstates):
    plt.plot(S.loc[i, 'value[keep]'],
    return price * s - kappa + delta * vhat(c, h(0))

def vhat0(c, s):
    return delta * vhat(c, h(s))

def resid(c, s=snodes):
    return vhat(c, s) - np.maximum(vhat0(c, s), vhat1(c, s))

# ### Solve collocation equation

# In[6]:

cc = NLP(resid).broyden(np.zeros(2))

# ### Compute critical biomass

# In[7]:

scrit = NLP(lambda s: vhat0(cc, s) - vhat1(cc, s)).broyden(0.0)[0]

# In[8]:

scrit

# ## ANALYSIS

# ### Compute refined state grid
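# A refined grid is simply a denser set of points for plotting; a minimal
# sketch, assuming the plots should span the collocation nodes `snodes`
# defined above:

ss = np.linspace(snodes.min(), snodes.max(), 1000)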
T, n = 1, 15
tnodes = BasisChebyshev(n - 1, 0, T).nodes
F = BasisChebyshev(n, 0, T, y=np.ones((2, n)))

def resid(c, tnodes, T, n, F, r, k, eta, s0):
    F.c = np.reshape(c[:], (2, n))
    (p, s), d = F(tnodes, [[0, 1]])
    d[0] -= (r * p + k)
    d[1] += p ** -eta
    (p_0, p_T), (s_0, s_T) = F([0, T])
    return np.r_[d.flatten(), s_0 - s0, s_T]

storage = NLP(resid, F.c.flatten(), tnodes, T, n, F, r, k, eta, s0)
c = storage.broyden(print=True)
F.c = np.reshape(c, (2, n))

nplot = 501
t = np.linspace(0, T, nplot)
(p, s), (dp, ds) = F(t, [[0, 1]])
res_p = dp - r * p - k
res_s = ds + p ** -eta

plt.figure()
plt.subplot(2, 1, 1)
plt.plot(t, res_p)
plt.title('Residuals')
plt.ylabel('d(price) residual')
plt.subplot(2, 1, 2)
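# Sketch: the second panel would mirror the first, plotting the storage
# residual (labels illustrative):
plt.plot(t, res_s)
plt.ylabel('d(storage) residual')
plt.xlabel('time')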
[e, w] = qnwnorm(m, mu, sigma**2)

# We are going to compute the critical exercise price in terms of the time to
# expiration, up to a horizon of $T=300$ periods. First we allocate memory for
# the critical prices:

# In[4]:

T = 300
pcrit = np.empty(T + 1)

# The critical exercise price is the price at which the value of exercising
# the option, $K-\exp(p)$, equals the discounted expected value of keeping the
# option one more period, $\delta E_\epsilon V(p + \epsilon)$. To find it, we
# pose a nonlinear rootfinding problem using the ```NLP``` class; here we
# assume that the option strike price is $K=1$ and that the discount factor is
# $\delta=0.9998$.

# In[5]:

K = 1.0
delta = 0.9998
f = NLP(lambda p: K - np.exp(p) - delta * Value(p))

# Notice that we have not defined the ```Value(p)``` function yet. This
# function is unknown, so we are going to approximate it with a cubic spline,
# setting 500 nodes between -1 and 1. Since the basis is expressed in terms of
# log-prices, this interval corresponds to prices between 0.3679 and 2.7183.

# In[6]:

n = 500
pmin = -1  # minimum log price
pmax = 1   # maximum log price
Value = BasisSpline(n, pmin, pmax, labels=['logprice'], l=['value'])
print(Value)

# In the last expression, by passing the option `l` a one-element list we are
# telling the ```BasisSpline``` class that we have a single function, named
# "value". On creation, the function is set by default to $V(p)=0$ for all
# values of $p$, which conveniently corresponds to the terminal condition of
# this problem.

# ## Finding the critical exercise prices
#
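# Because the lambda passed to ```NLP``` looks up ```Value``` at call time, it
# now refers to the spline we just created. As a quick sanity check (a sketch,
# not part of the original demo): under the terminal condition $V(p)=0$, the
# root of $K - \exp(p)$ is $p = \log K = 0$, so at expiration the critical
# price equals the strike.

pcrit[0] = f.broyden(0.0)  # should be (approximately) 0.0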
def resid(c):
    S.c = c    # update interpolation coefficients
    q = S(p)   # compute quantity supplied at price nodes
    return p - q * (p ** (eta + 1) / eta) - alpha * np.sqrt(q) - q ** 2

# Notice that `resid` takes only one argument. The other objects it uses
# (`S`, `p`, `eta`, `alpha`) should be declared in the main script, where
# Python's scoping rules will find them.

# ### Solve for effective supply function
#
# Class `NLP` defines nonlinear problems. It can be used to solve `resid` by
# Broyden's method.

# In[7]:

cournot = NLP(resid)
S.c = cournot.broyden(S.c, tol=1e-12)

# ### Plot demand and effective supply for m=5 firms

# In[8]:

prices = np.linspace(a, b, 501)
fig1 = demo.figure('Cournot Effective Firm Supply Function', 'Quantity',
                   'Price', [0, 4], [0.5, 2])
plt.plot(5 * S(prices), prices, D(prices), prices)
plt.legend(('Supply', 'Demand'))
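# If relying on the enclosing scope feels fragile, the parameters can instead
# be passed explicitly: `NLP` forwards extra positional arguments to the
# residual function, as in the storage example elsewhere in this section. A
# minimal sketch, with `resid2` a hypothetical variant of `resid`:

def resid2(c, p, eta, alpha):
    S.c = c
    q = S(p)
    return p - q * (p ** (eta + 1) / eta) - alpha * np.sqrt(q) - q ** 2

cournot2 = NLP(resid2, S.c, p, eta, alpha)
S.c = cournot2.broyden(tol=1e-12)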
    fjac = [[y * np.exp(x), np.exp(x) - 2],
            [y, x - 3 * y**2]]
    return np.array(fval), np.array(fjac)

''' Parameters and initial value '''
alpha = 0.6
beta = np.array([0.6, 0.8])

''' Set up the Cournot function '''

''' Generate data for contour plot '''
n = 100
q1 = np.linspace(0.3, 1.1, n)
q2 = np.linspace(0.4, 1.2, n)
z = np.array([cournot(q)[0] for q in gridmake(q1, q2).T]).T

''' Using an NLP object '''
q = np.array([1.0, 0.5])
cournot_problem = NLP(cournot)
q_star, fq_star = cournot_problem.newton(q)
print(q_star)

''' Plot figures '''
steps_options = {'marker': 'o',
                 'color': (0.2, 0.2, .81),
                 'linewidth': 1.0,
                 'markersize': 6,
                 'markerfacecolor': 'red',
                 'markeredgecolor': 'red'}

contour_options = {'levels': [0.0],
                   'colors': '0.25',
                   'linewidths': 0.5}

Q1, Q2 = np.meshgrid(q1, q2)
methods. Initial values generated randomly. True root is x1 = 1, x2 = 1.
"""
from demos.setup import np, tic, toc
from compecon import NLP

''' Set up the problem '''
def f(x):
    fval = [200 * x[0] * (x[1] - x[0] ** 2) + 1 - x[0],
            100 * (x[0] ** 2 - x[1])]
    fjac = [[200 * (x[1] - x[0] ** 2) - 400 * x[0] ** 2 - 1, 200 * x[0]],
            [200 * x[0], -100]]
    return np.array(fval), np.array(fjac)

problem = NLP(f)

''' Randomly generate starting point '''
problem.x0 = np.random.randn(2)

''' Compute root using Newton method '''
t0 = tic()
x1 = problem.newton()
t1 = 100 * toc(t0)
n1 = problem.fnorm

''' Compute root using Broyden method '''
t0 = tic()
x2 = problem.broyden()
t2 = 100 * toc(t0)
n2 = problem.fnorm
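''' Print results (a sketch; the report format is illustrative) '''
print('Hundredths of seconds required to compute root,')
print('starting at x0 = [%4.2f, %4.2f]' % tuple(problem.x0))
print('Newton:  time %6.2f, norm of f %8.1e, x = %s' % (t1, n1, x1))
print('Broyden: time %6.2f, norm of f %8.1e, x = %s' % (t2, n2, x2))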
Initial values generated randomly. Some algorithms may fail to converge,
depending on the initial value. True fixed point is x = -0.09, y = -0.46.
"""
from demos.setup import np, tic, toc
from compecon import NLP

np.random.seed(12)

''' Set up the problem '''
def g(z):
    x, y = z
    return np.array([x ** 2 + y ** 3, x * y - 0.5])

problem_as_fixpoint = NLP(g, maxit=1500)

''' Equivalent Rootfinding Formulation '''
def f(z):
    x, y = z
    fval = [x - x ** 2 - y ** 3,
            y - x * y + 0.5]
    fjac = [[1 - 2 * x, -3 * y ** 2],
            [-y, 1 - x]]
    return np.array(fval), np.array(fjac)

problem_as_zero = NLP(f, maxit=1500)

''' Randomly generate starting point '''
xinit = np.random.randn(2)
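''' Sketch: solve both formulations from the common starting point
    (the method calls mirror the other demos in this section) '''
x_fp = problem_as_fixpoint.fixpoint(xinit)   # function iteration on g
x_rt = problem_as_zero.newton(xinit)         # Newton on the equivalent f(x)=0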
"""
DEMSLV01 Compute root of f(x) = exp(-x) - 1

Compute root of f(x) = exp(-x) - 1 using Newton and Broyden methods.
Initial value generated randomly. True root is x = 0.
"""
from demos.setup import np, plt, tic, toc
from numpy.linalg import norm
from compecon import NLP

''' Set up the problem '''
def f(x):
    fval = np.exp(-x) - 1
    fjac = -np.exp(-x)
    return fval, fjac

problem = NLP(f, all_x=True)

''' Randomly generate starting point '''
problem.x0 = 10 * np.random.randn(1)

''' Compute root using Newton method '''
t0 = tic()
x1 = problem.newton()
t1 = 100 * toc(t0)
n1, x_newton = problem.fnorm, problem.x_sequence

''' Compute root using Broyden method '''
t0 = tic()
x2 = problem.broyden()
t2 = 100 * toc(t0)
n2, x_broyden = problem.fnorm, problem.x_sequence

''' Print results '''
print('Hundredths of seconds required to compute root of exp(-x)-1,')
print('via Newton and Broyden methods, starting at x = %4.2f.' % problem.x0)
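''' Sketch: a tabulated comparison (column layout illustrative) '''
print('\nmethod      time   norm of f          x')
print('Newton   %7.2f   %9.1e   %8.5f' % (t1, n1, x1[0]))
print('Broyden  %7.2f   %9.1e   %8.5f' % (t2, n2, x2[0]))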
Compute fixed point of f(x) = x^0.5 using Newton, Broyden, and function
iteration methods. Initial values generated randomly. Some algorithms may
fail to converge, depending on the initial value. True fixed point is x = 1.
"""
from demos.setup import np, tic, toc
from compecon import NLP

''' Randomly generate starting point '''
xinit = np.random.rand(1) + 0.5

''' Set up the problem '''
def g(x):
    return np.sqrt(x)

problem_as_fixpoint = NLP(g, xinit)

''' Equivalent Rootfinding Formulation '''
def f(x):
    fval = x - np.sqrt(x)
    fjac = 1 - 0.5 / np.sqrt(x)
    return fval, fjac

problem_as_zero = NLP(f, xinit)

''' Compute fixed-point using Newton method '''
t0 = tic()
x1 = problem_as_zero.newton()
t1 = 100 * toc(t0)
n1 = problem_as_zero.fnorm
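''' Sketch: the Broyden and function-iteration counterparts, following the
    same timing pattern (variable names illustrative) '''
t0 = tic()
x2 = problem_as_zero.broyden()
t2 = 100 * toc(t0)
n2 = problem_as_zero.fnorm

t0 = tic()
x3 = problem_as_fixpoint.fixpoint()
t3 = 100 * toc(t0)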
for it in range(40):
    f, J = cournot(q)
    step = -np.linalg.solve(J, f)
    q += step
    if np.linalg.norm(step) < 1.e-10:
        break

print(q)

''' Generate data for contour plot '''
n = 100
q1 = np.linspace(0.1, 1.5, n)
q2 = np.linspace(0.1, 1.5, n)
z = np.array([cournot(q)[0] for q in gridmake(q1, q2).T]).T

''' Using an NLP object '''
q = np.array([0.2, 0.2])
cournot_problem = NLP(cournot)
q_star, fq_star = cournot_problem.newton(q)
print(q_star)

''' Plot figures '''
steps_options = {'marker': 'o',
                 'color': (0.2, 0.2, .81),
                 'linewidth': 1.0,
                 'markersize': 6,
                 'markerfacecolor': 'red',
                 'markeredgecolor': 'red'}

contour_options = {'levels': [0.0],
                   'colors': '0.25',
                   'linewidths': 0.5}

Q1, Q2 = np.meshgrid(q1, q2)
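''' Sketch: overlay the zero contours of both equilibrium conditions.
    The reshape assumes gridmake orders points consistently with meshgrid
    (illustrative, not from the source). '''
plt.contour(Q1, Q2, z[0].reshape(n, n), **contour_options)
plt.contour(Q1, Q2, z[1].reshape(n, n), **contour_options)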
__author__ = 'Randall'

import numpy as np
from numpy import log, exp, sqrt
from scipy.stats import norm as Normal_distribution
from compecon import NLP, MCP, LCP
from compecon.tools import example, exercise

''' Example page 32 '''
example(32)
f = NLP(lambda x: x**3 - 2)
x = f.bisect(1, 2)
print('x = ', x)

''' Example page 33 '''
example(33)
g = NLP(lambda x: x**0.5)
x = g.fixpoint(0.4)
print('x = ', x)

''' Example page 35 '''
example(35)
def cournot(q):
    c = np.array([0.6, 0.8])
    eta = 1.6
    e = -1 / eta
    s = q.sum()
    fval = s**e + e * s**(e - 1) * q - c * q
    fjac = e * s**(e - 1) * np.ones([2, 2]) + e * s**(e - 1) * np.identity(2) + \
           (e - 1) * e * s**(e - 2) * np.outer(q, [1, 1]) - np.diag(c)
    return fval, fjac
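''' Sketch: solving the Cournot system with the NLP class, mirroring its use
    elsewhere in this section (the starting point is illustrative) '''
cournot_problem = NLP(cournot)
q_star, fq_star = cournot_problem.newton(np.array([0.2, 0.2]))
print('q = ', q_star)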
# ### Approximation structure

# In[3]:

n, a, b = 21, 0.5, 2.5
Q = BasisChebyshev(n, a, b)
c0 = np.zeros(n)
c0[0] = 2
p = Q.nodes

# ### Solve for effective supply function

# In[4]:

monopoly = NLP(resid)
Q.c = monopoly.broyden(c0)

# ### Setup plot

# In[5]:

nplot = 1000
p = np.linspace(a, b, nplot)
rplot = resid(Q.c)

# ### Plot effective supply

# In[6]:

demo.figure("Monopolist's Effective Supply Curve", 'Quantity', 'Price')
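# A sketch of the plot itself: effective supply puts quantity $Q(p)$ on the
# horizontal axis against price $p$ (the exact styling in the original may
# differ).

plt.plot(Q(p), p)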
# In[6]:

def resid(c, s=vhat.nodes):
    vhat.c = c
    return vhat(s) - np.maximum(vhat0(s), vhat1(s))

# ### Solve collocation equation

# In[7]:

cc = NLP(resid).broyden(vhat.c)

# ### Compute critical biomass

# In[8]:

scrit = NLP(lambda s: vhat0(s) - vhat1(s)).broyden(0.0)[0]

# ## ANALYSIS

# ### Compute refined state grid

# In[9]:
def resid(c):
    F.c = c     # update basis coefficients
    f = F(x)    # interpolate at basis nodes x
    return f ** -2 + f ** -5 - 2 * x

# ### Compute function inverse

# In[4]:

c0 = np.zeros(n)    # set initial guess for coefficients
c0[0] = 0.2
problem = NLP(resid)
F.c = problem.broyden(c0)   # compute coefficients by Broyden's method

# ### Plot setup

# In[5]:

n = 1000
x = np.linspace(a, b, n)
r = resid(F.c)

# ### Plot function inverse
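# A sketch of the plot, assuming the `demo` and `plt` helpers used by the
# other demos in this section are in scope (figure title illustrative):

demo.figure('Function Inverse', 'x', 'f^{-1}(x)')
plt.plot(x, F(x))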
# In[1]:

from compecon import demo
import numpy as np
import matplotlib.pyplot as plt
from compecon import NLP

# ### Define an NLP problem
#
# Here, we set convergence tolerance `tol=1e-20` and the option `all_x=True`
# to record all values taken by $x$ from the initial guess `x0=2.0` to the
# final solution. These values will be stored in the `.x_sequence` attribute.
#
# We also define `err` to compute the base-10 logarithm of the error (the gap
# between the current iterate and the solution).

# In[2]:

A = NLP(lambda x: (np.exp(x) - 1, np.exp(x)), all_x=True, tol=1e-20)
err = lambda z: np.log10(np.abs(z)).flatten()
x0 = 2.0

# ### Solve the problem

# #### * Using Newton's method

# In[3]:

A.newton(x0)
err_newton = err(A.x_sequence)

# #### * Using Broyden's method

# In[4]:
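# A sketch of the parallel Broyden cell, mirroring the Newton cell above.
# Since the root of $\exp(x)-1$ is $x=0$, the recorded iterates are themselves
# the errors, which is why `err` can be applied to `x_sequence` directly:

A.broyden(x0)
err_broyden = err(A.x_sequence)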