from demos.setup import np, plt

from compecon.quad import qnwlogn
from compecon.tools import nodeunif
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm

# Univariate Taylor approximation

# Function f(x) = (x + 1)exp(2x) and its first- and second-order Taylor
# approximations about x = 0 (f(0) = 1, f'(0) = 3, f''(0) = 8):
x = np.linspace(-1, 1, 100)
y = (x + 1) * np.exp(2 * x)
y1 = 1 + 3 * x
y2 = 1 + 3 * x + 4 * x**2  # second-order coefficient is f''(0)/2 = 4

plt.figure(figsize=[6, 6])
plt.plot(x, y, 'k', linewidth=3, label='Function')
plt.plot(x, y1, 'b', linewidth=3, label='1st order approximation')
plt.plot(x, y2, 'r', linewidth=3, label='2nd order approximation')
plt.legend()
plt.xticks([-1, 0, 1])
plt.show()
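
# A quick numerical check of the Taylor coefficients about x = 0 (an illustrative
# aside using centered finite differences rather than the toolbox):
g = lambda t: (t + 1) * np.exp(2 * t)
h = 1e-5
d1_fd = (g(h) - g(-h)) / (2 * h)          # ~3, the first-order coefficient
d2_fd = (g(h) - 2 * g(0) + g(-h)) / h**2  # ~8, so the second-order coefficient is 8/2 = 4
print(d1_fd, d2_fd / 2)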

## Bivariate Taylor approximation
nplot = [101, 101]
a = [0, -1]
b = [2, 1]
x1, x2 = nodeunif(nplot, a, b)
x1.shape = nplot
x2.shape = nplot

y = np.exp(x2) * x1**2
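
# A hedged continuation sketch: first- and second-order Taylor approximations of
# f(x1, x2) = x1^2 exp(x2) about (x1, x2) = (1, 0) (the expansion point is an
# assumption, chosen inside the plotting domain), plotted as surfaces.
y1 = 2 * x1 + x2 - 1
y2 = 2 * x1 + x2 - 1 + (x1 - 1)**2 + 2 * (x1 - 1) * x2 + 0.5 * x2**2

fig = plt.figure(figsize=[12, 4])
for k, (z, ttl) in enumerate(zip([y, y1, y2],
                                 ['Function', '1st order', '2nd order']), start=1):
    ax = fig.add_subplot(1, 3, k, projection='3d', title=ttl)
    ax.plot_surface(x1, x2, z, cmap=cm.coolwarm)
plt.show()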

# Example 2
from demos.setup import np, plt
from compecon import BasisChebyshev
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
""" Approximating functions on R^2

This m-file illustrates how to use CompEcon Toolbox routines to construct and operate with an approximant for a
function defined on a rectangle in R^2.

In particular, we construct an approximant for f(x1,x2)=cos(x1)/exp(x2) on [-1,1]X[-1,1].  The function used in this
illustration posseses a closed-form, which will allow us to measure approximation error precisely. Of course, in
practical applications, the function to be approximated will not possess a known closed-form.

In order to carry out the exercise, one must first code the function to be approximated at arbitrary points.
Let's begin:
"""
# Function to be approximated
f = lambda x: np.cos(x[0]) / np.exp(x[1])

# Set the endpoints of the approximation interval:
a = -1  # left endpoint
b = 1   # right endpoint

# Choose an approximation scheme. In this case, let us use an 11 by 11 Chebychev approximation scheme:
n = 11  # order of approximation
basis = BasisChebyshev([n, n], a, b)  # write n twice to indicate the two dimensions; a and b are expanded

# Compute the basis coefficients c.  There are various ways to do this:
# One may compute the standard approximation nodes x and corresponding interpolation matrix Phi and function values y
# and use:
x = basis.nodes
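
# A hedged sketch of the remaining fitting step. The explicit route would build
# the interpolation matrix Phi at the nodes and solve Phi c = y; here we use the
# assignment interface that appears in the option-pricing example further below
# (basis[:] = values), which is assumed to perform the same fit internally.
y = f(x)        # function values at the standard approximation nodes
basis[:] = y    # fit the approximant so that it interpolates y at the nodes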

# Example 3
from demos.setup import np, plt
from compecon import BasisChebyshev
from compecon.tools import nodeunif

__author__ = 'Randall'

## DEMAPP05 Chebychev polynomial and spline approximation of various functions

# Preliminary tasks

# Demonstrates Chebychev polynomial, cubic spline, and linear spline approximation for the following functions
#   1: y = 1 + x + 2x^2 - 3x^3
#   2: y = exp(-x)
#   3: y = 1/(1 + 25x^2)
#   4: y = sqrt(abs(x))

## Functions to be approximated
funcs = [lambda x: 1 + x + 2 * x**2 - 3 * x**3,
         lambda x: np.exp(-x),
         lambda x: 1 / (1 + 25 * x**2),
         lambda x: np.sqrt(np.abs(x))]

# Set degree of approximation and endpoints of approximation interval
n = 7  # degree of approximation
a = -1  # left endpoint
b = 1  # right endpoint

# Construct uniform grid for error plotting
x = nodeunif(2001, a, b)


def subfig(k, x, y, xlim, ylim, title):
    plt.subplot(2, 2, k)
    plt.plot(x, y)
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.title(title)
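
# A minimal sketch (not the full demo) of how the error plots could be produced
# with the Chebychev basis used elsewhere in these examples; the cubic and
# linear spline variants would follow the same pattern with BasisSpline.
plt.figure(figsize=[10, 8])
for k, func in enumerate(funcs, start=1):
    ffit = BasisChebyshev(n, a, b, f=func)  # degree-n Chebychev interpolant of func
    err = ffit(x) - func(x)                 # approximation error on the fine grid
    lim = np.abs(err).max()
    subfig(k, x, err, [a, b], [-lim, lim], 'Function {}'.format(k))
plt.show()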

# Example 4
from demos.setup import np


def f(x):
    # Residual and Jacobian of exp(-x) - 1 = 0 (root at x = 0)
    fval = np.exp(-x) - 1
    fjac = -np.exp(-x)
    return fval, fjac
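
# A hedged usage sketch: f returns the residual and its Jacobian, which is all a
# Newton iteration needs. A plain Newton loop is used here to stay self-contained;
# the starting point is an illustrative choice.
x = 1.0
for _ in range(50):
    fval, fjac = f(x)
    step = fval / fjac       # Newton step for a scalar equation
    x -= step
    if abs(step) < 1e-10:
        break
print('Newton root of exp(-x) - 1 = 0:', x)  # should be essentially 0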

# Example 5
from demos.setup import np, plt
from compecon import BasisChebyshev
from compecon.tools import nodeunif
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
""" Approximating using the CompEcon toolbox """
'''Univariate approximation'''
# Define function and derivative
f1 = lambda x: np.exp(-2 * x)
d1 = lambda x: -2 * np.exp(-2 * x)

# Fit approximant
n, a, b = 10, -1, 1
f1fit = BasisChebyshev(n, a, b, f=f1)

# Graph approximation error for function and derivative
axopts = {'xlabel': 'x', 'ylabel': 'Error', 'xticks': [-1, 0, 1]}
x = np.linspace(a, b, 1001)
fig = plt.figure(figsize=[12, 6])

ax1 = fig.add_subplot(121, title='Function approximation error', **axopts)
ax1.axhline(linestyle='--', color='gray', linewidth=2)
ax1.plot(f1fit.nodes, np.zeros_like(f1fit.nodes), 'ro', markersize=12)
ax1.plot(x, f1fit(x) - f1(x))

ax2 = fig.add_subplot(122, title='Derivative approximation error', **axopts)
ax2.plot(x, np.zeros_like(x), '--', color='gray', linewidth=2)
ax2.plot(f1fit.nodes, np.zeros_like(f1fit.nodes), 'ro', markersize=12)
ax2.plot(x, f1fit(x, 1) - d1(x))
''' Bivariate Interpolation '''
# Define function
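
# A hedged continuation sketch (the remainder of this example was not captured).
# It reuses the two-variable function from the R^2 example above and assumes the
# f= fitting keyword also works for a two-dimensional basis.
f2 = lambda x: np.cos(x[0]) / np.exp(x[1])
n2, a2, b2 = 7, -1, 1
f2fit = BasisChebyshev([n2, n2], a2, b2, f=f2)  # 7-by-7 tensor-product Chebychev basis (assumed interface)
# Error plots would then proceed as in the univariate case above.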

# Example 6
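
# The code below starts mid-script; the missing preamble would look roughly like
# this (all names and numbers here are illustrative assumptions, not the
# original demo's values):
from demos.setup import np
from compecon import BasisSpline, NLP
from compecon.quad import qnwnorm

K = 1.0                          # put option strike price (assumed)
N = 300                          # periods to expiration (suggested by the closing comment)
delta = 0.999                    # per-period discount factor (assumed)
m = 15                           # number of discretized log-price shocks (assumed)
e, w = qnwnorm(m, 0, 0.02 ** 2)  # shock nodes and weights (routine and signature assumed)
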
# Approximation Structure
n = 500  # number of collocation nodes
pmin = -1  # minimum log price
pmax = 1  # maximum log price
Value = BasisSpline(n, pmin, pmax, labels=['logprice'],
                    l=['value'])  # basis functions
# p     = funnode(basis)                 # collocaton nodes
# Phi   = funbase(basis)                 # interpolation matrix

## SOLUTION

# Initialize Value Function
# c = zeros(n,1)                         # conditional value function basis coefficients

# Solve Bellman Equation and Compute Critical Exercise Prices
f = NLP(lambda p: K - np.exp(p) - delta * Value(p))

pcrit = np.empty(N + 1)

pcrit[0] = f.zero(0.0)

for t in range(N):
    v = np.zeros((1, n))
    for k in range(m):
        pnext = Value.nodes + e[k]
        v += w[k] * np.maximum(K - np.exp(pnext), delta * Value(pnext))

    Value[:] = v
    pcrit[t + 1] = f.broyden(pcrit[t])

# Print Critical Exercise Price 300 Periods to Expiration
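# (hedged reconstruction of the missing print; pcrit is in log prices)
print('Critical exercise price {:d} periods to expiration: {:.4f}'.format(N, np.exp(pcrit[-1])))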

# Example 8
from demos.setup import np


def f(x):
    # exp(-x) together with its first and second derivatives, stacked by row
    g = np.zeros((3, x.size))
    g[0], g[1], g[2] = np.exp(-x), -np.exp(-x), np.exp(-x)
    return g
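
# A small hedged check (illustrative grid and step size): the second row of g
# should match a centered finite-difference derivative of exp(-x).
xg = np.linspace(-1, 1, 5)
h = 1e-6
g = f(xg)
fd = (np.exp(-(xg + h)) - np.exp(-(xg - h))) / (2 * h)
print(np.max(np.abs(g[1] - fd)))  # should be close to zero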

# Example 10
from demos.setup import np


def f(x):
    # Simple test function: f(x) = exp(-x)
    return np.exp(-x)

# Example 11
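
# The code below starts mid-script; a hedged reconstruction of the missing
# preamble, consistent with the univariate examples above (n = 10 matches the
# order mentioned in the closing comment; the derivative calls assume the
# F(x, order) convention used earlier):
from demos.setup import np, plt, demo
from compecon import BasisChebyshev

f = lambda x: np.exp(-x)           # function being approximated
n, a, b = 10, -1, 1                # degree and approximation interval
F = BasisChebyshev(n, a, b, f=f)   # Chebychev approximant of f

x = 0                              # evaluation point used below
d1 = F(x, 1)                       # approximate first derivative at x = 0
d2 = F(x, 2)                       # approximate second derivative at x = 0
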
print('\nThe approximate first derivative of exp(-x) at x=0 is {:.15f}'.format(d1))
print("The  'exact'  first  derivative  of exp(-x) at x=0 is {:.15f}".format(-1))
print('\nThe approximate second derivative of exp(-x) at x=0 is {:.15f}'.format(d2))
print("The  'exact'  second  derivative of  exp(-x) at x=0 is {:.15f}".format(1))

# ... and one may even evaluate the approximant's definite integral between the left endpoint a and x:
fint = F(x, -1)
print('\nThe approximate integral of exp(-x) between x=-1 and x=0 is {:.15f}'.format(fint))
print("The  'exact'  integral  of  exp(-x) between x=-1 and x=0 is {:.15f}".format(np.exp(1) - 1))

# One may evaluate the accuracy of the Chebychev polynomial approximant by
# computing the approximation error on a highly refined grid of points:
ngrid = 5001  # number of grid nodes
xgrid = np.linspace(a, b, ngrid)  # generate refined grid for plotting
yapp = F(xgrid)  # approximant values at grid nodes
yact = f(xgrid)  # actual function values at grid points

demo.figure('Chebychev Approximation Error for exp(-x)', 'x', 'Error')
plt.plot(xgrid, yapp - yact)
plt.plot(xgrid, np.zeros(ngrid), 'k--', linewidth=2)

# The plot indicates that an order-10 Chebychev approximation scheme produces approximation errors
# no bigger in magnitude than 6x10^-10. The approximation error exhibits the "Chebychev equioscillation
# property", oscillating relatively uniformly throughout the approximation domain.

# Example 12
from demos.setup import np, plt, demo
from compecon import DDPmodel

# DEMDDP04 Binomial American put option model

# Model Parameters
T = 0.5                 # years to expiration
sigma = 0.2                 # annual volatility
r = 0.05                # annual interest rate
strike = 2.1                 # option strike price
p0 = 2.0                 # current asset price

# Discretization Parameters
N = 100                 # number of time intervals
tau = T / N                       # length of time intervals
delta = np.exp(-r * tau)          # discount factor
u = np.exp(sigma * np.sqrt(tau))  # up jump factor
q = 0.5 + np.sqrt(tau) * (r - (sigma**2) / 2) / (2 * sigma)  # up jump probability

# State Space
price = p0 * u ** np.arange(-N, N+1)      # asset prices
n = price.size        # number of states

# Action Space (hold=1, exercise=2)
X = ['hold', 'exercise']   	# vector of actions
m = len(X)               	# number of actions

# Reward Function
f = np.zeros((m,n))
f[1] = strike - price
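
# A hedged continuation sketch (the remainder of this demo was not captured).
# Transition probabilities for the 'hold' action: the price moves up one step
# with probability q and down one step with probability 1-q; the 'exercise'
# row is left at zero so that the continuation value after exercise vanishes.
P = np.zeros((m, n, n))
for i in range(n):
    P[0, i, min(i + 1, n - 1)] = q
    P[0, i, max(i - 1, 0)] = 1 - q

# The model would then be handed to the toolbox solver; the constructor
# arguments below follow the pattern of other DDP demos and are an assumption,
# not a confirmed signature.
# model = DDPmodel(f, P, delta, horizon=N)
# model.solve()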