def g(x):
    return np.sqrt(x)

problem_as_fixpoint = NLP(g, xinit)

''' Equivalent Rootfinding Formulation '''
def f(x):
    fval = x - np.sqrt(x)
    fjac = 1 - 0.5 / np.sqrt(x)
    return fval, fjac

problem_as_zero = NLP(f, xinit)

''' Compute fixed-point using Newton method '''
t0 = tic()
x1 = problem_as_zero.newton()
t1 = 100 * toc(t0)
n1 = problem_as_zero.fnorm

''' Compute fixed-point using Broyden method '''
t0 = tic()
x2 = problem_as_zero.broyden()
t2 = 100 * toc(t0)
n2 = problem_as_zero.fnorm

''' Compute fixed-point using function iteration '''
t0 = tic()
x3 = problem_as_fixpoint.fixpoint()
t3 = 100 * toc(t0)
n3 = np.linalg.norm(problem_as_fixpoint.fx - x3)
Example #2

problem_as_fixpoint = NLP(g, xinit)
''' Equivalent Rootfinding Formulation '''


def f(x):
    fval = x - np.sqrt(x)
    fjac = 1 - 0.5 / np.sqrt(x)
    return fval, fjac


problem_as_zero = NLP(f, xinit)
''' Compute fixed-point using Newton method '''
t0 = tic()
x1 = problem_as_zero.newton()
t1 = 100 * toc(t0)
n1 = problem_as_zero.fnorm
''' Compute fixed-point using Broyden method '''
t0 = tic()
x2 = problem_as_zero.broyden()
t2 = 100 * toc(t0)
n2 = problem_as_zero.fnorm
''' Compute fixed-point using function iteration '''
t0 = tic()
x3 = problem_as_fixpoint.fixpoint()
t3 = 100 * toc(t0)
n3 = np.linalg.norm(problem_as_fixpoint.fx - x3)

print('Hundredths of seconds required to compute fixed-point of g(x)=sqrt(x)')
print('using Newton, Broyden, and function iteration methods, starting at')
Example #3
example(35)


def cournot(q):
    # Cournot duopoly: inverse demand P(s) = s**(-1/eta) with s = q1 + q2, and
    # marginal cost c[i]*q[i] for firm i (quadratic cost 0.5*c[i]*q[i]**2).
    c = np.array([0.6, 0.8])
    eta = 1.6
    e = -1 / eta
    s = q.sum()
    # First-order conditions: P(s) + P'(s)*q[i] - c[i]*q[i] = 0, i = 1, 2
    fval = s ** e + e * s ** (e - 1) * q - c * q
    # Jacobian of the first-order conditions with respect to (q1, q2)
    fjac = e * s ** (e - 1) * np.ones([2, 2]) + e * s ** (e - 1) * np.identity(2) + \
           (e - 1) * e * s ** (e - 2) * np.outer(q, [1, 1]) - np.diag(c)
    return fval, fjac


market = NLP(cournot)
x = market.newton([0.2, 0.2])
print('q1 = ', x[0], '\nq2 = ', x[1])
''' Example page 39 '''
# continuation of example page 35
example(39)
x = market.broyden([0.2, 0.2])
print('q1 = ', x[0], '\nq2 = ', x[1])
''' Example page 43 '''
# The numbers don't match those in Table 3.1, but CompEcon2014's broyden gives the same answer as this code
example(43)
opts = {'maxit': 30, 'all_x': True, 'print': True}

g = NLP(lambda x: np.sqrt(x), 0.5)
f = NLP(lambda x: (x - np.sqrt(x), 1 - 0.5 / np.sqrt(x)), 0.5)

x_fp = g.fixpoint(**opts)
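
# The snippet cuts off here; presumably the equivalent rootfinding form f is solved next for
# comparison. A minimal sketch (the names x_newton and x_broyden are illustrative; whether
# newton/broyden accept the same keyword options as fixpoint is not shown here, so they are
# called with their defaults):
x_newton = f.newton()
x_broyden = f.broyden()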
# We also define `err` to compute the base-10 logarithm of the error (the gap between the current iteration and the solution).

# In[2]:

A = NLP(lambda x: (np.exp(x) - 1, np.exp(x)), all_x=True, tol=1e-20)
err = lambda z: np.log10(np.abs(z)).flatten()
x0 = 2.0


# ### Solve the problem

# #### * Using Newton's method

# In[3]:

A.newton(x0)
err_newton = err(A.x_sequence)


# #### * Using Broyden's method

# In[4]:

A.broyden(x0)
err_broyden = err(A.x_sequence)


# #### * Using function iteration
# 
# This method finds a zero of $f(x)$ by looking for a fixpoint of $g(x) = x-f(x)$.
Example #5
import numpy as np

from compecon import NLP
''' Set up the problem '''


def f(x):
    fval = np.exp(-x) - 1
    fjac = -np.exp(-x)
    return fval, fjac


problem = NLP(f, all_x=True)
''' Randomly generate starting point '''
problem.x0 = 10 * np.random.randn(1)
''' Compute root using Newton method '''
t0 = tic()
x1 = problem.newton()
t1 = 100 * toc(t0)
n1, x_newton = problem.fnorm, problem.x_sequence
''' Compute root using Broyden method '''
t0 = tic()
x2 = problem.broyden()
t2 = 100 * toc(t0)
n2, x_broyden = problem.fnorm, problem.x_sequence
''' Print results '''
print('Hundredths of seconds required to compute root of exp(-x)-1,')
print('via Newton and Broyden methods, starting at x = %4.2f.' % problem.x0)
print('\nMethod      Time   Norm of f   Final x')
print('Newton  %8.2f    %8.0e     %5.2f' % (t1, n1, x1))
print('Broyden %8.2f    %8.0e     %5.2f' % (t2, n2, x2))
''' View current options for solver '''
print(problem.opts)
Example #6
''' Find the Cournot equilibrium with a hand-coded Newton iteration '''
q = np.array([0.2, 0.2])           # starting point, same value used for the NLP solver below
for it in range(40):
    f, J = cournot(q)
    step = -np.linalg.solve(J, f)  # Newton step: solve J @ step = -f
    q += step
    if np.linalg.norm(step) < 1.e-10:
        break

print(q)
''' Generate data for contour plot '''
n = 100
q1 = np.linspace(0.1, 1.5, n)
q2 = np.linspace(0.1, 1.5, n)
z = np.array([cournot(q)[0] for q in gridmake(q1, q2).T]).T
''' Using a NLP object '''
q = np.array([0.2, 0.2])
cournot_problem = NLP(cournot)  #, q)
q_star, fq_star = cournot_problem.newton(q)
print(q_star)
''' Plot figures '''
steps_options = {
    'marker': 'o',
    'color': (0.2, 0.2, .81),
    'linewidth': 1.0,
    'markersize': 6,
    'markerfacecolor': 'red',
    'markeredgecolor': 'red'
}

contour_options = {'levels': [0.0], 'colors': '0.25', 'linewidths': 0.5}

Q1, Q2 = np.meshgrid(q1, q2)
Z0 = np.reshape(z[0], (n, n), order='F')
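
# The snippet stops before the figure itself is drawn; a minimal sketch, assuming matplotlib,
# of how Z0, q_star and the option dicts above might be combined (only the zero contour of the
# first equilibrium condition and the computed equilibrium are shown):
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.contour(Q1, Q2, Z0, **contour_options)  # zero contour of firm 1's first-order condition
ax.plot(*q_star, **steps_options)          # computed Cournot equilibrium
ax.set(xlabel='$q_1$', ylabel='$q_2$')
plt.show()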

''' Set up the problem '''
def f(x):
    fval = np.exp(-x) - 1
    fjac = -np.exp(-x)
    return fval, fjac

problem = NLP(f, all_x=True)

''' Randomly generate starting point '''
problem.x0 = 10 * np.random.randn(1)

''' Compute root using Newton method '''
t0 = tic()
x1 = problem.newton()
t1 = 100 * toc(t0)
n1, x_newton = problem.fnorm, problem.x_sequence


''' Compute root using Broyden method '''
t0 = tic()
x2 = problem.broyden()
t2 = 100 * toc(t0)
n2, x_broyden = problem.fnorm, problem.x_sequence


''' Print results '''
print('Hundredths of seconds required to compute root of exp(-x)-1,')
print('via Newton and Broyden methods, starting at x = %4.2f.' % problem.x0)
print('\nMethod      Time   Norm of f   Final x')
beta = np.array([0.6, 0.8])

''' Set up the Cournot function '''



''' Generate data for contour plot '''
n = 100
q1 = np.linspace(0.3, 1.1, n)
q2 = np.linspace(0.4, 1.2, n)
z = np.array([cournot(q)[0] for q in gridmake(q1, q2).T]).T

''' Using a NLP object '''
q = np.array([1.0, 0.5])
cournot_problem = NLP(cournot)#, q)
q_star, fq_star = cournot_problem.newton(q)
print(q_star)


''' Plot figures '''
steps_options = {'marker': 'o',
                 'color': (0.2, 0.2, .81),
                 'linewidth': 1.0,
                 'markersize': 6,
                 'markerfacecolor': 'red',
                 'markeredgecolor': 'red'}

contour_options = {'levels': [0.0],
                   'colors': '0.25',
                   'linewidths': 0.5}

''' Set up the problem '''
def f(z):
    # Bivariate rootfinding formulation of the fixed-point problem
    x, y = z
    fval = [x - x ** 2 - y ** 3,
            y - x * y + 0.5]
    fjac = [[1 - 2 * x, -3 * y ** 2],
            [-y, 1 - x]]
    return np.array(fval), np.array(fjac)

problem_as_zero = NLP(f, maxit=1500)
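
''' Equivalent fixed-point formulation (reconstructed, not in the snippet above: the fixpoint
    call below needs problem_as_fixpoint, and by the document's own pattern g(x) = x - f(x),
    which here gives g(z) = [x**2 + y**3, x*y - 0.5]; maxit mirrors problem_as_zero) '''
def g(z):
    x, y = z
    return np.array([x ** 2 + y ** 3, x * y - 0.5])

problem_as_fixpoint = NLP(g, maxit=1500)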

''' Randomly generate starting point '''
xinit = np.random.randn(2)

''' Compute fixed-point using Newton method '''
t0 = tic()
z1 = problem_as_zero.newton(xinit)
t1 = 100 * toc(t0)
n1 = problem_as_zero.fnorm

''' Compute fixed-point using Broyden method '''
t0 = tic()
z2 = problem_as_zero.broyden(xinit)
t2 = 100 * toc(t0)
n2 = problem_as_zero.fnorm

''' Compute fixed-point using function iteration '''
t0 = tic()
z3 = problem_as_fixpoint.fixpoint(xinit)
t3 = 100 * toc(t0)
n3 = np.linalg.norm(problem_as_fixpoint.fx - z3)
Example #10
#
# We also define `err` to compute the base-10 logarithm of the error (the gap between the current iteration and the solution).

# In[2]:

A = NLP(lambda x: (np.exp(x) - 1, np.exp(x)), all_x=True, tol=1e-20)
err = lambda z: np.log10(np.abs(z)).flatten()
x0 = 2.0

# ### Solve the problem

# #### * Using Newton's method

# In[3]:

A.newton(x0)
err_newton = err(A.x_sequence)

# #### * Using Broyden's method

# In[4]:

A.broyden(x0)
err_broyden = err(A.x_sequence)

# #### * Using function iteration
#
# This method finds a zero of $f(x)$ by looking for a fixpoint of $g(x) = x-f(x)$.

# In[5]:
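
# The code for this cell is missing from the snippet; a minimal sketch consistent with the
# comment above (the names B and err_funcit are illustrative): build the fixpoint problem
# g(x) = x - f(x) = x - (exp(x) - 1), iterate from x0, and measure the error against the
# known root x = 0.
B = NLP(lambda x: x - (np.exp(x) - 1), all_x=True)
B.fixpoint(x0)
err_funcit = err(B.x_sequence)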