def runTest(self):
    """Solve the 2D Rosenbrock problem from the classic starting point and
    verify the minimizer, the returned residual vector and the objective value."""
    start = np.array([-1.2, 1.0])
    np.random.seed(0)  # DFBGN is randomized; fix the seed for reproducibility
    soln = dfbgn.solve(rosenbrock, start)
    print(soln)
    print(rosenbrock_jacobian(soln.x))
    expected_x = np.array([1.0, 1.0])  # known minimizer of Rosenbrock
    self.assertTrue(array_compare(soln.x, expected_x, thresh=1e-4), "Wrong xmin")
    # soln.resid should simply be the objective evaluated at the returned point
    self.assertTrue(array_compare(soln.resid, rosenbrock(soln.x), thresh=1e-10), "Wrong resid")
    # print(soln.jacobian, rosenbrock_jacobian(soln.x))
    # self.assertTrue(array_compare(soln.jacobian, rosenbrock_jacobian(soln.x), thresh=2.5e0), "Wrong Jacobian")
    self.assertTrue(abs(soln.f) < 1e-10, "Wrong fmin")
 def runTest(self):
     n, m = 4, 6
     np.random.seed(0)  # (fixing random seed)
     A = np.random.rand(m, n)
     b = np.random.rand(m)
     objfun = lambda x: np.dot(A, x) - b
     xmin = np.linalg.lstsq(A, b)[0]
     fmin = np.dot(objfun(xmin), objfun(xmin))
     x0 = np.zeros((n, ))
     soln = dfbgn.solve(objfun, x0)  # reduced space
     print(soln)
     self.assertTrue(array_compare(soln.x, xmin, thresh=1e-2), "Wrong xmin")
     self.assertTrue(
         array_compare(soln.resid, objfun(soln.x), thresh=1e-10),
         "Wrong resid")
     # self.assertTrue(array_compare(soln.jacobian, A, thresh=1e-6), "Wrong Jacobian")  # should get exact Jacobian for linear problems
     self.assertTrue(abs(soln.f - fmin) < 1e-4, "Wrong fmin")
# Example #3
    return rosenbrock(x) * (1.0 + 1e-2 * np.random.normal(size=(2, )))


# Starting point
x0 = np.array([-1.2, 1.0])

# Fix the random seed so the noisy evaluations below are reproducible
np.random.seed(0)

print("Demonstrate noise in function evaluation:")
for _ in range(5):
    print("objfun(x0) = {}".format(rosenbrock_noisy(x0)))
print("")

# Run DFBGN on the noisy objective
soln = dfbgn.solve(rosenbrock_noisy, x0, fixed_block=2)

# Show what DFBGN found
print(soln)

# For comparison, run a derivative-based least-squares solver on the same problem
import scipy.optimize as opt

soln = opt.least_squares(rosenbrock_noisy, x0)

print("")
print("** SciPy results **")
print("Solution xmin = {}".format(soln.x))
print("Objective value f(xmin) = {:.10g}".format(2.0 * soln.cost))
print("Needed {:g} objective evaluations".format(soln.nfev))
print("Exit flag = {:g}".format(soln.status))
# DFBGN example: minimize the Rosenbrock function
from __future__ import print_function
import numpy as np
import dfbgn

# Define the objective function
def rosenbrock(x):
    """Return the residual vector of the 2D Rosenbrock function at x."""
    r1 = 10.0 * (x[1] - x[0] ** 2)
    r2 = 1.0 - x[0]
    return np.array([r1, r2])

# Starting point
x0 = np.array([-1.2, 1.0])

# Uncomment for more detailed progress output:
# import logging
# logging.basicConfig(level=logging.INFO, format='%(message)s')

# Fix the seed: DFBGN chooses random subspaces, so runs differ otherwise
np.random.seed(0)

# Run the solver with a fixed subspace (block) size of 2
soln = dfbgn.solve(rosenbrock, x0, fixed_block=2)

# Show the result
print(soln)

# DFBGN example: Solving a nonlinear system of equations
# Originally from:
# http://support.sas.com/documentation/cdl/en/imlug/66112/HTML/default/viewer.htm#imlug_genstatexpls_sect004.htm

from __future__ import print_function
from math import exp
import numpy as np
import dfbgn


# System to solve:
#   x1 + x2 - x1*x2 + 2 = 0
#   x1 * exp(-x2) - 1   = 0
def nonlinear_system(x):
    """Return the residuals of the two-equation system at x = (x1, x2)."""
    f1 = x[0] + x[1] - x[0] * x[1] + 2
    f2 = x[0] * exp(-x[1]) - 1.0
    return np.array([f1, f2])


# Warning: if there are multiple solutions, which one
#          DFBGN returns will likely depend on x0!
x0 = np.array([0.1, -2.0])

# Fix the seed: DFBGN picks random subspaces each iteration
np.random.seed(0)

# Run the solver with a fixed subspace (block) size of 2
soln = dfbgn.solve(nonlinear_system, x0, fixed_block=2)

# Show the result
print(soln)