def test_ClassicNewton_rosenbrock(self):
    x0 = np.array([100, 0])
    problem = OptimizationProblem(self.functions.rosenbrock, self.functions.rosenbrockGrad)
    minimum = problem.solve(x0, self.tol, "ClassicNewton")
    self.assertAlmostEqual(self.functions.rosenbrock(minimum),
                           self.functions.rosenbrock(np.array([1, 1])))
def test_BFGS_inexact_rosenbrock(self):
    x0 = np.array([1.5, 1.5])
    problem = OptimizationProblem(self.functions.rosenbrock, self.functions.rosenbrockGrad)
    minimum = problem.solve(x0, self.tol, "BFGS", "Inexact")
    self.assertAlmostEqual(self.functions.rosenbrock(minimum),
                           self.functions.rosenbrock(np.array([1, 1])))
class TestOptimizationProblem(unittest.TestCase):

    def f(self, x):
        return dot(x, x)

    def g(self, x):
        return 2 * x

    def setUp(self):
        """Set up the optimization problems."""
        self.optProb = OptimizationProblem(self.f, self.g)
        self.optProb2 = OptimizationProblem(self.f)

    def testInit(self):
        """Test the init function."""
        assert self.optProb.f == self.f
        assert self.optProb.g == self.g
        assert self.optProb.f == self.optProb2.f
        assert self.optProb.g != self.optProb2.g

    def testApproxG(self):
        """Test that the numerically approximated gradient matches the analytic one."""
        x = np.array([1, 2, 3])
        np.testing.assert_array_almost_equal(self.optProb.g(x), self.optProb2.g(x))
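# testApproxG relies on OptimizationProblem falling back to a numerical
# gradient when none is supplied. A minimal sketch of such a fallback,
# assuming a central-difference scheme (the repo's actual implementation
# may differ):
import numpy as np

def approx_gradient(f, x, h=1e-6):
    """Central-difference approximation of the gradient of f at x."""
    x = np.asarray(x, dtype=float)
    grad = np.zeros_like(x)
    for i in range(x.size):
        e = np.zeros_like(x)
        e[i] = h
        grad[i] = (f(x + e) - f(x - e)) / (2 * h)
    return grad

# Example: for f(x) = x.x the exact gradient is 2x.
print(approx_gradient(lambda x: np.dot(x, x), np.array([1, 2, 3])))  # ~[2. 4. 6.]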
def test_GoodBroyden_inexact_midPol2d(self):
    x0 = np.array([5, 5])
    problem = OptimizationProblem(self.functions.midPol2d, self.functions.midPolGrad2d)
    minimum = problem.solve(x0, self.tol, "GoodBroyden", "Inexact")
    for i in range(np.size(minimum)):
        self.assertAlmostEqual(minimum[i], 0, 3)
def test_BFGS_midPol2d_exact(self):
    x0 = np.array([5, 5])
    problem = OptimizationProblem(self.functions.midPol2d, self.functions.midPolGrad2d)
    minimum = problem.solve(x0, self.tol, "BFGS", "Exact")
    for i in range(np.size(x0)):
        self.assertAlmostEqual(minimum[i], 0, 3)
def test_ClassicNewton_midPol2d(self):
    x0 = np.array([20, 2])
    problem = OptimizationProblem(self.functions.midPol2d, self.functions.midPolGrad2d)
    minimum = problem.solve(x0, self.tol, "ClassicNewton")
    for i in range(np.size(minimum)):
        self.assertAlmostEqual(minimum[i], 0, 3)
def test_DFP_inexact_chebyquad_n8(self):
    x0 = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    problem = OptimizationProblem(self.functions.chebyquad, self.functions.chebyquadGrad)
    minimum = problem.solve(x0, self.tol, "DFP", "Inexact")
    # Reference solution from scipy; should converge after 18 iterations.
    xmin = so.fmin_bfgs(self.functions.chebyquad, x0, self.functions.chebyquadGrad)
    for i in range(np.size(minimum)):
        self.assertAlmostEqual(minimum[i], xmin[i], 1)
def setUp(self):
    """Set up the optimization problems."""
    self.optProb = OptimizationProblem(f, g)
    self.optProbWithoutG = OptimizationProblem(f)
    self.optMeth = QuasiNewton(self.optProb, MethodType.CLASSICALNEWTON)
    self.optProb2 = OptimizationProblem(f2, g2)
    self.optProbWithoutG2 = OptimizationProblem(f2)
    self.optMeth2 = QuasiNewton(self.optProb2, MethodType.CLASSICALNEWTON)
    self.optProbRos = OptimizationProblem(rosf, rosg)
    self.optMethRos = QuasiNewton(self.optProbRos, MethodType.CLASSICALNEWTON)
def setUp(self):
    """Set up the optimization problems."""
    self.optProb = OptimizationProblem(f, g)
    self.optProbWithoutG = OptimizationProblem(f)
    self.classicalNewton = QuasiNewton(self.optProb, MethodType.CLASSICALNEWTON)
    self.optProb2 = OptimizationProblem(f2, g2)
    self.optProbWithoutG2 = OptimizationProblem(f2)
    self.optBfgs = QuasiNewton(self.optProb, MethodType.BFGS)
    self.optDfp = QuasiNewton(self.optProb, MethodType.DFP)
    self.optclassNew2 = QuasiNewton(self.optProb2, MethodType.CLASSICALNEWTON)
    self.optBfgs2 = QuasiNewton(self.optProb2, MethodType.BFGS)
    self.optDfp2 = QuasiNewton(self.optProb2, MethodType.DFP)
def testChebyquad(self):
    # Testing the Chebyshev polynomial (chebyquad) problem for n = 4, 8 and 11
    x4 = np.array([0] * 4)
    x8 = np.array([0] * 8)
    x11 = np.array([0] * 11)
    optProb = OptimizationProblem(chebyquad_fcn, gradchebyquad)
    newtonLinesearchSteepest = QuasiNewton(optProb, MethodType.ClassicalNewtonExactLineSteepest)
    bfgs = QuasiNewton(optProb, MethodType.BFGS)

    # Calculate with exact line search
    newton4 = newtonLinesearchSteepest.solve(x4)[0]
    newton8 = newtonLinesearchSteepest.solve(x8)[0]
    newton11 = newtonLinesearchSteepest.solve(x11)[0]

    # Calculate with the BFGS provided by scipy
    scipy4 = sp.optimize.fmin_bfgs(chebyquad, x4)
    scipy8 = sp.optimize.fmin_bfgs(chebyquad, x8)
    scipy11 = sp.optimize.fmin_bfgs(chebyquad, x11)

    # Calculate with our BFGS
    bfgs4 = bfgs.solve(x4)[0]
    bfgs8 = bfgs.solve(x8)[0]
    bfgs11 = bfgs.solve(x11)[0]

    print("Cheby-Newton:")
    print(newton4)
    print(newton8)
    print(newton11)
    print("Cheby-Scipy:")
    print(scipy4)
    print(scipy8)
    print(scipy11)
    print("Cheby-BFGS:")
    print(bfgs4)
    print(bfgs8)
    print(bfgs11)
def setUp(self, func=rosen, x_0=array([230, 30]), gradient=None):
    """
    Sets up the test class.

    Parameters
    ----------
    func : Function to be tested; default is the Rosenbrock function.
    x_0 : Initial guess; default is [230, 30] for the Rosenbrock function.
    gradient : Optional gradient of func. If omitted, it is approximated numerically.
    """
    self.problem = OptimizationProblem(func, x_0, gradient)
def testRosenBrockClassic(self):
    """Optimizes the Rosenbrock function with the classical Newton method."""
    prob = OptimizationProblem(rosf, rosg)
    solver = QuasiNewton(prob, MethodType.CLASSICALNEWTON)
    tol = 1e-5
    kmax = 50
    x0 = np.transpose(np.array([0.0, 0.0]))
    (solution, fval, k) = solver.solve(x0, tol, kmax)
    self.assertAlmostEqual(fval, 0)
    np.testing.assert_allclose(solution, np.array([1, 1]), 0, 1e-5)
def testRosenBrockLineSearchInexact(self):
    """Optimizes the Rosenbrock function with a Newton method that applies an inexact line search."""
    prob = OptimizationProblem(rosf, rosg)
    solver = QuasiNewton(prob, MethodType.ClassicalNewtonInexactLine)
    tol = 1e-5
    kmax = 50
    x0 = np.transpose(np.array([0.0, 0.0]))
    (solution, fval, k) = solver.solve(x0, tol, kmax)
    self.assertAlmostEqual(fval, 0)
    np.testing.assert_allclose(solution, np.array([1, 1]), 0, 1e-5)
def testRosenBrockNewtonVsLineSearch(self):
    """Tests that the two versions of the algorithm reach the same optimum."""
    prob = OptimizationProblem(rosf, rosg)
    solver1 = QuasiNewton(prob, MethodType.CLASSICALNEWTON)
    solver2 = QuasiNewton(prob, MethodType.ClassicalNewtonExactLineSteepest)
    tol = 1e-5
    kmax = 50
    x0 = np.transpose(np.array([0.0, 0.0]))
    (solution1, fval1, k1) = solver1.solve(x0, tol, kmax)
    (solution2, fval2, k2) = solver2.solve(x0, tol, kmax)
    self.assertAlmostEqual(fval1, fval2)
    np.testing.assert_allclose(solution1, solution2, 0, 1e-5)
def lineSearchExactNewton(self, xk, sk, alpha0=1):
    """
    Perform an exact line search using the classical Newton method.

    Only xk, sk and alpha0 are used; the solver's remaining parameters
    are ignored here. The default initial guess for alpha is 1.

    :return: the step length alpha
    """
    from QuasiNewton import QuasiNewton

    # Reduce the search to a 1-D problem in alpha.
    def fAlpha(alpha):
        return self.optProb.f(xk + alpha * sk)

    def gAlpha(alpha):
        return np.dot(self.optProb.g(xk + alpha * sk), sk)

    # Solve the 1-D problem with Newton's method: no line search,
    # finite-difference approximation of the Hessian.
    optProbAlpha = OptimizationProblem(fAlpha, gAlpha)
    CN = QuasiNewton(optProbAlpha, self.finiteDifference, True)
    return CN.solve(alpha0)[0]  # alpha
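# The helper above reduces the n-dimensional search to the scalar function
# phi(alpha) = f(xk + alpha * sk), whose stationary point satisfies
# phi'(alpha) = g(xk + alpha * sk) . sk = 0. A standalone sketch of the
# same idea, using scipy's minimize_scalar in place of the repo's inner
# Newton solver (an assumption, not this project's implementation):
import numpy as np
from scipy.optimize import minimize_scalar

def exact_line_search(f, xk, sk):
    """Minimize phi(alpha) = f(xk + alpha * sk) over alpha."""
    phi = lambda alpha: f(xk + alpha * sk)
    return minimize_scalar(phi, bracket=(0, 1)).x

# On a quadratic, the exact step along the steepest-descent direction
# lands on the minimum in a single line search: alpha = 0.5 here.
f = lambda x: np.dot(x, x)
x0 = np.array([2.0, -1.0])
alpha = exact_line_search(f, x0, -2 * x0)
print(alpha, x0 + alpha * (-2 * x0))  # 0.5, [0. 0.]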
# Quasi-Newton rank-two update of the inverse Hessian approximation.
# Note: delta and gamma below are the negatives of the usual definitions
# (delta = x_k - x_{k+1}, gamma = g(x_k) - g(x_{k+1})); the sign flips
# cancel in every term of the update.
delta = alpha * (self.inverse_hessian @ self._gradient(x_k))
gamma = self._gradient(x_k) - self._gradient(x_k - delta)
hg = self.inverse_hessian @ gamma
dg = delta @ gamma
u1 = gamma @ hg
a1 = a2 = a3 = 1 / dg
u2 = outer(delta, delta)
u3 = outer(delta, gamma) @ self.inverse_hessian + self.inverse_hessian @ outer(gamma, delta)
#u3 = outer(dg, self.inverse_hessian) + outer(dg, self.inverse_hessian).T  # transpose for the opposite order
self.inverse_hessian = self.inverse_hessian + (1 + a1 * u1) * (a2 * u2) - a3 * u3


if __name__ == '__main__':
    # Rosenbrock function using regular Newton.
    op = OptimizationProblem(rosen, array([320, -30]))
    s = Solver(op)
    s.plot(rosen, 'surface')
    rosenZeroDefault = s.newton()
    rosenZeroExact = s.newton(mode='exact')
    print('Solution to the optimization problem using regular Newton is:', rosenZeroDefault)
    print('Solution to the optimization problem using exact line search is:', rosenZeroExact)

    """
    function = lambda x: (x[0] - 50)**2 + (x[1] - 73)**2
    #function = lambda x: 100 * ((x[1] - x[0]**2)**2) + (1 - x[0])**2
    op = OptimizationProblem(function, array([5, 80]))
    s = Solver(op)
    s.plot(rosen, 'surface')
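# The rank-two update at the top of the previous snippet is the standard
# BFGS inverse-Hessian formula
#   H_{k+1} = H_k + (1 + gamma^T H_k gamma / delta^T gamma) * (delta delta^T / delta^T gamma)
#             - (delta gamma^T H_k + H_k gamma delta^T) / (delta^T gamma).
# A minimal standalone check (all names here are illustrative) that this
# update satisfies the secant condition H_{k+1} @ gamma == delta:
import numpy as np

rng = np.random.default_rng(0)
H = np.eye(3)
delta = rng.standard_normal(3)
gamma = rng.standard_normal(3)
dg = delta @ gamma
u1 = gamma @ H @ gamma
u2 = np.outer(delta, delta)
u3 = np.outer(delta, gamma) @ H + H @ np.outer(gamma, delta)
H_new = H + (1 + u1 / dg) * (u2 / dg) - u3 / dg
print(np.allclose(H_new @ gamma, delta))  # True: the secant condition holds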
def midPolGrad2d(x):
    return np.array([4 * x[0]**3, 4 * x[1]**3])


def hardPol2d(x):
    return np.array(100 * ((x[1] - x[0]**2)**2) + ((1 - x[0])**2)).T


def hardPolGrad2d(x):
    return np.array([
        2 * ((200 * x[0]**3) - (200 * x[0] * x[1]) + (x[0] - 1)),
        200 * (x[1] - (x[0]**2))
    ]).T


dk = np.array([-1, -1])
x0 = np.array([1.1, 1.1])
tol = 0.01

#m = x0 + Linesearch.inexactLinesearch(x0, dk, esyPol2d, esyPolGrad2d) * dk
#t = np.arange(x0 - 1., m + 1, .001)
#plt.plot(t, esyPol(t), 'b-')
#plt.plot(m, esyPol(m), 'ro')
#plt.plot(x0, esyPol(x0), 'bo')
#plt.show()

#x01 = np.array([200])
problemPol = OptimizationProblem(hardPol2d, hardPolGrad2d)
problemPol.solve(x0, tol, "ClassicNewton", "Exact")
# Min in (0, 0)
def midPol2d(x):
    return np.array(x[0]**4 + x[1]**4)


def midPolGrad2d(x):
    return np.array([4 * x[0]**3, 4 * x[1]**3])


tol = 1e-5
x0 = np.array([5])
x02d = np.array([20, 2])
x03d = np.array([20, 2, 30])

#problem1 = OptimizationProblem(f, grad)
#minimum = problem1.solve(x0, tol, 'ClassicNewton')
#print(minimum)
#
#problem2 = OptimizationProblem(f2d, grad2d)
#minimum = problem2.solve(x02d, tol, 'ClassicNewton')
#print(minimum)
#
#problem3 = OptimizationProblem(f3d, grad3d)
#minimum = problem3.solve(x03d, tol, 'ClassicNewton')
#print(minimum)

x04 = np.array([5, 5])
problem3 = OptimizationProblem(midPol2d, midPolGrad2d)
minimum = problem3.solve(x04, tol, 'BFGS', 'Inexact')
print(minimum)