Example #1
    def good_broyden_method(self, x0, line_search, tol=10**-6, maxit=1000):
        """
        Finds a local minimum using the good Broyden method
        :param x0: Starting point
        :param line_search: Line search method to be used
        :param tol: Tolerance for how close to the minimum we need to get
        :param maxit: Maximum number of iterations
        :return: Minimum point x
        """
        x = np.copy(x0)
        n = len(x)
        # Initial guess of inverse of Hessian
        H = np.identity(n)

        for i in range(maxit):
            # Search direction
            p = -np.dot(H, self.grad(x))
            if line_search == "exact":
                alpha = ls.ls_exact(self.func, x, p)
            elif line_search == "goldstein":
                alpha = ls.ls_gold(self.func, self.grad, x, p, tol)
            elif line_search == "wolfe":
                alpha = ls.ls_wolfe(self.func, self.grad, x, p, tol)
            else:
                raise ValueError('No valid line search method was given')

            delta = alpha * p
            x = x + delta
            if la.norm(delta) < tol:
                print('Converged in ' + str(i + 1) + ' iteration(s)!')
                return x

            # Broyden update of H
            gamma = self.grad(x) - self.grad(x - delta)
            a = (delta - np.dot(H, gamma))
            b = np.dot(np.transpose(delta), H)
            c = np.inner(delta, np.dot(H, gamma))
            if c == 0.:
                raise ArithmeticError('Division by zero!')

            H = H + np.outer(a, b) / c

        print('Did not converge. Number of iterations: ' + str(maxit) +
              '\nFinal error: ' + str(la.norm(delta)))
        return x
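A minimal, self-contained check of the rank-one update used above (not part of the original class): after H <- H + (delta - H gamma)(delta^T H) / (delta^T H gamma), the new H satisfies the secant equation H gamma = delta, which is the defining property of the good Broyden update. The vectors below are illustrative.

import numpy as np

rng = np.random.default_rng(0)
n = 4
H = np.identity(n)                 # current inverse-Hessian approximation
delta = rng.standard_normal(n)     # step taken, delta = alpha * p
gamma = rng.standard_normal(n)     # observed change in the gradient

c = np.inner(delta, np.dot(H, gamma))   # denominator delta^T H gamma
assert c != 0.
H_new = H + np.outer(delta - np.dot(H, gamma), np.dot(delta, H)) / c

print(np.allclose(np.dot(H_new, gamma), delta))   # True: secant equation holds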
Example #2
    def newton_method(self, x0, line_search, tol=10**-6, maxit=1000):
        """
        Finds a local minimum using Newton's method
        :param x0: Starting point
        :param line_search: Line search method to be used
        :param tol: Tolerance for how close to the minimum we need to get
        :param maxit: Maximum number of iterations
        :return: Minimum point x
        """
        x = np.copy(x0)

        for i in range(maxit):

            # Approximate Hessian by finite differences
            G = self.calc_hessian(x)
            G = 0.5 * (G + np.transpose(G))  # symmetrize the approximation
            try:
                # la.cholesky (scipy.linalg) returns the upper-triangular
                # factor L with G = L^T L, hence the transposed solve first
                L = la.cholesky(G)
                y = la.solve(np.transpose(L), self.grad(x))
                p = -la.solve(L, y)
            except la.LinAlgError:
                # Hessian not spd: fall back to a plain linear solve
                p = -la.solve(G, self.grad(x))
            if line_search == "exact":
                alpha = ls.ls_exact(self.func, x, p)
            elif line_search == "goldstein":
                alpha = ls.ls_gold(self.func, self.grad, x, p, tol)
            elif line_search == "wolfe":
                alpha = ls.ls_wolfe(self.func, self.grad, x, p, tol)
            else:
                raise ValueError('No valid line search method was given')

            x = x + alpha * p
            if la.norm(p) < tol:
                print("Converged in " + str(i) + " iteration(s)!")
                return x
            # print("Iteration: " + str(i) + " Step: " + str(p))
        print("Did not converge. Number of iterations: " + str(i) +
              "\nFinal error: " + str(la.norm(p)))
        return 1
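A short standalone sketch of the Cholesky-based step in the try block above, assuming `la` is scipy.linalg, whose cholesky() returns the upper-triangular factor L with G = L^T L; that convention is why the transposed factor is solved first. The matrix and gradient are made up for illustration, and the result matches a direct solve.

import numpy as np
import scipy.linalg as la

G = np.array([[4., 1.], [1., 3.]])   # a symmetric positive definite Hessian
g = np.array([1., 2.])               # gradient at the current iterate

L = la.cholesky(G)                   # upper triangular, G = L^T L
y = la.solve(np.transpose(L), g)     # solve L^T y = g
p = -la.solve(L, y)                  # solve L p' = y, then negate

print(np.allclose(p, -la.solve(G, g)))   # True: same Newton step as a direct solve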
Example #3
    def bfgs_method(self, x0, line_search, tol=10**-6, maxit=1000):
        """
        Finds a local minimum using the BFGS method
        :param x0: Starting point
        :param line_search: Line search method to be used
        :param tol: Tolerance for how close to the minimum we need to get
        :param maxit: Maximum number of iterations
        :return: Minimum point x
        """
        x = np.copy(x0)
        n = len(x)
        # Initial guess of inverse of Hessian
        H = np.identity(n)

        for i in range(maxit):
            # Search direction
            p = -np.dot(H, self.grad(x))
            if line_search == "exact":
                alpha = ls.ls_exact(self.func, x, p)
            elif line_search == "goldstein":
                alpha = ls.ls_gold(self.func, self.grad, x, p, tol)
            elif line_search == "wolfe":
                alpha = ls.ls_wolfe(self.func, self.grad, x, p, tol)
            else:
                raise ValueError('No valid line search method was given')

            w = alpha * p
            x = x + w
            if la.norm(w) < tol:
                print('Converged in ' + str(i + 1) + ' iteration(s)!')
                return x
            # BFGS update of H inverse
            y = self.grad(x) - self.grad(x - w)
            yw = np.inner(y, w)
            if yw == 0.:
                raise ArithmeticError('Division by zero!')
            rho = 1. / yw
            I = np.identity(n)
            H = np.dot(np.dot(I - rho * np.outer(w, y), H),
                       I - rho * np.outer(y, w)) + rho * np.outer(w, w)

        print('Did not converge. Number of iterations: ' + str(maxit) +
              '\nFinal error: ' + str(la.norm(w)))
        return x
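The update above can be exercised on its own: on a quadratic f(x) = 0.5 x^T A x - b^T x with exact line searches, BFGS reaches the minimizer in at most n steps and H converges to A^{-1}. The following self-contained sketch demonstrates this; A and b are illustrative and not part of the original class.

import numpy as np

A = np.array([[3., 1.], [1., 2.]])
b = np.array([1., 1.])
grad = lambda x: np.dot(A, x) - b

n = 2
x = np.zeros(n)
H = np.identity(n)
I = np.identity(n)
for _ in range(n):
    g = grad(x)
    p = -np.dot(H, g)
    alpha = -np.inner(g, p) / np.inner(p, np.dot(A, p))  # exact line search
    w = alpha * p
    x = x + w
    y = grad(x) - grad(x - w)
    rho = 1. / np.inner(y, w)
    H = np.dot(np.dot(I - rho * np.outer(w, y), H),
               I - rho * np.outer(y, w)) + rho * np.outer(w, w)

print(np.allclose(x, np.linalg.solve(A, b)))   # True: minimizer reached in n steps
print(np.allclose(H, np.linalg.inv(A)))        # True: H has converged to A^{-1}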
Example #4
    def dfp_method(self, x0, line_search, tol=10**-6, maxit=1000):
        """
        Finds a local minimum using the DFP method
        :param x0: Starting point
        :param line_search: Line search method to be used
        :param tol: Tolerance for how close to the minimum we need to get
        :param maxit: Maximum number of iterations
        :return: Minimum point x
        """
        x = np.copy(x0)
        n = len(x)
        # Initial guess of inverse of Hessian
        H = np.identity(n)

        for i in range(maxit):
            # Search direction
            p = -np.dot(H, self.grad(x))
            if line_search == "exact":
                alpha = ls.ls_exact(self.func, x, p)
            elif line_search == "goldstein":
                alpha = ls.ls_gold(self.func, self.grad, x, p, tol)
            elif line_search == "wolfe":
                alpha = ls.ls_wolfe(self.func, self.grad, x, p, tol)
            else:
                raise ValueError('No valid line search method was given')

            s = alpha * p
            x = x + s
            if la.norm(s) < tol:
                print('Converged in ' + str(i + 1) + ' iteration(s)!')
                return x
            y = self.grad(x) - self.grad(x - s)
            ys = np.inner(y, s)
            if ys == 0.:
                raise ArithmeticError('Division by zero!')
            Hy = np.dot(H, y)  # H is symmetric, so y^T H = (H y)^T
            H = H - np.outer(Hy, Hy) / np.inner(y, Hy) + np.outer(s, s) / ys

        print('Did not converge. Number of iterations: ' + str(maxit) +
              '\nFinal error: ' + str(la.norm(s)))
        return x
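One property worth knowing about the DFP update above: as long as H is symmetric positive definite and the curvature condition y^T s > 0 holds, the updated H stays symmetric positive definite, so -H grad(x) remains a descent direction. A small self-contained check with illustrative data:

import numpy as np

rng = np.random.default_rng(1)
n = 4
M = rng.standard_normal((n, n))
H = np.dot(M, M.T) + n * np.identity(n)   # a random SPD inverse-Hessian guess
s = rng.standard_normal(n)                # step taken
y = s + 0.1 * rng.standard_normal(n)      # gradient change with y^T s > 0
assert np.inner(y, s) > 0.                # curvature condition

Hy = np.dot(H, y)                         # H is symmetric, so y^T H = (H y)^T
H_new = H - np.outer(Hy, Hy) / np.inner(y, Hy) + np.outer(s, s) / np.inner(y, s)

print(np.allclose(H_new, H_new.T))            # True: symmetry preserved
print(np.all(np.linalg.eigvalsh(H_new) > 0))  # True: still positive definite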