def quasi_newton(f, fd, x, max_iterations, precision, callback):
    """Minimize ``f`` with the BFGS quasi-Newton method.

    Maintains an approximation ``H`` to the *inverse* Hessian, refined each
    iteration from the displacement ``s`` and the gradient change ``y``.

    Args:
        f: objective function.
        fd: gradient of ``f``; returns a 1-D numpy array.
        x: starting point (1-D numpy array).
        max_iterations: iteration budget (loop runs at most
            ``max_iterations - 1`` times, matching the sibling solvers).
        precision: stop once the norm of the last step is below this.
        callback: invoked as ``callback(i, direction, alpha, x)`` per step.

    Returns:
        The final iterate ``x``.
    """
    # Fix: dropped the unused locals f_prev, fd_prev and the dead
    # pre-loop x_prev assignment — none were ever read.
    I = identity(x.size)
    H = I  # start from the identity as the inverse-Hessian estimate
    for i in range(1, max_iterations):
        gradient = fd(x)
        # Search direction d = -H g (H approximates the inverse Hessian).
        direction = -H * matrix(gradient).T
        direction = squeeze(asarray(direction))
        alpha = find_step_length(f, fd, x, 1.0, direction, c2=0.9)
        x_prev = x
        x = x + alpha * direction
        callback(i, direction, alpha, x)
        if linalg.norm(x - x_prev) < precision:
            break
        # BFGS update of the inverse-Hessian approximation.
        s = matrix(x - x_prev).T
        y = matrix(fd(x) - fd(x_prev)).T
        rho = float(1 / (y.T * s))
        H = (I - rho * s * y.T) * H * (I - rho * y * s.T) + rho * s * s.T
    return x
def quasi_newton(f, fd, x, max_iterations, precision, callback):
    """BFGS quasi-Newton minimization of ``f``.

    ``H`` approximates the inverse Hessian and is updated every iteration
    via the standard BFGS rank-two formula built from the step ``s`` and
    gradient difference ``y``.

    Args:
        f: objective function.
        fd: gradient of ``f``.
        x: initial point (1-D numpy array).
        max_iterations: iteration budget.
        precision: convergence tolerance on the step norm.
        callback: progress hook ``callback(i, direction, alpha, x)``.

    Returns:
        The last iterate ``x``.
    """
    # Fix: removed f_prev/fd_prev (assigned but never read) and the
    # redundant x_prev initialization before the loop.
    I = identity(x.size)
    H = I  # identity is the conventional initial guess for H
    for i in range(1, max_iterations):
        gradient = fd(x)
        # d = -H g: quasi-Newton descent direction.
        direction = -H * matrix(gradient).T
        direction = squeeze(asarray(direction))
        alpha = find_step_length(f, fd, x, 1.0, direction, c2=0.9)
        x_prev = x
        x = x + alpha * direction
        callback(i, direction, alpha, x)
        if linalg.norm(x - x_prev) < precision:
            break
        # Rank-two BFGS update of H.
        s = matrix(x - x_prev).T
        y = matrix(fd(x) - fd(x_prev)).T
        rho = float(1 / (y.T * s))
        H = (I - rho * s * y.T) * H * (I - rho * y * s.T) + rho * s * s.T
    return x
def steepest_descent(f, fd, x, max_iterations, precision, callback):
    """Minimize ``f`` by steepest descent with a Wolfe line search.

    Each step moves along the negative gradient; the step length is
    chosen by ``find_step_length`` with ``c2=0.9``.  Iteration stops when
    the gradient norm falls below ``precision`` or the budget runs out.

    Returns the final iterate ``x``.
    """
    for iteration in range(max_iterations):
        descent = -fd(x)
        step = find_step_length(f, fd, x, 1.0, descent, c2=0.9)
        x = x + step * descent
        callback(iteration, descent, step, x)
        # Gradient small enough -> (near-)stationary point reached.
        if linalg.norm(descent) < precision:
            break
    return x
def newton(f, fd, fdd, x, max_iterations, precision, callback):
    """Minimize ``f`` with Newton's method using the exact Hessian ``fdd``.

    The Newton direction is obtained by solving the linear system
    ``H d = -g`` (never forming the inverse), then damped by a Wolfe
    line search.  Stops when the step norm drops below ``precision``.

    Returns the final iterate ``x``.
    """
    for k in range(1, max_iterations):
        grad = fd(x)
        hess = fdd(x)
        # Solve H d = -g for the Newton direction.
        newton_dir = -linalg.solve(hess, grad)
        step = find_step_length(f, fd, x, 1.0, newton_dir, c2=0.9)
        previous = x
        x = x + step * newton_dir
        callback(k, newton_dir, step, x)
        if linalg.norm(x - previous) < precision:
            break
    return x
def newton(f, fd, fdd, x, max_iterations, precision, callback):
    """Newton's method for minimizing ``f`` given gradient ``fd`` and
    Hessian ``fdd``.

    Solves ``fdd(x) d = -fd(x)`` for the search direction each pass,
    line-searches along it, and quits once the update is smaller than
    ``precision``.  Returns the last iterate.
    """
    k = 1
    while k < max_iterations:
        g, h = fd(x), fdd(x)
        # Direction from the Newton linear system; avoids explicit inverse.
        d = -linalg.solve(h, g)
        alpha = find_step_length(f, fd, x, 1.0, d, c2=0.9)
        old_x = x
        x = x + alpha * d
        callback(k, d, alpha, x)
        if linalg.norm(x - old_x) < precision:
            break
        k += 1
    return x
def conjugate_gradient(f, fd, x, max_iterations, precision, callback):
    """Minimize ``f`` with nonlinear conjugate gradient (Fletcher-Reeves).

    The first direction is steepest descent; subsequent directions mix in
    the previous direction weighted by the Fletcher-Reeves coefficient
    ``beta = ||g_{k+1}||^2 / ||g_k||^2``.

    Args:
        f: objective function.
        fd: gradient of ``f``.
        x: starting point (1-D numpy array).
        max_iterations: iteration budget.
        precision: stop when the step norm is below this value.
        callback: invoked as ``callback(i, direction, alpha, x)``.

    Returns:
        The final iterate ``x``.
    """
    # Fix: removed the dead initializations `gradient = None` and
    # `x_prev = None` — both are always reassigned before first use.
    direction = -fd(x)
    gradient_next = matrix(fd(x)).T
    for i in range(1, max_iterations):
        # Looser c2=0.1 Wolfe condition, as CG needs accurate line searches.
        alpha = find_step_length(f, fd, x, 1.0, direction, c2=0.1)
        x_prev = x
        x = x + alpha * direction
        callback(i, direction, alpha, x)
        gradient = gradient_next
        gradient_next = matrix(fd(x)).T
        if linalg.norm(x - x_prev) < precision:
            break
        # Fletcher-Reeves beta: ratio of squared gradient norms.
        BFR = (gradient_next.T * gradient_next) / (gradient.T * gradient)
        BFR = squeeze(asarray(BFR))
        direction = -squeeze(asarray(gradient_next)) + BFR * direction
    return x
def conjugate_gradient(f, fd, x, max_iterations, precision, callback):
    """Nonlinear conjugate-gradient minimization (Fletcher-Reeves form).

    Starts along the steepest-descent direction and thereafter blends the
    new negative gradient with the previous direction, scaled by
    ``beta = ||g_new||^2 / ||g_old||^2``.  Stops when the step norm is
    below ``precision``.  Returns the final iterate.
    """
    direction = -fd(x)
    grad_old = None
    grad_new = matrix(fd(x)).T
    previous = None
    for step_index in range(1, max_iterations):
        # c2=0.1 tightens the curvature condition for the line search.
        step = find_step_length(f, fd, x, 1.0, direction, c2=0.1)
        previous = x
        x = x + step * direction
        callback(step_index, direction, step, x)
        # Rotate gradients: current becomes old, fresh one becomes new.
        grad_old, grad_new = grad_new, matrix(fd(x)).T
        if linalg.norm(x - previous) < precision:
            break
        beta = squeeze(asarray((grad_new.T * grad_new) / (grad_old.T * grad_old)))
        direction = -squeeze(asarray(grad_new)) + beta * direction
    return x