Example #1
def newton(f,
           x,
           optimizer=lambda task: fibonacci(task, 0, 1)[0],
           stop_criterion=None,
           grad=None,
           hesse=None):
    if grad is None:
        grad = derivative(f)
    if hesse is None:
        hesse = hessian(f)
    if stop_criterion is None:

        def stop_criterion(iterations, x):
            return (iterations > 1000
                    or np.linalg.norm(x) > 1e9
                    or np.linalg.norm(grad(x)) < 1e-10)

    iterations = 0
    trace = [x]

    while not stop_criterion(iterations, x):
        iterations += 1

        p = np.linalg.pinv(hesse(x)).dot(grad(x))  # Newton direction; pinv tolerates a singular Hessian
        x = x - optimizer(lambda alpha: f(*(x - alpha * p))) * p  # 1-D line search for the step along -p
        trace.append(x)

    return np.array(trace)
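
The helpers derivative, hessian and fibonacci used above come from the surrounding project and are not shown here. Below is a minimal sketch under the following assumptions: derivative(f) and hessian(f) return callables that take the point as a NumPy array, and fibonacci(task, a, b) performs a 1-D line search on [a, b] and returns a tuple whose first element is the best step (a golden-section search stands in for the real Fibonacci search). The quadratic at the end is only a hypothetical smoke test, not part of the original example.

import numpy as np

def derivative(f, h=1e-6):
    # Central-difference gradient of f(*x); a stand-in for the project's helper.
    def grad(x):
        x = np.asarray(x, dtype=float)
        g = np.zeros_like(x)
        for i in range(len(x)):
            e = np.zeros_like(x)
            e[i] = h
            g[i] = (f(*(x + e)) - f(*(x - e))) / (2 * h)
        return g
    return grad

def hessian(f, h=1e-4):
    # Hessian obtained by differencing the numerical gradient above (stand-in).
    grad = derivative(f, h)
    def hesse(x):
        x = np.asarray(x, dtype=float)
        n = len(x)
        H = np.zeros((n, n))
        for i in range(n):
            e = np.zeros(n)
            e[i] = h
            H[:, i] = (grad(x + e) - grad(x - e)) / (2 * h)
        return H
    return hesse

def fibonacci(task, a, b, eps=1e-8):
    # Golden-section line search standing in for the Fibonacci search used above;
    # returns (best_step, evaluations) so that [0] picks the step.
    phi = (np.sqrt(5) - 1) / 2
    x1, x2 = b - phi * (b - a), a + phi * (b - a)
    f1, f2 = task(x1), task(x2)
    evals = 2
    while b - a > eps:
        if f1 < f2:
            b, x2, f2 = x2, x1, f1
            x1 = b - phi * (b - a)
            f1 = task(x1)
        else:
            a, x1, f1 = x1, x2, f2
            x2 = a + phi * (b - a)
            f2 = task(x2)
        evals += 1
    return (a + b) / 2, evals

# Hypothetical smoke test: a convex quadratic whose minimiser is (1, -2).
f = lambda x, y: (x - 1) ** 2 + 10 * (y + 2) ** 2
trace = newton(f, np.array([5.0, 5.0]))
print(trace[-1])  # the last iterate approaches (1, -2)
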
def conjugate_gradient(f,
                       x0,
                       optimizer=lambda task: fibonacci(task, 0, 1)[0],
                       stop_criterion=None,
                       grad=None):
    if grad is None:
        grad = derivative(f)
    if stop_criterion is None:

        def stop_criterion(iterations, x):
            return (iterations > 1000
                    or np.linalg.norm(x) > 1e9
                    or np.linalg.norm(grad(x)) < 1e-10)

    x = x0

    trace = [x]
    iterations = 0
    r = -grad(x)
    p = r

    while not stop_criterion(iterations, x):
        iterations += 1

        new_r = -grad(x)

        beta = new_r.dot(new_r) / r.dot(r)  # Fletcher-Reeves coefficient
        r = new_r  # keep the current residual for the next beta
        p = p * beta + new_r  # updated conjugate direction
        step = optimizer(lambda alpha: f(*(x + alpha * p)))  # 1-D line search along p
        x = x + step * p
        trace.append(x)

    return np.array(trace)
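
Assuming the derivative and fibonacci stand-ins sketched above, this conjugate-gradient variant can be exercised on a small convex quadratic (a hypothetical check, not part of the original example):

import numpy as np

f = lambda x, y: x ** 2 + 10 * y ** 2
trace = conjugate_gradient(f, np.array([5.0, 3.0]))
print(len(trace), trace[-1])  # number of iterates and the final point, close to (0, 0)
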
def conjugate_gradient(f,
                       x0,
                       optimizer=lambda task: fibonacci(task, 0, 1.1)[0],
                       stop_criterion=None,
                       grad=None):
    if grad is None:
        grad = derivative(f)
    if stop_criterion is None:
        stop_criterion = default_stop_criterion
    x = x0

    trace = [x]
    iterations = 0
    r = -grad(x)
    p = r

    while not stop_criterion(trace, grad, f):
        iterations += 1

        new_r = -grad(x)

        beta = new_r.dot(new_r) / r.dot(r)  # Fletcher-Reeves coefficient
        r = new_r  # keep the current residual for the next beta
        p = p * beta + new_r  # updated conjugate direction
        step = optimizer(lambda alpha: f(*(x + alpha * p)))  # 1-D line search along p
        x = x + step * p
        trace.append(x)

    return np.array(trace)
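
default_stop_criterion is referenced but never defined in these examples. Judging from the call stop_criterion(trace, grad, f) and from the inline criterion used in the earlier variants, one plausible sketch (an assumption, not the original helper) is:

import numpy as np

def default_stop_criterion(trace, grad, f, max_iters=1000, tol=1e-10):
    # Mirrors the inline criterion above: stop after too many iterations,
    # on divergence, or once the gradient is numerically zero.
    # f is accepted only to match the call signature used in the examples.
    x = trace[-1]
    return (len(trace) > max_iters
            or np.linalg.norm(x) > 1e9
            or np.linalg.norm(grad(x)) < tol)
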
Example #4
def grad_descent(f,
                 x0,
                 step_searcher=ternary_searcher,
                 eps=1e-6,
                 max_iters=100,
                 df=None):
    if df is None:
        df = derivative(f)
    x = x0
    prev = np.zeros_like(x)

    points = [x0]

    while abs(f(*x) - f(*prev)) > eps:  # alternative criterion: np.linalg.norm(x - prev) > eps
        if max_iters is not None and len(points) > max_iters:
            break

        dfx = df(x)
        dfx = dfx / np.sqrt(np.sum(dfx**2))  # normalise so the searched step is a distance
        step = step_searcher(lambda s: f(*(x - s * dfx)), [-1, 1], len(points))
        prev = x
        x = x - step * dfx
        points.append(x)

    return np.array(points)
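
ternary_searcher, the default step_searcher here, is also not shown. Given the call step_searcher(task, [-1, 1], len(points)), a plain ternary search over the bracket is a reasonable stand-in; the third argument is accepted only to match that call (both the signature and the behaviour are assumptions):

def ternary_searcher(task, bounds, iteration, steps=60):
    # Ternary search for the minimum of a unimodal task on [lo, hi].
    lo, hi = bounds
    for _ in range(steps):
        m1 = lo + (hi - lo) / 3
        m2 = hi - (hi - lo) / 3
        if task(m1) < task(m2):
            hi = m2  # the minimum cannot lie in (m2, hi]
        else:
            lo = m1  # the minimum cannot lie in [lo, m1)
    return (lo + hi) / 2
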
def grad_descent(f,
                 x0,
                 optimizer=lambda task: fibonacci(task, 0, 1.1)[0],
                 stop_criterion=None,
                 df=None):
    if df is None:
        df = derivative(f)
    if stop_criterion is None:
        stop_criterion = default_stop_criterion
    x = x0
    prev = np.zeros_like(x)

    points = [x0]

    while not stop_criterion(points, df, f):  # alternative criterion: np.linalg.norm(x - prev) > eps

        dfx = df(x)
        dfx = dfx / np.sqrt(np.sum(dfx**2))  # normalise so the searched step is a distance
        step = optimizer(lambda s: f(*(x - s * dfx)))
        prev = x
        x = x - step * dfx
        points.append(x)

    return np.array(points)
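
With the fibonacci and default_stop_criterion sketches above in scope, this variant can be smoke-tested the same way (again a purely hypothetical example):

import numpy as np

f = lambda x, y: (x - 1) ** 2 + 10 * (y + 2) ** 2
points = grad_descent(f, np.array([5.0, 5.0]))
print(points.shape, points[-1])  # iterates of the descent; the last one is close to (1, -2)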