Example 1
import numpy as np
from numpy.linalg import norm

# Note: grad, backtrack, quad_fit, dfp and bfgs are helper routines
# defined elsewhere in the same module.
def qnewton(fcn, x0, evalMax, eps=np.finfo(float).eps, lin=0, nIter=1e10,
            qtype=1, fk=None, dF0=None, Hk=None):
    """Quasi-Newton Method
    Usage
    (xs, fs, dFs, Hs, ct, X, it) = qnewton(fcn, x0, evalMax)

    Arguments
    fcn     = function to minimize
    x0      = initial guess
    evalMax = maximum function evaluations

    Keyword Arguments
    eps     = convergence criterion
    lin     = linsearch type (0=backtracking,1=quad fit)
    nIter   = maximum iterations
    qtype   = hessian update type (0=DFP,1=BFGS)
    fk      = initial function value
    dF0     = initial function gradient
    Hk      = initial inverse Hessian approximation
    
    Returns
    xs      = minimal point
    fs      = minimal value
    dFs     = minimal point gradient
    Hs      = minimal point inv Hessian approx
    ct      = number of function evaluations
    X       = sequence of points
    it      = number of iterations used
    """
    if (lin != 0) and (lin != 1):
        raise ValueError("Unrecognized linsearch")
    if qtype == 0:
        update = dfp
    elif qtype == 1:
        update = bfgs
    else:
        raise ValueError("Unrecognized qtype")

    # Setup
    ct = 0  # Function calls
    it = 0  # Iteration count
    x0 = np.array(x0)  # initial guess
    X = np.array([x0])  # point history
    n = np.size(x0)  # dim of problem
    if fk is None:
        fk = fcn(x0)
        ct += 1
    err = eps * 2  # initial error
    ### Initial direction: steepest descent
    if dF0 is None:
        dF0 = grad(x0, fcn, fk)
        ct += n
    d0 = -dF0
    if Hk is None:
        Hk = np.eye(n)

    ### Main Loop
    while (err > eps) and (ct < evalMax) and (it < nIter):
        # Compute new step direction
        d0 = -Hk.dot(dF0)
        # Perform line search
        p = d0 / norm(d0)
        if (lin == 0):
            m = np.dot(dF0, p)
            alp, fk, k = backtrack(x0, fcn, m, p, fk, em=evalMax - ct)
            ct += k
        elif (lin == 1):
            alp, fk, k = quad_fit(x0, fcn, p, fk)
            ct += k
        x1 = x0 + alp * p
        X = np.append(X, [x1], axis=0)

        # Update inverse hessian
        if (ct + n < evalMax):
            dF1 = grad(x1, fcn, fk)
            ct += n
        else:
            return x0, fk, dF0, Hk, ct, X, it
        delta = x1 - x0
        gamma = dF1 - dF0
        Hk = update(Hk, delta, gamma)
        # Swap values
        x0 = x1
        dF0 = dF1
        # Compute error (norm of grad)
        err = norm(dF0)
        # Iterate counter
        it += 1

    # Complete qNewton solve
    return x0, fk, dF0, Hk, ct, X, it
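
The routine above leans on several helpers (grad, backtrack, quad_fit, dfp, bfgs) that are defined elsewhere in the source module and are not shown on this page. The sketches below are reconstructions inferred only from the call sites (argument order, return values, and the evaluation counts the callers add); the original implementations may differ in detail. quad_fit would be an analogous line search based on quadratic interpolation, returning the same (alp, f, k) triple.

import numpy as np

def grad(x, fcn, f0, h=1e-6):
    # Hypothetical forward-difference gradient. Reusing the known value f0
    # costs exactly n extra evaluations, matching the callers' `ct += n`.
    x = np.asarray(x, dtype=float)
    n = np.size(x)
    g = np.zeros(n)
    for i in range(n):
        xp = x.copy()
        xp[i] += h
        g[i] = (fcn(xp) - f0) / h
    return g

def bfgs(H, delta, gamma):
    # BFGS update of the inverse Hessian approximation H.
    rho = 1.0 / np.dot(gamma, delta)
    I = np.eye(np.size(delta))
    V = I - rho * np.outer(delta, gamma)
    return V.dot(H).dot(V.T) + rho * np.outer(delta, delta)

def dfp(H, delta, gamma):
    # DFP update of the inverse Hessian approximation H.
    Hg = H.dot(gamma)
    return (H + np.outer(delta, delta) / np.dot(delta, gamma)
              - np.outer(Hg, Hg) / np.dot(gamma, Hg))

def backtrack(x0, fcn, m, p, f0, em=np.inf, alp=1.0, rho=0.5, c=1e-4):
    # Armijo backtracking along direction p, where m = dF.p is the slope.
    # Returns (step length, value at the accepted point, evaluations used).
    f1, k = f0, 0
    while k < em:
        f1 = fcn(x0 + alp * p)
        k += 1
        if f1 <= f0 + c * alp * m:
            break
        alp *= rho
    return alp, f1, k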
Example 2
def cg(f, x0, evalMax, eps=1e-3, lin=0, nIter=100, h=1e-2):
    """Conjugate Gradient solver
    Usage
    (xs, fs, ct, X, it) = cg(f, x0, evalMax)

    Arguments
    f       = function to minimize
    x0      = initial guess
    evalMax = maximum function evaluations

    Keyword arguments
    eps     = convergence criterion
    lin     = linsearch type (0=backtracking,1=quad fit)
    nIter   = maximum iterations
    h       = step size (accepted but unused in this routine)

    Returns
    xs      = minimal point
    fs      = minimal value
    ct      = number of function evaluations
    X       = sequence of points
    it      = number of iterations used
    """
    if (lin != 0) and (lin != 1):
        raise ValueError("Unrecognized linsearch")
    # Setup
    ct = 0  # Function calls
    it = 0  # Iteration count
    x0 = np.array(x0)  # initial guess
    X = np.array([x0])  # point history
    n = np.size(x0)  # dim of problem
    f0 = f(x0)
    ct += 1
    err = eps * 2  # initial error
    ### Initial direction: steepest descent
    dF0 = grad(x0, f, f0)
    ct += n
    d0 = -dF0

    ### Main loop
    while (err > eps) and (ct < evalMax) and (it < nIter):
        # Perform line search
        p = d0 / norm(d0)
        if (lin == 0):
            m = np.dot(dF0, p)
            alp, f0, k = backtrack(x0, f, m, p, f0, em=evalMax - ct)
            ct += k
        elif (lin == 1):
            alp, f0, k = quad_fit(x0, f, p, f0)
            ct += k
        x0 = x0 + alp * p
        X = np.append(X, [x0], axis=0)

        # Compute conjugate direction
        if (ct + n < evalMax):
            dF1 = grad(x0, f, f0)
            ct += n
        else:
            return x0, f0, ct, X, it
        beta = max(np.dot(dF1, dF1 - dF0) / np.dot(dF0, dF0), 0)
        d1 = -dF1 + beta * d0
        # Swap old directions
        d0 = d1
        dF0 = dF1
        # Compute error (norm of grad)
        err = norm(dF0)
        # Iterate counter
        it += 1

    # Complete CG solve
    return x0, f0, ct, X, it
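
The beta update inside the loop is the Polak-Ribiere coefficient clipped at zero (PR+), so the method falls back to a steepest-descent step whenever the coefficient would go negative. A minimal usage sketch, assuming the module helpers (grad, backtrack, norm) are available alongside cg; the quadratic test function is purely illustrative:

import numpy as np

# Convex test function with its minimum at (1, -2).
f = lambda x: (x[0] - 1.0) ** 2 + 10.0 * (x[1] + 2.0) ** 2

xs, fs, ct, X, it = cg(f, [0.0, 0.0], evalMax=500, eps=1e-6)
print(xs, fs)  # xs should approach [1, -2]
print(ct, it)  # evaluations used and iterations taken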
Example 3
def feasibility_problem(g,x0,evalMax,slack=1e-3,eps=1e-3,nIter=100,h=1e-2):
    """Feasibility problem via CG solver
    Usage
    xf, gf, ct, X, it = feasibility_problem(g,x0,evalMax)

    Arguments
    g       = leq constraint function R^n->R^k
    x0      = initial guess
    evalMax = maximum function evaluations

    Keyword arguments
    slack   = slackness on constraints
    eps     = convergence criterion
    nIter   = maximum iterations

    Returns
    xf      = feasible point
    gf      = constraint values
    ct      = number of function evaluations
    X       = sequence of points
    it      = number of iterations used
    """
    # Setup
    f   = lambda x: ext_obj(g(x),slack)

    ct  = 0              # Function calls
    it  = 0              # Iteration count
    x0  = np.array(x0)   # initial guess
    X   = np.array([x0]) # point history
    n   = np.size(x0)    # dim of problem
    g0  = g(x0);         ct += 1
    f0  = ext_obj(g0,slack)
    err = eps * 2        # initial error
    # Check for feasibility
    if feasible(g0):
        return x0, g0, ct, X, it

    # Initial direction: steepest descent
    dF0 = grad(x0,f,f0);      ct += n
    d0  = -dF0

    ### Main loop
    while (not feasible(g0)) and (ct<evalMax) and (it<nIter):
        # Perform line search
        p  = d0 / norm(d0)
        m = np.dot(dF0,p)
        alp, f0, k = backtrack(x0,f,m,p,f0,em=evalMax-ct,alp=5)
        ct += k
        x0 = x0 + alp*p
        g0 = g(x0); ct += 1
        X = np.append(X,[x0],axis=0)
        
        # Compute conjugate direction
        if (ct+n<evalMax):
            dF1 = grad(x0,f,f0); ct += n
        else:
            return x0, g0, ct, X, it
        beta = max(np.dot(dF1,dF1-dF0)/np.dot(dF0,dF0),0)
        d1 = -dF1 + beta*d0
        # Swap old directions
        d0  = d1
        dF0 = dF1
        # Compute error (norm of grad)
        err = norm(dF0)
        # Iterate counter
        it += 1

    # Complete CG solve
    return x0, g0, ct, X, it
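
Besides grad and backtrack, this routine relies on ext_obj and feasible, also defined elsewhere in the module. One plausible pair of definitions, consistent with the call sites and with the docstring's description of g as a vector of less-than-or-equal constraints (the original module may define them differently):

import numpy as np

def ext_obj(g0, slack):
    # Hypothetical exterior-penalty objective built from the constraint
    # values g0: the sum of squared violations of g(x) <= -slack, which
    # drops to zero once every constraint holds with the requested slack.
    v = np.maximum(np.asarray(g0) + slack, 0.0)
    return np.sum(v ** 2)

def feasible(g0):
    # Hypothetical feasibility test: every constraint value must be <= 0.
    return bool(np.all(np.asarray(g0) <= 0))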