Example #1
from numpy import array, minimum

def penalty(f, gradf, g, gradg, x0, sigma_exp=2, tol=0.1):
    """Quadratic penalty method for inequality constraints g(x) >= 0.

    The constrained problem min f(x) s.t. g_i(x) >= 0 is replaced by a
    sequence of unconstrained problems
        q(x) = f(x) + sum_i min(0, sigma*g_i(x))**2
    with a growing penalty parameter sigma; each subproblem is solved by
    steepest descent with an Armijo line search.
    """
    x = x0.copy()
    path = [x.copy()]
    converged = False
    step = 1.0
    while not converged:
        # increase the penalty parameter with every outer iteration
        sigma = 0.1*step**sigma_exp

        # objective plus the current penalty terms
        # (only violated constraints with g_i(x) < 0 contribute)
        def q(x):
            res = f(x)
            for lg in g(x):
                res += minimum(0, sigma*lg)**2
            return res
        def gradq(x):
            res = gradf(x).copy()  # copy so the array returned by gradf is not modified in place
            for lg, lgg in zip(g(x), gradg(x)):
                res += 2*minimum(0, sigma*lg)*sigma*lgg
            return res

        # steepest descent with Armijo step size on the penalized objective
        x = armijo_steepest_descent(q, gradq, x, tol)

        path.append(x.copy())

        # stop once every constraint is satisfied up to the tolerance
        if min(g(x)) > -tol:
            converged = True
        step += 1.0

    # iterates as columns of a (dim x n_steps) array
    return array(path).transpose()
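
For reference, here is a minimal sketch of how the routine could be driven. Neither the armijo_steepest_descent helper nor the test problem below is part of the original listing: the helper is assumed to be a plain backtracking Armijo line search along the steepest-descent direction (matching the call signature used above), and the test problem, minimizing the distance to (2, 2) subject to x_0 + x_1 <= 1, is made up for illustration.

from numpy import array, dot

# Assumed helper: steepest descent with a backtracking Armijo line search.
def armijo_steepest_descent(q, gradq, x0, tol, c=1e-4, beta=0.5, max_iter=500):
    x = x0.copy()
    for _ in range(max_iter):
        grad = gradq(x)
        if dot(grad, grad)**0.5 < tol:   # gradient small enough: subproblem solved
            break
        d = -grad
        t = 1.0
        # shrink the step until the Armijo sufficient-decrease condition holds
        while q(x + t*d) > q(x) + c*t*dot(grad, d):
            t *= beta
        x = x + t*d
    return x

# Hypothetical test problem: min (x_0-2)^2 + (x_1-2)^2  s.t.  1 - x_0 - x_1 >= 0
f     = lambda x: (x[0] - 2)**2 + (x[1] - 2)**2
gradf = lambda x: array([2*(x[0] - 2), 2*(x[1] - 2)])
g     = lambda x: array([1 - x[0] - x[1]])
gradg = lambda x: array([[-1.0, -1.0]])

path = penalty(f, gradf, g, gradg, array([0.0, 0.0]))
print(path[:, -1])   # last iterate, near the constrained minimizer (0.5, 0.5)

With the default tolerance of 0.1 the outer loop stops as soon as every constraint is violated by less than 0.1, so the reported point is only an approximation of the constrained minimizer; a tighter tol and a larger sigma_exp push it closer at the cost of more ill-conditioned subproblems.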