Example #1
import numpy


def singleEuler(nu, u, v, Gu, Gv, dx, dy, dt, Nx, Ny, c1, c2):
    '''
    Advance the velocity field (u, v) one explicit-Euler step of a
    projection method: predict an intermediate velocity, solve a
    pressure Poisson equation with conjugate gradient, then project
    the velocity back onto the divergence-free space.

    updGhosts, CalG, and CG are helpers defined elsewhere in this
    project.
    '''

    # Refresh ghost cells so stencils near the boundary are valid.
    updGhosts(u, v)

    # Evaluate the convective/diffusive right-hand sides Gu, Gv.
    CalG(nu, u, v, dx, dy, Gu, Gv, c1)

    # Predictor: intermediate (non-solenoidal) velocity.
    u += c2 * dt * Gu
    v += c2 * dt * Gv

    # Divergence of the intermediate velocity: the Poisson right-hand side.
    b = numpy.zeros((Nx+2, Ny+2))
    b[1:-1, 1:-1] = ((u[2:-1, 1:-1] - u[1:-2, 1:-1]) / dx +
                     (v[1:-1, 2:-1] - v[1:-1, 1:-2]) / dy)

    # Pressure Poisson solve via the project's CG helper ('N' is
    # presumably a boundary-condition flag, refP a reference pressure
    # pinning the constant mode).
    p, Nitr = CG(Nx, Ny, numpy.zeros((Nx+2, Ny+2)),
                 b, dx, dy, 1e-15, 'N', refP=0)

    # Corrector: subtract the pressure gradient to restore a
    # divergence-free velocity field.
    u[2:-2, 1:-1] -= (p[2:-1, 1:-1] - p[1:-2, 1:-1]) / dx
    v[1:-1, 2:-2] -= (p[1:-1, 2:-1] - p[1:-1, 1:-2]) / dy

    return u, v, p, Gu, Gv
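
The CG routine called above is a project helper that is not shown in this listing. For reference, here is a minimal matrix-free sketch of the algorithm it presumably implements; cg_solve and apply_A are hypothetical names, and the sketch assumes a symmetric positive-definite operator (which the Neumann Poisson problem above needs the refP pinning to guarantee).

import numpy as np

def cg_solve(apply_A, b, x0, tol=1e-10, max_iter=1000):
    # Matrix-free conjugate gradient for A x = b, A symmetric positive
    # definite; apply_A(v) must return the product A @ v.
    x = np.array(x0, dtype=float)
    r = b - apply_A(x)
    p = r.copy()
    rs_old = r @ r
    if np.sqrt(rs_old) < tol:          # already converged at the start
        return x, 0
    for k in range(max_iter):
        Ap = apply_A(p)
        alpha = rs_old / (p @ Ap)      # step length along p
        x += alpha * p
        r -= alpha * Ap                # update the residual
        rs_new = r @ r
        if np.sqrt(rs_new) < tol:      # converged
            break
        p = r + (rs_new / rs_old) * p  # next conjugate direction
        rs_old = rs_new
    return x, k + 1

# Example: 1-D Laplacian with Dirichlet boundaries.
n = 50
A = 2.0 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
x, iters = cg_solve(lambda v: A @ v, np.ones(n), np.zeros(n))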
Example #2
import numpy as np

# entry4, numberOfFunctions, dim, f, grad_f, hesse_f_N, CG,
# chooseFunctionsAutomatic and addFunctionsAutomatic are defined
# elsewhere in this script.
def optimize():

    try:
        # Tolerance threshold entered by the user.
        tolerance = 10.0 ** -int(entry4.get())
    except Exception:
        # Fall back to the default threshold 5e-8.
        tolerance = 5e-8

    # Start with at most five of the available functions.
    number = min(numberOfFunctions, 5)

    chooseFunctionsAutomatic(number)
    x = np.zeros(dim)
    # Approximate Newton direction: solve the Newton system with CG.
    p = CG(hesse_f_N(x), -grad_f(x), 10 * dim, 1e-5, x)
    F = f(x)
    Grad = grad_f(x)

    alpha = 1.0  # Armijo backtracking line search
    while f(x + alpha * p) > F + 0.5 * alpha * np.dot(Grad, p):
        alpha *= 0.5

    xold = np.copy(x)  # keep the previous iterate
    x = x + alpha * p  # update x
    F = f(x)
    Grad = grad_f(x)

    iteration = 1

    # Iterate until the gradient is small relative to 1 + |f| and the
    # gradient norm, function value, and iterate have all stabilized.
    while ((np.linalg.norm(Grad) > tolerance * (1 + abs(F)) and
            abs(np.linalg.norm(Grad) - np.linalg.norm(grad_f(xold))) > 1e-7)
           or abs(F - f(xold)) > 1e-8 * (1 + abs(F))
           or np.linalg.norm(xold - x) > 1e-8 * (1 + np.linalg.norm(x))):

        # If the iterate has almost stalled, add more functions to the
        # model; stop once every available function is in use.
        if np.linalg.norm(xold - x) <= 1e-6 * (1 + np.linalg.norm(x)):
            if number < numberOfFunctions:
                addFunctionsAutomatic()
                number += 1
            else:
                break

        p = CG(hesse_f_N(x), -Grad, 10 * dim, 1e-4, x)

        alpha = 1.0  # Armijo backtracking line search
        while f(x + alpha * p) > F + 0.5 * alpha * np.dot(Grad, p):
            alpha *= 0.5
        xold = np.copy(x)  # keep the previous iterate
        x = x + alpha * p  # update x
        F = f(x)
        Grad = grad_f(x)
        iteration += 1

    print("Iterationen: " + str(iteration - 1))
    print("Anzahl gewählter Funktionen: " + str(number))
    print("Minimierer x der Funktionen ist: " + str(x))
    print("Der Fehler im Gradienten liegt bei: " + str(np.linalg.norm(Grad)))

    print("f(x) = " + str(F))
    print("Norm Gradient: " + str(np.linalg.norm(Grad)))
    print("Toleranz Gradient: " + str(tolerance * (1 + abs(F))))
    print("Unterschied Norm Gradient: " +
          str(abs(np.linalg.norm(Grad) - np.linalg.norm(Grad))))
    print("Toleranz Unterschied Gradient: " + str((10**-7)))
    print("Unterschied f: " + str(abs(F - f(xold))))
    print("Tolleranz f: " + str((10**-8) * (1 + abs(F))))
    print("Unterschied x: " + str(np.linalg.norm(xold - x)))
    print("Tolleranz x: " + str(math.pow(10, -8) * (1 + np.linalg.norm(x))))
Example #3
import numpy as np

import BFGS  # project-local solver modules
import CG
import TR


def uncon(func, x0, max_iter, tol):
    '''
    Dispatch an unconstrained minimization to one of three solvers:
    line-search BFGS, trust-region BFGS, or conjugate gradient.
    func() must return the objective and gradient callables.
    '''

    method = "BFGS_LS"
    mode = 0   # solve mode
    # mode = 1 # analysis mode

    # Get the objective function and gradient.
    f, g = func()

    n = x0.shape[0]

    if method == "BFGS_LS":
        # Initial inverse-Hessian approximation: the identity.
        # (np.matrix is deprecated; kept here because the BFGS class
        # expects matrix semantics.)
        V0 = np.matrix(np.eye(n))
        p = BFGS.BFGS(f, g, x0, V0, n, mode)
        if mode == 0:
            x, J = p.optimize(tol)
        elif mode == 1:
            x_list, n_iter_list, log_g_norm_list = p.optimize(tol)

    elif method == "BFGS_TR":
        # Initial Hessian approximation and trust-region radii.
        B_0 = np.matrix(np.eye(n))
        Delta_0 = 1.0
        Delta_max = 10.0
        if mode == 0:
            x, J = TR.trustRegion(Delta_0, Delta_max, tol,
                                  x0, B_0, f, g, mode)
        elif mode == 1:
            x_list, n_iter_list, log_g_norm_list = TR.trustRegion(
                Delta_0, Delta_max, tol, x0, B_0, f, g, mode)

    elif method == "CG":
        p = CG.CG(f, g, x0, mode)
        if mode == 0:
            x, J = p.optimize(tol)
        elif mode == 1:
            x_list, n_iter_list, log_g_norm_list = p.optimize(tol)

    if mode == 0:
        return x, J
    elif mode == 1:
        return x_list, n_iter_list, log_g_norm_list
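
A hedged usage sketch: uncon expects func to return the objective and its gradient as a pair of callables. The Rosenbrock pair below is an illustration assumed here, not taken from the project; running it requires the project's BFGS module on the path, and note that max_iter is accepted but never consulted by the body above.

import numpy as np

def rosenbrock_funcs():
    # Returns (objective, gradient) callables, matching "f, g = func()".
    def f(x):
        return 100.0 * (x[1] - x[0] ** 2) ** 2 + (1.0 - x[0]) ** 2

    def g(x):
        return np.array([
            -400.0 * x[0] * (x[1] - x[0] ** 2) - 2.0 * (1.0 - x[0]),
            200.0 * (x[1] - x[0] ** 2),
        ])

    return f, g

x_opt, J = uncon(rosenbrock_funcs, np.array([-1.2, 1.0]), max_iter=200, tol=1e-6)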