# Example #1 (score: 0)
def model_2_Grad_Decent_unconstrained_nice_problem():
    """Demo: fit model 2 on a well-conditioned problem using gradient descent.

    Samples noisy points from a known parameter vector, plots the starting
    guess, runs ``BFGS_model_2`` in gradient-descent mode
    (``gradient_decent=1``), then plots the fitted solution and the
    convergence history of the gradient norms.
    """
    x_true = [3, 1, 3, 0, 0]   # parameters the synthetic data is generated from
    x_start = [1, 0, 1, 0, 0]  # initial guess handed to the optimizer

    pts, inner = generate_points(x_true, size=500, scale=0.7)
    pts = generate_noise(pts, 1 * 10 ** -1)

    # Visualize the initial guess against the noisy data.
    plot_solution(x_start, pts, inner, rxy_tilde, 0, Metode=2)

    x_fit, n_iter, grad_hist = BFGS_model_2(
        x_start, pts, inner, TOL=10 ** (-6), gradient_decent=1)
    plot_solution(x_fit, pts, inner, rxy_tilde, n_iter, Metode=2)
    convergence_plot(grad_hist, 2)
    return 0
# Example #2 (score: 0)
def model_2_BFGS_unconstrained_not_so_nice_problem():
    """Demo: fit model 2 on an ill-conditioned problem with BFGS.

    Uses a near-degenerate true parameter vector (very small radii) so the
    optimization is harder; runs full BFGS (``gradient_decent=0``) with a
    tight tolerance, then plots the fit and the convergence history.
    """
    x_true = [0.008, 1, 0.008, 0, 0]  # nearly degenerate generating parameters
    x_start = [1, 0, 1, 0, 0]         # initial guess for the optimizer

    pts, inner = generate_points(x_true, size=500)
    pts = generate_noise(pts, 1 * 10 ** -1)

    # Visualize the initial guess against the noisy data.
    plot_solution(x_start, pts, inner, rxy_tilde, 0, Metode=2)

    x_fit, n_iter, grad_hist = BFGS_model_2(
        x_start, pts, inner, TOL=10 ** (-10), gradient_decent=0)
    plot_solution(x_fit, pts, inner, rxy_tilde, n_iter, Metode=2)
    convergence_plot(grad_hist, 2)

    return 0
# Example #3 (score: 0)
def model_1_BFGS_unconstrained_nice_problem():
    """Demo: fit model 1 on a well-conditioned problem with BFGS.

    Same setup as the model-2 "nice" demo, but fits model 1 (residual
    function ``rxy``, ``BFGS_model_1``) with full BFGS and a tight
    tolerance, then plots the fitted solution and convergence history.
    """
    x_true = [3, 1, 3, 0, 0]   # parameters the synthetic data is generated from
    x_start = [1, 0, 1, 0, 0]  # initial guess for the optimizer

    pts, inner = generate_points(x_true, size=500, scale=0.7)
    pts = generate_noise(pts, 1 * 10 ** -1)

    # Visualize the initial guess against the noisy data.
    plot_solution(x_start, pts, inner, rxy, 0, Metode=1)

    x_fit, n_iter, grad_hist = BFGS_model_1(
        x_start, pts, inner, TOL=10 ** (-10), gradient_decent=0)
    plot_solution(x_fit, pts, inner, rxy, n_iter, Metode=1)
    convergence_plot(grad_hist, 1)

    return 0
# Example #4 (score: 0)
def model_2_BFGS_constrained_nice_problem():
    """Demo: fit model 2 on a well-conditioned problem with constraints.

    Generates noisy points, then solves the constrained problem via a
    barrier/penalty scheme (``beta_optimization`` over ``B_func``/``grad_B``
    with constraint functions c1..c5), and plots the result plus the
    constrained-convergence history.
    """
    x_true = [3, 1, 3, 0, 0]  # parameters the synthetic data is generated from

    constraints = [c1, c2, c3, c4, c5]  # constraint functions for the barrier

    x_start = np.array([4, 1, 3, 0, 0])
    pts, inner = generate_points(x_true, size=300, scale=1)

    pts = generate_noise(pts, 2 * 10 ** (-1))
    # Visualize the initial guess against the noisy data.
    plot_solution(x_start, pts, inner, rxy_tilde, 0, 2)

    x_fit, iters, barrier_vals = beta_optimization(
        x_start, B_func, grad_B, f2, grad2, 1, constraints,
        pts, inner, 0.1, 1000, n=0, TOL=10 ** (-6))

    # iters holds per-beta iteration counts; total iterations is their sum.
    plot_solution(x_fit, pts, inner, rxy_tilde, np.sum(iters), 2)
    convergence_plot_constr(barrier_vals)
    return 0
# Example #5 (score: 0)
            H = (np.eye(5) - rho * temp1) @ H @ (np.eye(5) -
                                                 rho * temp2) + rho * temp3
        print('n = ', n, "\t x=", xnew)
        n += 1
        funks = np.append(funks, f2(xnew, z, inner))

    return xnew, n - 1, funks


if __name__ == '__main__':
    # Script entry point: generate a noisy model-2 dataset, fit it with
    # full BFGS, and plot the initial guess, the fit, and convergence.
    #x = [0.01, 1, 0.1, 0, 0] #kult problem

    x_true = [3, 1, 3, 0, 0]  # parameters the synthetic data is generated from

    x_start = np.array([5, 1, 0.1, 0, 0])  # initial guess for the optimizer
    pts, inner = generate_points(x_true, size=500)

    # NOTE(review): Af/cf appear unused below, but constructproblem is
    # called anyway — kept in case it has side effects; confirm and remove.
    Af, cf = constructproblem(x_start)

    pts = generate_noise(pts, 2 * 10**(-1))

    # Visualize the initial guess against the noisy data.
    plot_solution(x_start, pts, inner, rxy_tilde, 0, Metode=2)

    x_fit, n_iter, grad_hist = BFGS_model_2(
        x_start, pts, inner, TOL=10**(-10), gradient_decent=0)
    plot_solution(x_fit, pts, inner, rxy_tilde, n_iter, Metode=2)
    convergence_plot(grad_hist, 2)
# Example #6 (score: 0)
if __name__ == '__main__':
    #x = [3, 1, 3, 0, 0]
    #x = [3, 1, 0.2, 0, 0]
    #x = [0.008, 1, 0.008, 0, 0] #not so nice problem

    x = [0.008, 1, 0.008, 0, 0]

    c = [c1, c2, c3, c4, c5]

    x0 = np.array([4, 1, 3, 0, 0])

    points, inner = generate_points_constr(x, size=200)

    points = generate_noise(points, 2 * 10**(-1))
    plot_solution(x, points, inner, rxy_tilde, 0, 2)

    print(B_func(x, f2, 0.1, c, points, inner))
    print(B_func(x0, f2, 0.1, c, points, inner))

    #xf = BFGS_constr(x0, B_func, grad_B, f2, grad2, 5, c, points, inner, 0.1, 1000, n = 0, TOL = 10 **(-3)) #general_BFGS(x, f, gradf, n=0, TOL=10**(-6))

    xf, itr, b_vals = beta_optimization(x0,
                                        B_func,
                                        grad_B,
                                        f2,
                                        grad2,
                                        1,
                                        c,
                                        points,
                                        inner,