Code Example #1
File: zad1.py Project: jsz5/metaheuristic_algorithms
    def find_min(self):
        start_time = time.time()

        (interval, alpha, epsilon) = self.function.get_initial_data()
        x = self.__get_random_value(interval)
        self.x_best = x
        self.x_local_best = x
        gradient = nd.Gradient(self.function.get_formula())
        hessian = nd.Hessian(self.function.get_formula())
        hessian_inverse = np.linalg.inv(hessian(x))
        gradient_value = gradient(x)

        while time.time() - start_time < self.time:  # until the time budget is exhausted
            i = 0
            if len(self.tabuList) > self.tabuLength:
                self.tabuList.pop(0)
            while time.time() - start_time < self.time and np.linalg.norm(
                    gradient_value) >= epsilon and i < self.n:
                x = x - alpha * hessian_inverse.dot(gradient_value)
                gradient_value = gradient(x)
                hessian_inverse = np.linalg.inv(hessian(x))
                if self.function.get_formula()(x) < self.function.get_formula()(self.x_local_best):
                    self.x_local_best = x
                i += 1

            if self.function.get_formula()(self.x_local_best) < self.function.get_formula()(self.x_best):
                self.x_best = self.x_local_best
                self.tabuList.append([round(x_i, 3) for x_i in x])  # rounded x values go into the tabu list
            x = self.__get_random_value(interval)
            while [round(x_i, 3) for x_i in x] in self.tabuList:  # redraw while the random x is already in the tabu list
                x = self.__get_random_value(interval)
            gradient_value = gradient(x)
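
A minimal, self-contained sketch of the damped Newton update used above, with numdifftools supplying the gradient and Hessian. The Rosenbrock test function, starting point, and step size are illustrative assumptions, not part of the original project.

import numdifftools as nd
import numpy as np

def rosenbrock(x):
    return (1.0 - x[0]) ** 2 + 100.0 * (x[1] - x[0] ** 2) ** 2

gradient = nd.Gradient(rosenbrock)
hessian = nd.Hessian(rosenbrock)

x = np.array([-1.2, 1.0])
alpha = 1.0
for _ in range(50):
    g = gradient(x)
    if np.linalg.norm(g) < 1e-8:
        break
    # Newton step: x_{k+1} = x_k - alpha * H(x_k)^{-1} * grad f(x_k)
    x = x - alpha * np.linalg.solve(hessian(x), g)

print(x)  # typically approaches the minimizer [1., 1.]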
Code Example #2
def _run_hamiltonian(verbose=True):
    c = ClassicalHamiltonian()
    if verbose:
        print(c.potential(array([-0.5, 0.5])))
        print(c.potential(array([-0.5, 0.0])))
        print(c.potential(array([0.0, 0.0])))

    xopt = optimize.fmin(c.potential, c.initialposition(), xtol=1e-10)

    hessian = nd.Hessian(c.potential)

    H = hessian(xopt)
    true_H = np.array([[5.23748385e-12, -2.61873829e-12],
                       [-2.61873829e-12, 5.23748385e-12]])
    error_estimate = np.nan
    if verbose:
        print(xopt)
        print('H', H)
        print('H-true_H', np.abs(H - true_H))
        # print('error_estimate', info.error_estimate)

        eigenvalues = linalg.eigvals(H)
        normal_modes = c.normal_modes(eigenvalues)

        print('eigenvalues', eigenvalues)
        print('normal_modes', normal_modes)
    return H, error_estimate, true_H
Code Example #3
    def test_hessian_cosIx_yI_at_I0_0I():
        # cos(x-y), at (0,0)

        def fun(xy):
            return np.cos(xy[0] - xy[1])
        htrue = [[-1., 1.], [1., -1.]]
        methods = ['forward', ]  # 'reverse']

        for method in methods:
            Hfun2 = nd.Hessian(fun, method=method)
            h2 = Hfun2([0, 0])
            # print(method, (h2-np.array(htrue)))
            assert_array_almost_equal(h2, htrue)
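
The expected Hessian in this test can be derived analytically: for f(x, y) = cos(x - y), the second derivatives are f_xx = f_yy = -cos(x - y) and f_xy = f_yx = cos(x - y), which evaluate to -1 and 1 at (0, 0). A small symbolic check (sympy is an assumption here; the test itself does not use it):

import sympy as sp

x, y = sp.symbols('x y')
f = sp.cos(x - y)

# Symbolic Hessian: [[-cos(x-y), cos(x-y)], [cos(x-y), -cos(x-y)]]
H = sp.hessian(f, (x, y))
print(H.subs({x: 0, y: 0}))  # Matrix([[-1, 1], [1, -1]]), matching htrue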
Code Example #4
File: RESSPyLab.py Project: chengsfsu/RESSPyLab
def VCopt_J(x_0, listCleanTests):
    global arrayCleanTests

    arrayCleanTests = list(listCleanTests)

    grad_error_fun = nda.Gradient(errorEnsemble_nda)

    Hess_error_fun = nda.Hessian(errorEnsemble_nda)

    x_sol = NTR_J_Solver(errorEnsemble_nda, grad_error_fun, Hess_error_fun,
                         x_0)

    return x_sol
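
A minimal sketch of the nda.Gradient / nda.Hessian pattern used above, assuming nda is numdifftools.nd_algopy (the algorithmic-differentiation backend); the toy objective and evaluation point are illustrative assumptions, not the real error ensemble.

import numpy as np
import numdifftools.nd_algopy as nda

def toy_error(x):
    return (x[0] - 2.0) ** 2 + (x[0] * x[1] - 1.0) ** 2

grad_fun = nda.Gradient(toy_error)
hess_fun = nda.Hessian(toy_error)

x0 = np.array([1.0, 1.0])
print(grad_fun(x0))  # gradient of the toy objective at x0
print(hess_fun(x0))  # Hessian of the toy objective at x0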
Code Example #5
    def test_hessian_cos_x_y__at_0_0():
        # cos(x-y), at (0,0)

        def fun(xy):
            return np.cos(xy[0] - xy[1])

        htrue = [[-1., 1.], [1., -1.]]
        methods = ['forward', ]  # 'reverse']

        for method in methods:
            h_fun = nd.Hessian(fun, method=method)
            h2 = h_fun([0, 0])
            # print(method, (h2-np.array(htrue)))
            assert_allclose(h2, htrue)
Code Example #6
File: uvc_model.py Project: chengsfsu/RESSPyLab
def uvc_get_hessian(x, data):
    """ Returns the Hessian of the material model error function for a given set of test data evaluated at x.

    :param np.array x: Updated Voce-Chaboche material model parameters.
    :param list data: (pd.DataFrame) Stress-strain history for each test considered.
    :return np.array: Hessian matrix of the error function.
    """
    def f(xi):
        val = 0.
        for d in data:
            val += error_single_test_uvc(xi, d)
        return val

    hess_fun = nda.Hessian(f)
    return hess_fun(x)
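
Because differentiation is linear, the Hessian of the summed error above equals the sum of the per-test Hessians. A self-contained check with two toy per-test error functions (illustrative assumptions, not the real material-model error):

import numpy as np
import numdifftools.nd_algopy as nda

def err_a(x):
    return (x[0] - 1.0) ** 2 + x[0] * x[1]

def err_b(x):
    return (x[1] + 2.0) ** 2

def total(x):
    return err_a(x) + err_b(x)

x0 = np.array([0.5, -0.5])
h_total = nda.Hessian(total)(x0)
h_sum = nda.Hessian(err_a)(x0) + nda.Hessian(err_b)(x0)
print(np.allclose(h_total, h_sum))  # True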
Code Example #7
    def __init__(self,
                 material_model,
                 test_data,
                 barrier_function=None,
                 use_cols=True,
                 reg_qd_lambda=None):
        """ The error function for the material model, and gradients / Hessian of it.

        :param function material_model: A material model definition.
        :param list test_data: pandas.DataFrame's of test data that the error is calculated over.
        :param BarrierFunction barrier_function: Adds a barrier function to the objective function, optional.
        :param float reg_qd_lambda: (optional) If not None, applies a regularization to the Q_\infty and D_\infty
                                    parameters. Can be float or None.

        - material_model = f(x, test_data), where x is a numpy.array of the model parameters; here x is assumed to
            have shape=(n, )
        - The test_data DataFrame's must contain a column of 'e_true' and 'Sigma_true'
        - The gradient and Hessian of the objective function are calculated using algorithmic differentiation;
            therefore, the specified function should be amenable to algorithmic differentiation.
        - The regularization term is f_R(x) = reg_qd_lambda / 2. * ((Q_\infty)^2 + (D_\infty)^2)
        """
        self.error_fun = material_model
        self.test_data = test_data
        self.use_cols = use_cols
        if reg_qd_lambda is not None:
            self.reg_lambda = reg_qd_lambda / 2.
        else:
            self.reg_lambda = reg_qd_lambda

        # Barrier function initialization
        if barrier_function is None:
            self.use_barrier = False
        elif isinstance(barrier_function, BarrierFunction):
            self.use_barrier = True
            self.barrier = barrier_function
        else:
            raise RuntimeError("Improper barrier function specified.")

        # Gradient and Hessian definitions, this must always be after the barrier function is defined
        # The grad/hess are defined here so that nda is not set-up multiple times
        self.__grad_error_fun = nda.Gradient(self.value)
        self.__hess_error_fun = nda.Hessian(self.value)
        return
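
A minimal sketch of the regularization term described in the docstring, f_R(x) = reg_qd_lambda / 2 * (Q_inf**2 + D_inf**2). The parameter positions (Q_inf = x[2], D_inf = x[4]) follow the ordering documented elsewhere in this project ([E, sy0, qInf, b, dInf, a, C1, gamma1, ...]) and are an assumption here; the parameter values are hypothetical.

import numpy as np

def qd_regularization(x, reg_qd_lambda):
    q_inf = x[2]
    d_inf = x[4]
    return reg_qd_lambda / 2. * (q_inf ** 2 + d_inf ** 2)

x = np.array([200000., 350., 100., 20., 100., 200., 20000., 150.])  # hypothetical parameter vector
print(qd_regularization(x, reg_qd_lambda=0.01))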
Code Example #8
print('results_gradients=\n', results_gradients)

# HESSIAN COMPUTATION
# -------------------
print('starting hessian computation')
results_hessian_list = []
hessian_N_list = [1, 2, 4, 8, 16, 32, 64]
# hessian_N_list = [2]

for N in hessian_N_list:
    print('N=', N)
    results_hessian = np.zeros((4, 3))

    f = benchmark1.F(N)
    t = time.time()
    hessian = algopy.Hessian(f, method='forward')
    preproc_time = time.time() - t
    t = time.time()
    ref_H = hessian(3 * np.ones(N))
    run_time = time.time() - t
    results_hessian[method['algopy_reverse']] = run_time, 0.0, preproc_time

    #
    # Scientific
    f = benchmark1.F(N)
    t = time.time()
    hessian = scientific.Hessian(f)
    preproc_time = time.time() - t
    t = time.time()
    H = hessian(3 * np.ones(N))
    run_time = time.time() - t
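
A self-contained sketch of the timing pattern above: construction of the Hessian object is timed separately from its evaluation. The test function and N below are illustrative assumptions, not benchmark1.F.

import time
import numpy as np
import numdifftools as nd

def f(x):
    return np.sum(x ** 2) + np.prod(np.cos(x))

N = 8
t = time.time()
hessian = nd.Hessian(f)
preproc_time = time.time() - t  # time to set up the Hessian object

t = time.time()
H = hessian(3 * np.ones(N))
run_time = time.time() - t  # time to evaluate the N x N Hessian
print('preproc:', preproc_time, 'run:', run_time, 'shape:', H.shape)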
Code Example #9
    def test_run_hamiltonian(self):
        h, _error_estimate, true_h = run_hamiltonian(nd.Hessian(None),
                                                     verbose=False)
        assert (np.abs((h - true_h) / true_h) < 1e-4).all()
Code Example #10
    def augmented_lagrangian_opt(self, x, constraint):
        """ Augmented Lagrangian optimization procedure for inequality constraints.

        :param np.array x: Initial value of updated Voce-Chaboche model parameters.
        :param Constraint constraint: Constraints to the Augmented Lagrangian Optimization procedure.
        :return np.array: Optimized Voce-Chaboche model parameters.

        -The order of the parameters is: [E, sy0, qInf, b, dInf, a, C1, gamma1, ..., CN, gammaN] for N backstresses.
        -The strain column in the DataFrames should be labeled as "e_true", the stress should be labeled as "Sigma_true".

        The Augmented Lagrangian procedure used in this function follows Bierlaire 2015 and Bertsekas 2016.
        """
        # Define constant values
        lagrangian_step_limit = self.maximum_auglag_iterations
        tol = self.auglag_tolerance
        # Tolerance for ill-conditioned cases. Accepts the solution as an approximation if the trust-region radius and
        # the scaled step size are too small (TOL_Approx)
        tol_approx = 1e-3
        approx_it_limit = 15  # only accept a limited number of ill-conditioned iterations.

        # Trust region radius
        trust_radius = 10.0  # \Delta in Bierlaire

        # Augmented lagrangian parameters
        penalty_parameter = 10.  # c in Bierlaire
        eta_hat_zero = 0.1258925
        tau = 10
        alpha = 0.1
        beta = 0.9
        tol_0 = 1. / penalty_parameter
        ntr_precision = 1. / penalty_parameter
        eta_al = eta_hat_zero / penalty_parameter ** alpha

        # Make sure the constraint is initialized
        constraint.update_variables(x)
        g = constraint.get_g(x)

        # Inequality Lagrange multipliers
        miu = np.zeros(len(g))

        # Arguments for the NTR procedure
        ntr_parameters = {'c': penalty_parameter, 'Delta': trust_radius, 'miu': miu, 'tol_k': ntr_precision}

        # Define the error function, and the gradient / Hessian for each set of test data with AlgoPy
        grad_error_fun = nda.Gradient(self.error_ensemble_nda)
        hess_error_fun = nda.Hessian(self.error_ensemble_nda)

        # Reset the total iterations to zero
        self.total_iterations = 0

        # Run the optimization
        num_of_approx_it = 0
        for i in range(int(lagrangian_step_limit)):
            print("##########      New Lagrangian Step      ###########")

            # Solve the trust region sub-problem
            x_init = x * 1.0
            ntr_parameters['Delta'] = trust_radius  # this is only here because of the approximate tolerance
            ntr_parameters['c'] = penalty_parameter
            ntr_parameters['miu'] = miu
            ntr_parameters['tol_k'] = ntr_precision
            [x, Delta2, nit] = self.ntr_j_solver_lag(x, ntr_parameters, constraint, grad_error_fun, hess_error_fun)

            # Update the constraints
            constraint.update_variables(x)
            g = constraint.get_g(x)
            g_max = np.maximum(np.zeros(np.shape(g)), g)
            inequality_constraint_norm = np.linalg.norm(g_max)

            if inequality_constraint_norm <= eta_al:
                # Update the Lagrange multipliers
                for j in range(len(miu)):
                    miu[j] = max(0, miu[j] + penalty_parameter * g[j])
                ntr_precision = ntr_precision / penalty_parameter
                eta_al = eta_al / penalty_parameter ** beta
            else:
                # Update the penalty parameter
                penalty_parameter = tau * penalty_parameter
                ntr_precision = tol_0 / penalty_parameter
                eta_al = eta_hat_zero / penalty_parameter ** alpha

            # Check convergence
            step_size = np.linalg.norm(x - x_init) * 1.0
            grad_lagrangian = self.grad_lagrangian(x, grad_error_fun, penalty_parameter, miu, constraint)

            # Check that gradient is nearly 0, and all the constraints are satisfied
            if np.linalg.norm(grad_lagrangian) < tol and inequality_constraint_norm < tol:
                print("####################################################")
                print("### SUCCESSFUL AUGMENTED LAGRANGIAN OPTIMIZATION ###")
                print("####################################################")
                print("########## TERMINATING AUGMENTED LAGRANGIAN ########")
                print("####################################################")
                print("x = ", x)
                break
            elif Delta2 < min_trust_radius(x):
                print("####################################################")
                print("###### EXITING BECAUSE OF TRUST REGION RADIUS ######")
                print("####################################################")
                print("x = ", x)
                break
            elif self.total_iterations >= self.maximum_total_iterations:
                print("####################################################")
                print("######## EXITING BECAUSE OF TOTAL ITERATIONS #######")
                print("####################################################")
                print("x = ", x)
                break
            elif self.accepting_approx_its and step_size < tol_approx and Delta2 < tol_approx:
                print(" WARNING: SECONDARY CONVERGENCE CRITERIA TRIGGERED. NORM OF GRADIENT NOT WITHIN TOLERANCE.")
                print(" ONLY AN APPROXIMATE SOLUTION IS OBTAINED")
                num_of_approx_it = num_of_approx_it + 1
                trust_radius = 1.0
                if num_of_approx_it > approx_it_limit:
                    print("####################################")
                    print("# TERMINATING AUGMENTED LAGRANGIAN #")
                    print("####################################")
                    print("x = ", x)
                    break

        return x
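
A compact, self-contained restatement of the multiplier and penalty update schedule used in the loop above (the same scheme as in Bierlaire 2015 / Bertsekas 2016); g, miu, and the default parameter values here are stand-ins for the class attributes used in the method.

import numpy as np

def auglag_update(g, miu, c, ntr_precision, eta_al, tol_0, eta_hat_zero,
                  tau=10., alpha=0.1, beta=0.9):
    g_max = np.maximum(np.zeros(np.shape(g)), g)
    if np.linalg.norm(g_max) <= eta_al:
        # Constraint violation small enough: update the Lagrange multipliers
        miu = np.maximum(0., miu + c * g)
        ntr_precision = ntr_precision / c
        eta_al = eta_al / c ** beta
    else:
        # Otherwise increase the penalty parameter and reset the tolerances
        c = tau * c
        ntr_precision = tol_0 / c
        eta_al = eta_hat_zero / c ** alpha
    return miu, c, ntr_precision, eta_al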
Code Example #11
File: RESSPyLab.py Project: chengsfsu/RESSPyLab
def AugLag_Opt(x, rho_iso_inf, rho_iso_sup, rho_yield_inf, rho_yield_sup,
               listCleanTests):
    global arrayCleanTests

    arrayCleanTests = list(listCleanTests)

    ## Augmented Lagrangian - cf. Bierlaire 2015 and Bertsekas 2016

    # Initialization

    x = np.array(x) * 1.0

    itLag = 1e3

    TOL = 1e-10
    TOL_Approx = 1e-3  # tolerance for ill-conditioned cases. Accepts the solution as an approximation if the trust-region radius and the scaled step size are too small (TOL_Approx)

    # NTR parameters

    Delta = 10.0

    # Augmented lagrangian parameters

    c = 10.
    eta_hat_zero = 0.1258925
    tau = 10
    alpha = 0.1
    beta = 0.9
    tol_0 = 1. / c
    tol_k = 1. / c
    eta_al = eta_hat_zero / c**alpha

    # Correction of initial point for feasible dual solution

    nBack = int((len(x) - 4) / 2)

    rho_iso_start = (rho_iso_inf + rho_iso_sup) / 2.
    rho_yield_start = (rho_yield_inf + rho_yield_sup) / 2.

    x[2] = -(1 - rho_yield_start) / (
        1 - (rho_iso_start - 1) / rho_iso_start) * x[1]

    sum_of_ck_gam_k = -(rho_iso_start - 1) / rho_iso_start * x[2]

    for k in range(nBack):
        x[4 + 2 * k] = sum_of_ck_gam_k / (nBack * 1.)
        x[5 + 2 * k] = 1.

    # Initial inequality function vector

    g = makeG(x, rho_iso_inf, rho_iso_sup, rho_yield_inf, rho_yield_sup)

    # Inequality multipliers

    miu = np.zeros(len(g))

    # Defining gradient and Hessian of errors function with AlgoPy

    grad_error_fun = nda.Gradient(errorEnsemble_nda)

    Hess_error_fun = nda.Hessian(errorEnsemble_nda)

    approxIt = 0
    approxIt_lim = 10  # only accept a limited number of ill-conditioned iterations.

    for i in range(int(itLag)):

        print("##########      New Lagrangian Step      ###########")

        x_1 = x * 1.0

        Larg = [
            x, Delta, miu, c, g, rho_iso_inf, rho_iso_sup, rho_yield_inf,
            rho_yield_sup
        ]

        Larg = NTR_J_Solver_Lag(Lagrangian, grad_error_fun, Hess_error_fun,
                                Larg, tol_k)

        x, Delta, miu, c, g, rho_iso_inf, rho_iso_sup, rho_yield_inf, rho_yield_sup = Larg

        sum_of_ck_gam_k = 0.0

        for k in range(nBack):
            sum_of_ck_gam_k = sum_of_ck_gam_k + x[4 + 2 * k] / x[5 + 2 * k]

        g = makeG(x, rho_iso_inf, rho_iso_sup, rho_yield_inf, rho_yield_sup)

        g_max = np.maximum(np.zeros(len(g)), g)
        norm_ineq = np.sqrt(np.dot(g_max, g_max))

        if norm_ineq <= eta_al:

            for j in range(len(miu)):
                miu[j] = max(0, miu[j] + c * g[j])

            tol_k = tol_k / c
            eta_al = eta_al / c**beta

        else:

            c = tau * c
            tol_k = tol_0 / c
            eta_al = eta_hat_zero / c**alpha

        d = (x - x_1) * 1.0

        gradLag_vec = gradLag(Larg, grad_error_fun)

        # print ("Norm gradLag=", np.sqrt(np.dot(gradLag_vec, gradLag_vec)), " ,Norm_ineq=", norm_ineq)

        if np.sqrt(np.dot(gradLag_vec, gradLag_vec)) < TOL and norm_ineq < TOL:
            print "####################################################"
            print "### SUCCESSFUL AUGMENTED LAGRANGIAN OPTIMIZATION ###"
            print "####################################################"
            print "########## TERMINATING AUGMENTED LAGRANGIAN ########"
            print "####################################################"
            print("x = ", x)
            break
        elif np.linalg.norm(d) < TOL_Approx and Delta < TOL_Approx:
            print " WARNING: SECONDARY CONVERGENCE CRITERIA TRIGGERED. NORM OF GRADIENT NOT WITHIN TOLERANCE."
            print " ONLY AN APPROXIMATE SOLUTION IS OBTAINED"
            approxIt = approxIt + 1
            Delta = 1.0
            if approxIt > approxIt_lim:
                print "####################################"
                print "# TERMINATING AUGMENTED LAGRANGIAN #"
                print "####################################"
                print("x = ", x)
                break

            else:
                continue

    return x