Example #1
    def find_min(self):
        start_time = time.time()

        (interval, alpha, epsilon) = self.function.get_initial_data()
        x = self.__get_random_value(interval)
        self.x_best = x
        self.x_local_best = x
        gradient = nd.Gradient(self.function.get_formula())
        hessian = nd.Hessian(self.function.get_formula())
        hessian_inverse = np.linalg.inv(hessian(x))
        gradient_value = gradient(x)

        while time.time() - start_time < self.time:  # loop until the time budget runs out
            i = 0
            if len(self.tabuList) > self.tabuLength:
                self.tabuList.pop(0)
            while time.time() - start_time < self.time and np.linalg.norm(
                    gradient_value) >= epsilon and i < self.n:
                x = x - alpha * hessian_inverse.dot(gradient_value)
                gradient_value = gradient(x)
                hessian_inverse = np.linalg.inv(hessian(x))
                if self.function.get_formula()(x) < self.function.get_formula()(self.x_local_best):
                    self.x_local_best = x
                i += 1

            if self.function.get_formula()(self.x_local_best) < self.function.get_formula()(self.x_best):
                self.x_best = self.x_local_best
                self.tabuList.append([round(x_i, 3) for x_i in x])  # rounded x values are added to the tabu list
            x = self.__get_random_value(interval)
            while [round(x_i, 3) for x_i in x] in self.tabuList:  # re-draw while the new random x is still tabu
                x = self.__get_random_value(interval)
            gradient_value = gradient(x)
            hessian_inverse = np.linalg.inv(hessian(x))
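As a point of reference, the Newton update used in this example can be exercised on a standalone function. The following is a minimal sketch, assuming nd refers to numdifftools (numdifftools.nd_algopy exposes the same Gradient / Hessian interface); the Rosenbrock test function and the starting point are illustrative choices, not part of the original code:

import numpy as np
import numdifftools as nd

def rosenbrock(x):
    return (1. - x[0]) ** 2 + 100. * (x[1] - x[0] ** 2) ** 2

gradient = nd.Gradient(rosenbrock)
hessian = nd.Hessian(rosenbrock)

x = np.array([-1.0, 1.0])
for _ in range(20):
    gradient_value = gradient(x)
    if np.linalg.norm(gradient_value) < 1e-8:
        break
    # Full Newton step: x <- x - H(x)^(-1) grad f(x)
    x = x - np.linalg.inv(hessian(x)).dot(gradient_value)

print(x)  # converges to the minimum at [1., 1.]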
Example #2
    def test_on_scalar_function():
        def fun(x):
            return np.sum(x**2)

        dtrue = [2., 4., 6.]

        for method in ['forward', 'reverse']:

            dfun = nd.Gradient(fun, method=method)
            d = dfun([1, 2, 3])
            assert_allclose(d, dtrue)
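A self-contained version of the same test might look as follows; it assumes nd refers to numdifftools.nd_algopy, whose Gradient wrapper accepts method='forward' or method='reverse':

import numpy as np
from numpy.testing import assert_allclose
import numdifftools.nd_algopy as nd

def fun(x):
    return np.sum(x ** 2)

# d/dx_i of sum(x**2) is 2 * x_i, so the gradient at [1, 2, 3] is [2, 4, 6]
dtrue = [2., 4., 6.]
for method in ['forward', 'reverse']:
    dfun = nd.Gradient(fun, method=method)
    assert_allclose(dfun([1, 2, 3]), dtrue)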
Example #3
def VCopt_J(x_0, listCleanTests):
    # Make the test data available to errorEnsemble_nda through the module-level global
    global arrayCleanTests
    arrayCleanTests = list(listCleanTests)

    # Gradient and Hessian of the error function via algorithmic differentiation (AlgoPy)
    grad_error_fun = nda.Gradient(errorEnsemble_nda)
    Hess_error_fun = nda.Hessian(errorEnsemble_nda)

    # Newton trust-region solve starting from x_0
    x_sol = NTR_J_Solver(errorEnsemble_nda, grad_error_fun, Hess_error_fun,
                         x_0)

    return x_sol
Example #4
    def __init__(self,
                 material_model,
                 test_data,
                 barrier_function=None,
                 use_cols=True,
                 reg_qd_lambda=None):
        """ The error function for the material model, and gradients / Hessian of it.

        :param function material_model: A material model definition.
        :param list test_data: pandas.DataFrame's of test data that the error is calculated over.
        :param BarrierFunction barrier_function: Adds a barrier function to the objective function, optional.
        :param float reg_qd_lambda: (optional) If not None, applies a regularization to the Q_\infty and D_\infty
                                    parameters. Can be float or None.

        - material_model = f(x, test_data), where x is a numpy.array of the model parameters; here x is assumed to
            have shape=(n, )
        - The test_data DataFrame's must contain a column of 'e_true' and 'Sigma_true'
        - The gradient and Hessian of the objective function are calculated using algorithmic differentiation,
            therefore, the specified function should be amenable to algorithmic differentiation.
        - The regularization term is f_R(x) = reg_qd_lambda / 2. * ((Q_\infty)^2 + (D_\infty)^2)
        """
        self.error_fun = material_model
        self.test_data = test_data
        self.use_cols = use_cols
        if reg_qd_lambda is not None:
            self.reg_lambda = reg_qd_lambda / 2.
        else:
            self.reg_lambda = reg_qd_lambda

        # Barrier function initialization
        if barrier_function is None:
            self.use_barrier = False
        elif isinstance(barrier_function, BarrierFunction):
            self.use_barrier = True
            self.barrier = barrier_function
        else:
            raise RuntimeError("Improper barrier function specified.")

        # Gradient and Hessian definitions, this must always be after the barrier function is defined
        # The grad/hess are defined here so that nda is not set-up multiple times
        self.__grad_error_fun = nda.Gradient(self.value)
        self.__hess_error_fun = nda.Hessian(self.value)
        return
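The regularization described in the docstring is a quadratic penalty on the two saturation parameters. Below is a minimal sketch of f_R(x) = reg_qd_lambda / 2 * (Q_inf^2 + D_inf^2); the indices used for Q_inf and D_inf (x[2] and x[4]) are an assumption taken from the parameter ordering quoted in Example #6, not something stated in this snippet:

def regularization_term(x, reg_qd_lambda):
    # Hypothetical helper: x[2] and x[4] are assumed to hold Q_inf and D_inf
    q_inf, d_inf = x[2], x[4]
    return reg_qd_lambda / 2. * (q_inf ** 2 + d_inf ** 2)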
Example #5
# GRADIENT COMPUTATION
# --------------------

gradient_N_list = [2, 4, 8, 16, 32, 64, 96]
# gradient_N_list = [20]

results_gradient_list = []
for N in gradient_N_list:
    print('N=', N)
    results_gradient = np.zeros((4, 3))
    # algopy, UTPS variant
    f = benchmark1.F(N)
    f0 = f(3 * np.ones(N))
    t = time.time()
    gradient = algopy.Gradient(f, method='forward')
    preproc_time = time.time() - t
    t = time.time()
    ref_g = gradient(3 * np.ones(N))
    run_time = time.time() - t
    results_gradient[method['algopy_reverse']] = run_time, 0.0, preproc_time

    # scientific
    f = benchmark1.F(N)
    t = time.time()
    gradient = scientific.Gradient(f)
    preproc_time = time.time() - t
    t = time.time()
    g = gradient(3 * np.ones(N))
    run_time = time.time() - t
    results_gradient[method['scientific']] = run_time, np.linalg.norm(
        g - ref_g) / np.linalg.norm(ref_g), preproc_time
Example #6
    def augmented_lagrangian_opt(self, x, constraint):
        """ Augmented Lagrangian optimization procedure for inequality constraints.

        :param np.array x: Initial value of updated Voce-Chaboche model parameters.
        :param Constraint constraint: Constraints to the Augmented Lagrangian Optimization procedure.
        :return np.array: Optimized Voce-Chaboche model parameters.

        - The order of the parameters is: [E, sy0, qInf, b, dInf, a, C1, gamma1, ..., CN, gammaN] for N backstresses.
        - The strain column in the DataFrames should be labeled as "e_true", and the stress column as "Sigma_true".

        The Augmented Lagrangian procedure used in this function follows Bierlaire 2015 and Bertsekas 2016.
        """
        # Define constant values
        lagrangian_step_limit = self.maximum_auglag_iterations
        tol = self.auglag_tolerance
        # Tolerance for ill-conditioned cases. Accepts the solution as an approximation if the trust-region radius and
        # the scaled step size are too small (TOL_Approx)
        tol_approx = 1e-3
        approx_it_limit = 15  # only accept a limited number of ill-conditioned iterations.

        # Trust region radius
        trust_radius = 10.0  # \Delta in Bierlaire

        # Augmented lagrangian parameters
        penalty_parameter = 10.  # c in Bierlaire
        eta_hat_zero = 0.1258925
        tau = 10
        alpha = 0.1
        beta = 0.9
        tol_0 = 1. / penalty_parameter
        ntr_precision = 1. / penalty_parameter
        eta_al = eta_hat_zero / penalty_parameter ** alpha

        # Make sure the constraint is initialized
        constraint.update_variables(x)
        g = constraint.get_g(x)

        # Inequality Lagrange multipliers
        miu = np.zeros(len(g))

        # Arguments for the NTR procedure
        ntr_parameters = {'c': penalty_parameter, 'Delta': trust_radius, 'miu': miu, 'tol_k': ntr_precision}

        # Define the error function, and the gradient / Hessian for each set of test data with AlgoPy
        grad_error_fun = nda.Gradient(self.error_ensemble_nda)
        hess_error_fun = nda.Hessian(self.error_ensemble_nda)

        # Reset the total iterations to zero
        self.total_iterations = 0

        # Run the optimization
        num_of_approx_it = 0
        for i in range(int(lagrangian_step_limit)):
            print("##########      New Lagrangian Step      ###########")

            # Solve the trust region sub-problem
            x_init = x * 1.0
            ntr_parameters['Delta'] = trust_radius  # this is only here because of the approximate tolerance
            ntr_parameters['c'] = penalty_parameter
            ntr_parameters['miu'] = miu
            ntr_parameters['tol_k'] = ntr_precision
            [x, Delta2, nit] = self.ntr_j_solver_lag(x, ntr_parameters, constraint, grad_error_fun, hess_error_fun)

            # Update the constraints
            constraint.update_variables(x)
            g = constraint.get_g(x)
            g_max = np.maximum(np.zeros(np.shape(g)), g)
            inequality_constraint_norm = np.linalg.norm(g_max)

            if inequality_constraint_norm <= eta_al:
                # Update the Lagrange multipliers
                for j in range(len(miu)):
                    miu[j] = max(0, miu[j] + penalty_parameter * g[j])
                ntr_precision = ntr_precision / penalty_parameter
                eta_al = eta_al / penalty_parameter ** beta
            else:
                # Update the penalty parameter
                penalty_parameter = tau * penalty_parameter
                ntr_precision = tol_0 / penalty_parameter
                eta_al = eta_hat_zero / penalty_parameter ** alpha

            # Check convergence
            step_size = np.linalg.norm(x - x_init) * 1.0
            grad_lagrangian = self.grad_lagrangian(x, grad_error_fun, penalty_parameter, miu, constraint)

            # Check that gradient is nearly 0, and all the constraints are satisfied
            if np.linalg.norm(grad_lagrangian) < tol and inequality_constraint_norm < tol:
                print "####################################################"
                print "### SUCCESSFUL AUGMENTED LAGRANGIAN OPTIMIZATION ###"
                print "####################################################"
                print "########## TERMINATING AUGMENTED LAGRANGIAN ########"
                print "####################################################"
                print ("x = ", x)
                break
            elif Delta2 < min_trust_radius(x):
                print "####################################################"
                print "###### EXITING BECAUSE OF TRUST REGION RADIUS ######"
                print "####################################################"
                print ("x = ", x)
                break
            elif self.total_iterations >= self.maximum_total_iterations:
                print "####################################################"
                print "######## EXITING BECAUSE OF TOTAL ITERATIONS #######"
                print "####################################################"
                print ("x = ", x)
                break
            elif self.accepting_approx_its and step_size < tol_approx and Delta2 < tol_approx:
                print " WARNING: SECONDARY CONVERGENCE CRITERIA TRIGGERED. NORM OF GRADIENT NOT WITHIN TOLERANCE."
                print " ONLY AN APPROXIMATE SOLUTION IS OBTAINED"
                num_of_approx_it = num_of_approx_it + 1
                trust_radius = 1.0
                if num_of_approx_it > approx_it_limit:
                    print "####################################"
                    print "# TERMINATING AUGMENTED LAGRANGIAN #"
                    print "####################################"
                    print ("x = ", x)
                    break

        return x
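The multiplier and penalty-parameter updates above follow the standard augmented Lagrangian scheme for inequality constraints (Bierlaire 2015, Bertsekas 2016). Below is a minimal, self-contained sketch of that scheme on a toy problem; scipy.optimize.minimize stands in for the Newton trust-region inner solver of this example, and the problem, penalty value, and tolerance are illustrative assumptions:

import numpy as np
from scipy.optimize import minimize

# Toy problem: minimize f(x) subject to g(x) <= 0
def f(x):
    return (x[0] - 2.) ** 2 + (x[1] - 1.) ** 2

def g(x):
    return np.array([x[0] + x[1] - 2.])

c = 10.            # penalty parameter ("penalty_parameter" above)
miu = np.zeros(1)  # inequality Lagrange multipliers ("miu" above)
x = np.zeros(2)

for _ in range(20):
    def augmented_lagrangian(z, miu=miu, c=c):
        # Bertsekas-style augmented Lagrangian for inequality constraints
        t = np.maximum(0., miu + c * g(z))
        return f(z) + (np.sum(t ** 2) - np.sum(miu ** 2)) / (2. * c)

    x = minimize(augmented_lagrangian, x).x     # inner (unconstrained) solve
    miu = np.maximum(0., miu + c * g(x))        # multiplier update, as in the loop above
    if np.linalg.norm(np.maximum(0., g(x))) < 1e-4:
        break

print(x)  # approaches the constrained minimum at [1.5, 0.5]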
Example #7
def AugLag_Opt(x, rho_iso_inf, rho_iso_sup, rho_yield_inf, rho_yield_sup,
               listCleanTests):
    global arrayCleanTests

    arrayCleanTests = list(listCleanTests)

    ## Augmented Lagrangian - cf. Bierlaire 2015 and Bertsekas 2016

    # Initialization

    x = np.array(x) * 1.0

    itLag = 1e3

    TOL = 1e-10
    TOL_Approx = 1e-3  # tolerance for ill-conditioned cases. Accepts the solution as an approximation if the trust-region radius and the scaled step size are too small (TOL_Approx)

    # NTR parameters

    Delta = 10.0

    # Augmented lagrangian parameters

    c = 10.
    eta_hat_zero = 0.1258925
    tau = 10
    alpha = 0.1
    beta = 0.9
    tol_0 = 1. / c
    tol_k = 1. / c
    eta_al = eta_hat_zero / c**alpha

    # Correction of initial point for feasible dual solution

    nBack = int((len(x) - 4) / 2)

    rho_iso_start = (rho_iso_inf + rho_iso_sup) / 2.
    rho_yield_start = (rho_yield_inf + rho_yield_sup) / 2.

    x[2] = -(1 - rho_yield_start) / (
        1 - (rho_iso_start - 1) / rho_iso_start) * x[1]

    sum_of_ck_gam_k = -(rho_iso_start - 1) / rho_iso_start * x[2]

    for k in range(nBack):
        x[4 + 2 * k] = sum_of_ck_gam_k / (nBack * 1.)
        x[5 + 2 * k] = 1.

    # Initial inequality function vector

    g = makeG(x, rho_iso_inf, rho_iso_sup, rho_yield_inf, rho_yield_sup)

    # Inequality multipliers

    miu = np.zeros(len(g))

    # Defining gradient and Hessian of errors function with AlgoPy

    grad_error_fun = nda.Gradient(errorEnsemble_nda)

    Hess_error_fun = nda.Hessian(errorEnsemble_nda)

    approxIt = 0
    approxIt_lim = 10  # only accept a limited number of ill-conditioned iterations.

    for i in range(int(itLag)):

        print("##########      New Lagrangian Step      ###########")

        x_1 = x * 1.0

        Larg = [
            x, Delta, miu, c, g, rho_iso_inf, rho_iso_sup, rho_yield_inf,
            rho_yield_sup
        ]

        Larg = NTR_J_Solver_Lag(Lagrangian, grad_error_fun, Hess_error_fun,
                                Larg, tol_k)

        x, Delta, miu, c, g, rho_iso_inf, rho_iso_sup, rho_yield_inf, rho_yield_sup = Larg

        sum_of_ck_gam_k = 0.0

        for k in range(nBack):
            sum_of_ck_gam_k = sum_of_ck_gam_k + x[4 + 2 * k] / x[5 + 2 * k]

        g = makeG(x, rho_iso_inf, rho_iso_sup, rho_yield_inf, rho_yield_sup)

        g_max = np.maximum(np.zeros(len(g)), g)
        norm_ineq = np.sqrt(np.dot(g_max, g_max))

        if norm_ineq <= eta_al:

            for j in range(len(miu)):
                miu[j] = max(0, miu[j] + c * g[j])

            tol_k = tol_k / c
            eta_al = eta_al / c**beta

        else:

            c = tau * c
            tol_k = tol_0 / c
            eta_al = eta_hat_zero / c**alpha

        d = (x - x_1) * 1.0

        gradLag_vec = gradLag(Larg, grad_error_fun)

        # print ("Norm gradLag=", np.sqrt(np.dot(gradLag_vec, gradLag_vec)), " ,Norm_ineq=", norm_ineq)

        if np.sqrt(np.dot(gradLag_vec, gradLag_vec)) < TOL and norm_ineq < TOL:
            print "####################################################"
            print "### SUCCESSFUL AUGMENTED LAGRANGIAN OPTIMIZATION ###"
            print "####################################################"
            print "########## TERMINATING AUGMENTED LAGRANGIAN ########"
            print "####################################################"
            print("x = ", x)
            break
        elif np.linalg.norm(d) < TOL_Approx and Delta < TOL_Approx:
            print " WARNING: SECONDARY CONVERGENCE CRITERIA TRIGGERED. NORM OF GRADIENT NOT WITHIN TOLERANCE."
            print " ONLY AN APPROXIMATE SOLUTION IS OBTAINED"
            approxIt = approxIt + 1
            Delta = 1.0
            if approxIt > approxIt_lim:
                print "####################################"
                print "# TERMINATING AUGMENTED LAGRANGIAN #"
                print "####################################"
                print("x = ", x)
                break

            else:
                continue

    return x