    def RunOptimizationLoop(self):
        timer = Timer()
        timer.StartTimer()

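        # NOTE: assumes self.max_iterations was already incremented by one when read
        # from the settings; otherwise this loop runs one iteration fewer than
        # requested (compare range(1, max_iterations+1) in Example #3 below)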
        for self.optimization_iteration in range(1,self.max_iterations):
            KM.Logger.Print("")
            KM.Logger.Print("===============================================================================")
            KM.Logger.PrintInfo("ShapeOpt", timer.GetTimeStamp(), ": Starting optimization iteration ", self.optimization_iteration)
            KM.Logger.Print("===============================================================================\n")

            timer.StartNewLap()

            self.__initializeNewShape()

            self.__analyzeShape()

            self.__computeShapeUpdate()

            self.__logCurrentOptimizationStep()

            KM.Logger.Print("")
            KM.Logger.PrintInfo("ShapeOpt", "Time needed for current optimization step = ", timer.GetLapTime(), "s")
            KM.Logger.PrintInfo("ShapeOpt", "Time needed for total optimization so far = ", timer.GetTotalTime(), "s")

            if self.__isAlgorithmConverged():
                break
            else:
                self.__determineAbsoluteChanges()
Example #2
    def _WriteCurrentValuesToFile(self):
        with open(self.complete_log_file_name, 'a') as csvfile:
            historyWriter = csv.writer(csvfile,
                                       delimiter=',',
                                       quotechar='|',
                                       quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("{:>4d}".format(self.current_index))

            objective_id = self.objectives[0]["identifier"].GetString()
            row.append(" {:> .5E}".format(self.history["response_value"]
                                          [objective_id][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["abs_change_objective"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["rel_change_objective"][self.current_index]))

            for itr in range(self.constraints.size()):
                constraint_id = self.constraints[itr]["identifier"].GetString()
                row.append(" {:> .5E}".format(
                    self.history["response_value"][constraint_id][
                        self.current_index]))
                row.append(" {:> .5E}".format(
                    self.communicator.getReferenceValue(constraint_id)))

            row.append(" {:> .5E}".format(
                self.history["step_size"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["inf_norm_s"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["inf_norm_c"][self.current_index]))
            row.append("{:>25}".format(Timer().GetTimeStamp()))
            historyWriter.writerow(row)
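
        # For reference, the columns written above are, in order: iteration index,
        # objective value, absolute and relative objective change, one
        # (value, reference) pair per constraint, step size, the infinity norms
        # "inf_norm_s" and "inf_norm_c" (presumably of the search direction and the
        # correction term), and a time stamp. The matching header line is assumed
        # to be written elsewhere when the log file is created.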
Example #3
    def RunOptimizationLoop(self):
        timer = Timer()
        timer.StartTimer()

        for self.opt_iteration in range(1,self.algorithm_settings["max_iterations"].GetInt()+1):
            KM.Logger.Print("")
            KM.Logger.Print("===============================================================================")
            KM.Logger.PrintInfo("ShapeOpt", timer.GetTimeStamp(), ": Starting optimization iteration ",self.opt_iteration)
            KM.Logger.Print("===============================================================================\n")

            timer.StartNewLap()

            self.__InitializeNewShape()

            self.__AnalyzeShape()

            self.__PostProcessGradientsObtainedFromAnalysis()

            len_obj, dir_obj, len_eqs, dir_eqs, len_ineqs, dir_ineqs = self.__ConvertAnalysisResultsToLengthDirectionFormat()
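            # "len_*" and "dir_*" presumably encode each response gradient as a scalar
            # measure plus a normalized search direction, separately for the objective,
            # the equality constraints and the inequality constraints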

            step_length = self.__DetermineMaxStepLength()

            len_bar_obj, len_bar_eqs, len_bar_ineqs = self.__ExpressInStepLengthUnit(len_obj, len_eqs, len_ineqs, step_length)

            dX_bar, process_details = self.__DetermineStep(len_bar_obj, dir_obj, len_bar_eqs, dir_eqs, len_bar_ineqs, dir_ineqs)

            dX = self.__ComputeShapeUpdate(dX_bar, step_length)

            values_to_be_logged = {}
            values_to_be_logged["len_bar_obj"] = len_bar_obj
            values_to_be_logged["len_bar_cons"] = self.__CombineConstraintDataToOrderedList(len_bar_eqs, len_bar_ineqs)
            values_to_be_logged["step_length"] = step_length
            values_to_be_logged["test_norm_dX_bar"] = process_details["test_norm_dX"]
            values_to_be_logged["bi_itrs"] = process_details["bi_itrs"]
            values_to_be_logged["bi_err"] = process_details["bi_err"]
            values_to_be_logged["adj_len_bar_obj"] = process_details["adj_len_obj"]
            values_to_be_logged["adj_len_bar_cons"] = self.__CombineConstraintDataToOrderedList(process_details["adj_len_eqs"], process_details["adj_len_ineqs"])
            values_to_be_logged["norm_dX"] = cm.NormInf3D(dX)

            self.__LogCurrentOptimizationStep(values_to_be_logged)

            KM.Logger.Print("")
            KM.Logger.PrintInfo("ShapeOpt", "Time needed for current optimization step = ", timer.GetLapTime(), "s")
            KM.Logger.PrintInfo("ShapeOpt", "Time needed for total optimization so far = ", timer.GetTotalTime(), "s")

            if self.__isAlgorithmConverged():
                break
Example #4
    def Optimize(self):
        algorithm_name = self.optimization_settings["optimization_algorithm"]["name"].GetString()

        KM.Logger.Print("")
        KM.Logger.Print("===============================================================================")
        KM.Logger.PrintInfo("ShapeOpt", Timer().GetTimeStamp(), ": Starting optimization using the following algorithm: ", algorithm_name)
        KM.Logger.Print("===============================================================================\n")

        algorithm = algorithm_factory.CreateOptimizationAlgorithm(self.optimization_settings,
                                                                  self.analyzer,
                                                                  self.communicator,
                                                                  self.model_part_controller)
        algorithm.CheckApplicability()
        algorithm.InitializeOptimizationLoop()
        algorithm.RunOptimizationLoop()
        algorithm.FinalizeOptimizationLoop()

        KM.Logger.Print("")
        KM.Logger.Print("===============================================================================")
        KM.Logger.PrintInfo("ShapeOpt", "Finished optimization")
        KM.Logger.Print("===============================================================================\n")
Example #5
    def _WriteCurrentValuesToFile(self):
        with open(self.complete_log_file_name, 'a') as csvfile:
            historyWriter = csv.writer(csvfile,
                                       delimiter=',',
                                       quotechar='|',
                                       quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("{:>12d}".format(self.current_index))
            row.append("{:>10d}".format(
                self.history["outer_iteration"][self.current_index]))
            row.append("{:>10d}".format(
                self.history["inner_iteration"][self.current_index]))
            objective_id = self.objectives[0]["identifier"].GetString()
            row.append(" {:> .5E}".format(
                self.history["lagrange_value"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["lagrange_value_relative_change"][
                    self.current_index]))
            row.append(" {:> .5E}".format(self.history["response_value"]
                                          [objective_id][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["abs_change_objective"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["rel_change_objective"][self.current_index]))
            row.append("  {:> .5E}".format(
                self.history["max_norm_objective_gradient"][
                    self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["penalty_lambda"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["penalty_value"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["penalty_scaling"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["penalty_factor"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["step_size"][self.current_index]))
            row.append("{:>25}".format(Timer().GetTimeStamp()))
            historyWriter.writerow(row)

    def RunOptimizationLoop(self):
        timer = Timer()
        timer.StartTimer()

        current_lambda = self.lambda0
        penalty_scaling = self.penalty_scaling_0
        penalty_factor = self.penalty_factor_0

        total_iteration = 0
        is_design_converged = False
        is_max_total_iterations_reached = False
        previous_L = None

        for outer_iteration in range(1,self.max_outer_iterations+1):
            for inner_iteration in range(1,self.max_inner_iterations+1):

                total_iteration += 1
                timer.StartNewLap()

                KM.Logger.Print("=======================================================================================")
                KM.Logger.PrintInfo("ShapeOpt", timer.GetTimeStamp(), ": Starting iteration ",outer_iteration,".",inner_iteration,".",total_iteration,"(outer . inner . total)")
                KM.Logger.Print("=======================================================================================\n")

                # Initialize new shape
                self.model_part_controller.UpdateTimeStep(total_iteration)

                for node in self.design_surface.Nodes:
                    new_shape_change = node.GetSolutionStepValue(KSO.ALPHA_MAPPED) * node.GetValue(KSO.BEAD_DIRECTION) * self.bead_height
                    node.SetSolutionStepValue(KSO.SHAPE_CHANGE, new_shape_change)

                self.model_part_controller.DampNodalVariableIfSpecified(KSO.SHAPE_CHANGE)

                for node in self.design_surface.Nodes:
                    shape_update = node.GetSolutionStepValue(KSO.SHAPE_CHANGE,0) - node.GetSolutionStepValue(KSO.SHAPE_CHANGE,1)
                    node.SetSolutionStepValue(KSO.SHAPE_UPDATE, shape_update)

                self.model_part_controller.UpdateMeshAccordingInputVariable(KSO.SHAPE_UPDATE)
                self.model_part_controller.SetReferenceMeshToMesh()

                # Analyze shape
                self.communicator.initializeCommunication()
                self.communicator.requestValueOf(self.objectives[0]["identifier"].GetString())
                self.communicator.requestGradientOf(self.objectives[0]["identifier"].GetString())

                self.analyzer.AnalyzeDesignAndReportToCommunicator(self.optimization_model_part, total_iteration, self.communicator)

                objective_value = self.communicator.getStandardizedValue(self.objectives[0]["identifier"].GetString())
                objGradientDict = self.communicator.getStandardizedGradient(self.objectives[0]["identifier"].GetString())
                WriteDictionaryDataOnNodalVariable(objGradientDict, self.optimization_model_part, KSO.DF1DX)

                self.model_part_controller.DampNodalVariableIfSpecified(KSO.DF1DX)

                # Compute sensitivities w.r.t. scalar design variable alpha
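                # Chain rule: the shape change is x = bead_height * alpha * bead_direction
                # (see above), so dF/dalpha = bead_height * (dF/dx . bead_direction)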
                for node in self.design_surface.Nodes:
                    raw_gradient = node.GetSolutionStepValue(KSO.DF1DX)
                    bead_dir = node.GetValue(KSO.BEAD_DIRECTION)

                    dF1dalpha_i = self.bead_height*(raw_gradient[0]*bead_dir[0] + raw_gradient[1]*bead_dir[1] + raw_gradient[2]*bead_dir[2])
                    node.SetSolutionStepValue(KSO.DF1DALPHA, dF1dalpha_i)

                # Map gradient of objective
                self.mapper.InverseMap(KSO.DF1DALPHA, KSO.DF1DALPHA_MAPPED)

                # Compute scaling
                max_norm_objective_gradient = self.optimization_utilities.ComputeMaxNormOfNodalVariable(self.design_surface, KSO.DF1DALPHA_MAPPED)

                if outer_iteration == 1 and inner_iteration == min(3,self.max_inner_iterations):
                    if self.bead_side == "positive" or self.bead_side == "negative":
                        max_norm_penalty_gradient = 1.0
                    elif self.bead_side == "both":
                        max_norm_penalty_gradient = 2.0

                    penalty_scaling = max_norm_objective_gradient/max_norm_penalty_gradient

                # Compute penalization term
                penalty_value = 0.0
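                # Penalty term per free node, depending on the admissible bead side
                # (the gradients below are the analytic derivatives):
                #   positive: p(a) =  a - a^2,   dp/da =  1 - 2a
                #   negative: p(a) = -a - a^2,   dp/da = -1 - 2a
                #   both:     p(a) =  1 - a^2,   dp/da = -2a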
                if self.bead_side == "positive":
                    for node in self.design_surface.Nodes:
                        if not node.Is(KM.BOUNDARY):
                            alpha_i = node.GetSolutionStepValue(KSO.ALPHA)
                            penalty_value += penalty_scaling*(alpha_i-alpha_i**2)

                            penalty_gradient_i = penalty_scaling*(1-2*alpha_i)
                            node.SetSolutionStepValue(KSO.DPDALPHA, penalty_gradient_i)

                elif self.bead_side == "negative":
                    for node in self.design_surface.Nodes:
                        if not node.Is(KM.BOUNDARY):
                            alpha_i = node.GetSolutionStepValue(KSO.ALPHA)
                            penalty_value += penalty_scaling*(-alpha_i-alpha_i**2)

                            penalty_gradient_i = penalty_scaling*(-1-2*alpha_i)
                            node.SetSolutionStepValue(KSO.DPDALPHA, penalty_gradient_i)

                elif self.bead_side == "both":
                    for node in self.design_surface.Nodes:
                        if not node.Is(KM.BOUNDARY):
                            alpha_i = node.GetSolutionStepValue(KSO.ALPHA)
                            penalty_value += penalty_scaling*(-alpha_i**2+1)

                            penalty_gradient_i = penalty_scaling*(-2*alpha_i)
                            node.SetSolutionStepValue(KSO.DPDALPHA, penalty_gradient_i)

                # Filter penalty term if specified
                if self.filter_penalty_term:
                    self.penalty_filter.InverseMap(KSO.DPDALPHA, KSO.DPDALPHA_MAPPED)

                # Compute value of Lagrange function
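                # Augmented-Lagrangian form: L = f + lambda*p + 0.5*r*p^2, with
                # p the penalty value and r the penalty factor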
                L = objective_value + current_lambda*penalty_value + 0.5*penalty_factor*penalty_value**2
                if inner_iteration == 1:
                    dL_relative = 0.0
                else:
                    dL_relative = 100*(L/previous_L-1)

                # Compute gradient of Lagrange function
                if self.filter_penalty_term:
                    penalty_gradient_variable = KSO.DPDALPHA_MAPPED
                else:
                    penalty_gradient_variable = KSO.DPDALPHA
                for node in self.design_surface.Nodes:
                    dLdalpha_i = node.GetSolutionStepValue(KSO.DF1DALPHA_MAPPED) + current_lambda*node.GetSolutionStepValue(penalty_gradient_variable)
                    node.SetSolutionStepValue(KSO.DLDALPHA, dLdalpha_i)

                # Normalization using infinity norm
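                # max() over the squared entries followed by sqrt() yields the infinity
                # norm of dL/dalpha over the free nodes; nodes at a bound or on the
                # boundary are excluded so they cannot dominate the step scaling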
                dLdalpha_for_normalization = {}
                for node in self.design_surface.Nodes:
                    nodal_alpha = node.GetSolutionStepValue(KSO.ALPHA)
                    if nodal_alpha==self.lower_bound or nodal_alpha==self.upper_bound or node.Is(KM.BOUNDARY):
                        dLdalpha_for_normalization[node.Id] = 0.0
                    else:
                        dLdalpha_for_normalization[node.Id] = node.GetSolutionStepValue(KSO.DLDALPHA)**2

                max_value = math.sqrt(max(dLdalpha_for_normalization.values()))
                if max_value == 0.0:
                    max_value = 1.0

                # Compute updated design variable
                for node in self.design_surface.Nodes:
                    dalpha = -self.step_size*node.GetSolutionStepValue(KSO.DLDALPHA)/max_value
                    alpha_new = node.GetSolutionStepValue(KSO.ALPHA) + dalpha

                    # Enforce bounds
                    alpha_new = max(alpha_new, self.lower_bound)
                    alpha_new = min(alpha_new, self.upper_bound)

                    # Enforce constraints
                    if node.Is(KM.BOUNDARY):
                        alpha_new = 0.0

                    node.SetSolutionStepValue(KSO.ALPHA,alpha_new)

                    alpha_new_vectorized = alpha_new * node.GetValue(KSO.BEAD_DIRECTION)
                    node.SetSolutionStepValue(KSO.CONTROL_POINT_CHANGE,alpha_new_vectorized)

                # Map design variables
                self.mapper.Map(KSO.ALPHA, KSO.ALPHA_MAPPED)

                # Log current optimization step and store values for next iteration
                additional_values_to_log = {}
                additional_values_to_log["step_size"] = self.algorithm_settings["line_search"]["step_size"].GetDouble()
                additional_values_to_log["outer_iteration"] = outer_iteration
                additional_values_to_log["inner_iteration"] = inner_iteration
                additional_values_to_log["lagrange_value"] = L
                additional_values_to_log["lagrange_value_relative_change"] = dL_relative
                additional_values_to_log["penalty_value"] = penalty_value
                additional_values_to_log["penalty_lambda"] = current_lambda
                additional_values_to_log["penalty_scaling"] = penalty_scaling
                additional_values_to_log["penalty_factor"] = penalty_factor
                additional_values_to_log["max_norm_objective_gradient"] = max_norm_objective_gradient

                self.data_logger.LogCurrentValues(total_iteration, additional_values_to_log)
                self.data_logger.LogCurrentDesign(total_iteration)

                previous_L = L

                # Convergence check of inner loop
                if total_iteration == self.max_total_iterations:
                    is_max_total_iterations_reached = True
                    break

                if inner_iteration >= self.min_inner_iterations and inner_iteration > 1:
                    # In the first outer iteration, the constraint is not yet active and not yet properly scaled; therefore, the relative change of the objective is used as the convergence check instead
                    if outer_iteration == 1:
                        if abs(self.data_logger.GetValues("rel_change_objective")[total_iteration]) < self.inner_iteration_tolerance:
                            break
                    else:
                        if abs(dL_relative) < self.inner_iteration_tolerance:
                            break

                if penalty_value == 0.0:
                    is_design_converged = True
                    break

                KM.Logger.Print("")
                KM.Logger.PrintInfo("ShapeOpt", "Time needed for current optimization step = ", timer.GetLapTime(), "s")
                KM.Logger.PrintInfo("ShapeOpt", "Time needed for total optimization so far = ", timer.GetTotalTime(), "s")

            # Compute penalty factor such that estimated Lagrange multiplier is obtained
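            # With r = lambda_est/p, the multiplier update below then gives
            # lambda = lambda0 + r*p = lambda0 + lambda_est after the first outer iteration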
            if outer_iteration==1 and penalty_value != 0.0:
                penalty_factor = self.estimated_lagrange_multiplier/penalty_value

            # Update lambda
            current_lambda = current_lambda + penalty_factor*penalty_value

            KM.Logger.Print("")
            KM.Logger.PrintInfo("ShapeOpt", "Time needed for current optimization step = ", timer.GetLapTime(), "s")
            KM.Logger.PrintInfo("ShapeOpt", "Time needed for total optimization so far = ", timer.GetTotalTime(), "s")

            # Check convergence of outer loop
            if outer_iteration == self.max_outer_iterations:
                KM.Logger.Print("")
                KM.Logger.PrintInfo("ShapeOpt", "Maximal outer iterations of optimization problem reached!")
                break

            if is_max_total_iterations_reached:
                KM.Logger.Print("")
                KM.Logger.PrintInfo("ShapeOpt", "Maximal total iterations of optimization problem reached!")
                break

            if is_design_converged:
                KM.Logger.Print("")
                KM.Logger.PrintInfo("ShapeOpt", "Update of design variables is zero. Optimization converged!")
                break
Example #7
    def __DetermineStep(self, len_obj, dir_obj, len_eqs, dir_eqs, len_ineqs, dir_ineqs):
        KM.Logger.Print("")
        KM.Logger.PrintInfo("ShapeOpt", "Starting determination of step...")

        timer = Timer()
        timer.StartTimer()

        # Create projector object which can perform the projection in the orthogonalized subspace
        projector = Projector(len_obj, dir_obj, len_eqs, dir_eqs, len_ineqs, dir_ineqs, self.algorithm_settings)

        # 1. Test projection to check whether there is room for objective improvement,
        # i.e. whether the step length actually needed to become feasible (with an inactive
        # threshold) is smaller than 1, so that part of the step can be dedicated to the objective
        len_obj_test = 0.01
        inactive_threshold = 100
        test_norm_dX, is_projection_successful = projector.RunProjection(len_obj_test, inactive_threshold)

        KM.Logger.PrintInfo("ShapeOpt", "Time needed for one projection step = ", timer.GetTotalTime(), "s")

        # 2. Determine the step following one of two modes, depending on the previously found step length to the feasible domain
        if is_projection_successful:
            if test_norm_dX < 1: # Minimizing mode
                KM.Logger.Print("")
                KM.Logger.PrintInfo("ShapeOpt", "Computing projection case 1...")

                func = lambda len_obj: projector.RunProjection(len_obj, inactive_threshold)

                len_obj_min = len_obj_test
                len_obj_max = 1.3
                bi_target = 1
                bi_tolerance = self.algorithm_settings["bisectioning_tolerance"].GetDouble()
                bi_max_itr = self.algorithm_settings["bisectioning_max_itr"].GetInt()
                len_obj_result, bi_itrs, bi_err = cm.PerformBisectioning(func, len_obj_min, len_obj_max, bi_target, bi_tolerance, bi_max_itr)

                projection_results = projector.GetDetailedResultsOfLatestProjection()

            else: # Correction mode
                KM.Logger.Print("")
                KM.Logger.PrintInfo("ShapeOpt", "Computing projection case 2...")

                len_obj = self.algorithm_settings["obj_share_during_correction"].GetDouble()
                func = lambda threshold: projector.RunProjection(len_obj, threshold)

                threshold_min = 0
                threshold_max = 1.3
                bi_target = 1
                bi_tolerance = self.algorithm_settings["bisectioning_tolerance"].GetDouble()
                bi_max_itr = self.algorithm_settings["bisectioning_max_itr"].GetInt()
                l_threshold_result, bi_itrs, bi_err = cm.PerformBisectioning(func, threshold_min, threshold_max, bi_target, bi_tolerance, bi_max_itr)

                projection_results = projector.GetDetailedResultsOfLatestProjection()
        else:
            raise RuntimeError("Case of non-converged test projection is not yet implemented!")

        KM.Logger.Print("")
        KM.Logger.PrintInfo("ShapeOpt", "Time needed for determining step = ", timer.GetTotalTime(), "s")

        process_details = { "test_norm_dX": test_norm_dX,
                            "bi_itrs":bi_itrs,
                            "bi_err":bi_err,
                            "adj_len_obj": projection_results["adj_len_obj"],
                            "adj_len_eqs": projection_results["adj_len_eqs"],
                            "adj_len_ineqs": projection_results["adj_len_ineqs"] }

        return projection_results["dX"], process_details
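
For reference, a minimal stand-in illustrating the bisectioning contract assumed above; cm.PerformBisectioning is Kratos' own helper, and this sketch only mirrors the call pattern used here (func(x) returning a value that grows with x, plus a success flag):

def perform_bisectioning(func, x_min, x_max, target, tolerance, max_itr):
    # Bisect on x until the value returned by func is within tolerance of the
    # target; returns the final argument, the iteration count and the residual error.
    for itr in range(1, max_itr + 1):
        x_mid = 0.5*(x_min + x_max)
        value, _ = func(x_mid)  # success flag ignored in this sketch
        error = abs(value - target)
        if error < tolerance:
            break
        if value < target:
            x_min = x_mid  # value too small: raise the lower bound
        else:
            x_max = x_mid  # value too large: lower the upper bound
    return x_mid, itr, error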