Example 1
    def Optimize(self):
        algorithm_name = self.optimization_settings["optimization_algorithm"][
            "name"].GetString()

        print(
            "\n> =============================================================================================================="
        )
        print("> ",
              Timer().GetTimeStamp(),
              ": Starting optimization using the following algorithm: ",
              algorithm_name)
        print(
            "> ==============================================================================================================\n"
        )

        algorithm = algorithm_factory.CreateOptimizationAlgorithm(
            self.optimization_settings, self.analyzer, self.communicator,
            self.model_part_controller)
        algorithm.CheckApplicability()
        algorithm.InitializeOptimizationLoop()
        algorithm.RunOptimizationLoop()
        algorithm.FinalizeOptimizationLoop()

        print(
            "\n> =============================================================================================================="
        )
        print(
            "> Finished optimization                                                                                           "
        )
        print(
            "> ==============================================================================================================\n"
        )


# ==============================================================================
Example 2
    def _WriteCurrentValuesToFile(self):
        with open(self.complete_log_file_name, 'a') as csvfile:
            historyWriter = csv.writer(csvfile,
                                       delimiter=',',
                                       quotechar='|',
                                       quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("{:>4d}".format(self.current_iteration))

            objective_id = self.specified_objectives[0][
                "identifier"].GetString()
            row.append(" {:> .5E}".format(
                self.value_history[objective_id][self.current_iteration]))
            row.append(" {:> .5E}".format(
                self.value_history["abs_change_obj"][self.current_iteration]))
            row.append(" {:> .5E}".format(
                self.value_history["rel_change_obj"][self.current_iteration]))

            row.append(" {:> .5E}".format(
                self.value_history["norm_obj_gradient"][
                    self.current_iteration]))
            row.append(" {:> .5E}".format(
                self.value_history["step_size"][self.current_iteration]))
            row.append("{:>25}".format(Timer().GetTimeStamp()))
            historyWriter.writerow(row)
Example 3
    def _WriteCurrentValuesToFile(self):
        with open(self.complete_log_file_name, 'a') as csvfile:
            historyWriter = csv.writer(csvfile,
                                       delimiter=',',
                                       quotechar='|',
                                       quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("{:>4d}".format(self.current_index))

            objective_id = self.objectives[0]["identifier"].GetString()
            row.append(" {:> .5E}".format(self.history["response_value"]
                                          [objective_id][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["abs_change_objective"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["rel_change_objective"][self.current_index]))

            for itr in range(self.constraints.size()):
                constraint_id = self.constraints[itr]["identifier"].GetString()
                row.append(" {:> .5E}".format(
                    self.history["response_value"][constraint_id][
                        self.current_index]))
                row.append(" {:> .5E}".format(
                    self.communicator.getReferenceValue(constraint_id)))

            row.append(" {:> .5E}".format(
                self.history["correction_scaling"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["step_size"][self.current_index]))
            row.append("{:>25}".format(Timer().GetTimeStamp()))
            historyWriter.writerow(row)
Example 4
    def RunOptimizationLoop(self):
        timer = Timer()
        timer.StartTimer()

        for self.optimization_iteration in range(1, self.max_iterations):
            print("\n>===================================================================")
            print("> ",timer.GetTimeStamp(),": Starting optimization iteration ", self.optimization_iteration)
            print(">===================================================================\n")

            timer.StartNewLap()

            self.__initializeNewShape()

            self.__analyzeShape()

            self.__computeShapeUpdate()

            self.__logCurrentOptimizationStep()

            print("\n> Time needed for current optimization step = ", timer.GetLapTime(), "s")
            print("> Time needed for total optimization so far = ", timer.GetTotalTime(), "s")

            if self.__isAlgorithmConverged():
                break
            else:
                self.__determineAbsoluteChanges()
Example 5
    def _WriteCurrentValuesToFile(self):
        with open(self.complete_log_file_name, 'a') as csvfile:
            historyWriter = csv.writer(csvfile,
                                       delimiter=',',
                                       quotechar='|',
                                       quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append(str("{:>4d}".format(self.current_iteration)))

            objective_id = self.specified_objectives[0][
                "identifier"].GetString()
            row.append(
                str("{:>20f}".format(
                    self.value_history[objective_id][self.current_iteration])))
            row.append(
                str("{:>12f}".format(self.value_history["abs_change_obj"][
                    self.current_iteration])))
            row.append(
                str("{:>12f}".format(self.value_history["rel_change_obj"][
                    self.current_iteration])))

            for itr in range(self.specified_constraints.size()):
                constraint_id = self.specified_constraints[itr][
                    "identifier"].GetString()
                row.append(
                    str("{:>20f}".format(self.value_history[constraint_id][
                        self.current_iteration])))
                row.append(
                    str("{:>20f}".format(
                        self.communicator.getReferenceValue(constraint_id))))
                row.append(
                    str("{:>12f}".format(self.value_history["len_bar_cons"][
                        self.current_iteration][itr])))
                row.append(
                    str("{:>12f}".format(self.value_history["adj_len_bar_cons"]
                                         [self.current_iteration][itr])))

            row.append(
                str("{:>12d}".format(
                    self.value_history["bi_itrs"][self.current_iteration])))
            row.append(
                str("{:>12f}".format(
                    self.value_history["bi_err"][self.current_iteration])))
            row.append(
                str("{:>17f}".format(self.value_history["test_norm_dX_bar"][
                    self.current_iteration])))
            row.append(
                str("{:>12f}".format(
                    self.value_history["norm_dX"][self.current_iteration])))
            row.append(
                str("{:>12f}".format(self.value_history["step_length"][
                    self.current_iteration])))
            row.append("{:>25}".format(Timer().GetTimeStamp()))
            historyWriter.writerow(row)
Example 6
    def __WriteDataToLogFile(self):
        objectiveValue = self.objectiveHistory[self.currentIteration]
        constraintValue = self.constraintHistory[self.currentIteration]
        absoluteChangeOfObjectiveValue = self.absoluteChangeOfObjectiveHistory[self.currentIteration]
        relativeChangeOfObjectiveValue = self.relativeChangeOfObjectiveHistory[self.currentIteration]

        with open(self.completeResponseLogFileName, 'a') as csvfile:
            historyWriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("{:<4s}".format(str(self.currentIteration)))
            row.append(str("{:>20f}".format(objectiveValue)))
            row.append(str("{:>12f}".format(absoluteChangeOfObjectiveValue)))
            row.append(str("{:>12f}".format(relativeChangeOfObjectiveValue)))
            row.append(str("{:>20f}".format(constraintValue)))
            row.append(str("{:>20f}".format(self.constraintOutputReference)))
            row.append(str("{:>13f}".format(self.optimizationSettings["optimization_algorithm"]["correction_scaling"].GetDouble())))
            row.append(str("{:>13f}".format(self.optimizationSettings["line_search"]["step_size"].GetDouble())))
            row.append("{:>25}".format(Timer().GetTimeStamp()))
            historyWriter.writerow(row)
Example 7
    def Optimize(self):
        algorithm_name = self.optimization_settings["optimization_algorithm"][
            "name"].GetString()

        print(
            "\n> =============================================================================================================="
        )
        print("> ",
              Timer().GetTimeStamp(),
              ": Starting optimization using the following algorithm: ",
              algorithm_name)
        print(
            "> ==============================================================================================================\n"
        )

        if self.model_part_controller.IsOptimizationModelPartAlreadyImported():
            print(
                "> Skipping import of optimization model part as already done by another application. "
            )
        else:
            self.model_part_controller.ImportOptimizationModelPart()

        algorithm = algorithm_factory.CreateAlgorithm(
            self.optimization_settings, self.model_part_controller,
            self.analyzer, self.communicator)

        algorithm.InitializeOptimizationLoop()
        algorithm.RunOptimizationLoop()
        algorithm.FinalizeOptimizationLoop()

        print(
            "\n> =============================================================================================================="
        )
        print(
            "> Finished optimization                                                                                           "
        )
        print(
            "> ==============================================================================================================\n"
        )


# ==============================================================================
Example 8
    def _WriteCurrentValuesToFile(self):
        with open(self.complete_log_file_name, 'a') as csvfile:
            historyWriter = csv.writer(csvfile,
                                       delimiter=',',
                                       quotechar='|',
                                       quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("{:>12d}".format(self.current_index))
            row.append("{:>10d}".format(
                self.history["outer_iteration"][self.current_index]))
            row.append("{:>10d}".format(
                self.history["inner_iteration"][self.current_index]))
            objective_id = self.objectives[0]["identifier"].GetString()
            row.append(" {:> .5E}".format(
                self.history["lagrange_value"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["lagrange_value_relative_change"][
                    self.current_index]))
            row.append(" {:> .5E}".format(self.history["response_value"]
                                          [objective_id][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["abs_change_objective"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["rel_change_objective"][self.current_index]))
            row.append("  {:> .5E}".format(
                self.history["max_norm_objective_gradient"][
                    self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["penalty_lambda"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["penalty_value"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["penalty_scaling"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["penalty_factor"][self.current_index]))
            row.append(" {:> .5E}".format(
                self.history["step_size"][self.current_index]))
            row.append("{:>25}".format(Timer().GetTimeStamp()))
            historyWriter.writerow(row)
Example 9
    results = [
        pool.apply_async(get_x_y,
                         args=(i, fs, "test", resample),
                         callback=progress_bar) for i in range(size)
    ]

    output = [p.get() for p in results]

    x_test, y_test = split_x_y(output)

    return x_test, y_test


if __name__ == '__main__':
    file_dir = "gcc_phat_data"

    with Timer("Training data"):
        x_train_raw, y_train_raw = get_train(int(len(df.index) * .9), 12, True,
                                             8000)

    x_train = cnn.reshape_x_for_cnn(cnn.normalize_x_data(x_train_raw))
    np.save("{dir}/x_train.npy".format(dir=file_dir), x_train)

    y_train = to_categorical(cnn.y_to_class_id(y_train_raw))
    np.save("{dir}/y_train.npy".format(dir=file_dir), y_train)

    with Timer("Testing data"):
        x_test_raw, y_test_raw = get_test(int(len(df.index) * .1), 12, True,
                                          8000)

    x_test = cnn.reshape_x_for_cnn(cnn.normalize_x_data(x_test_raw))
    np.save("{dir}/x_test.npy".format(dir=file_dir), x_test)
Example 10
    def RunOptimizationLoop(self):
        timer = Timer()
        timer.StartTimer()

        current_lambda = self.lambda0
        penalty_scaling = self.penalty_scaling_0
        penalty_factor = self.penalty_factor_0

        total_iteration = 0
        is_design_converged = False
        is_max_total_iterations_reached = False
        previous_L = None

        for outer_iteration in range(1, self.max_outer_iterations + 1):
            for inner_iteration in range(1, self.max_inner_iterations + 1):

                total_iteration += 1
                timer.StartNewLap()

                print(
                    "\n>======================================================================================="
                )
                print("> ", timer.GetTimeStamp(), ": Starting iteration ",
                      outer_iteration, ".", inner_iteration, ".",
                      total_iteration, "(outer . inner . total)")
                print(
                    ">=======================================================================================\n"
                )

                # Initialize new shape
                self.model_part_controller.UpdateTimeStep(total_iteration)

                for node in self.design_surface.Nodes:
                    new_shape_change = node.GetSolutionStepValue(
                        ALPHA_MAPPED) * node.GetValue(
                            BEAD_DIRECTION) * self.bead_height
                    node.SetSolutionStepValue(SHAPE_CHANGE, new_shape_change)

                self.model_part_controller.DampNodalVariableIfSpecified(
                    SHAPE_CHANGE)

                for node in self.design_surface.Nodes:
                    shape_update = node.GetSolutionStepValue(
                        SHAPE_CHANGE, 0) - node.GetSolutionStepValue(
                            SHAPE_CHANGE, 1)
                    node.SetSolutionStepValue(SHAPE_UPDATE, shape_update)

                self.model_part_controller.UpdateMeshAccordingInputVariable(
                    SHAPE_UPDATE)
                self.model_part_controller.SetReferenceMeshToMesh()

                # Analyze shape
                self.communicator.initializeCommunication()
                self.communicator.requestValueOf(
                    self.objectives[0]["identifier"].GetString())
                self.communicator.requestGradientOf(
                    self.objectives[0]["identifier"].GetString())

                self.analyzer.AnalyzeDesignAndReportToCommunicator(
                    self.design_surface, total_iteration, self.communicator)

                objective_value = self.communicator.getStandardizedValue(
                    self.objectives[0]["identifier"].GetString())
                objGradientDict = self.communicator.getStandardizedGradient(
                    self.objectives[0]["identifier"].GetString())
                WriteDictionaryDataOnNodalVariable(
                    objGradientDict, self.optimization_model_part, DF1DX)

                self.model_part_controller.DampNodalVariableIfSpecified(DF1DX)

                # Compute sensitivities w.r.t. scalar design variable alpha
                for node in self.design_surface.Nodes:
                    raw_gradient = node.GetSolutionStepValue(DF1DX)
                    bead_dir = node.GetValue(BEAD_DIRECTION)

                    dF1dalpha_i = self.bead_height * (
                        raw_gradient[0] * bead_dir[0] + raw_gradient[1] *
                        bead_dir[1] + raw_gradient[2] * bead_dir[2])
                    node.SetSolutionStepValue(DF1DALPHA, dF1dalpha_i)

                # Map gradient of objective
                self.mapper.InverseMap(DF1DALPHA, DF1DALPHA_MAPPED)

                # Compute scaling
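                # (penalty_scaling below matches the max norm of the penalty
                # gradient to the max norm of the objective gradient, so that
                # both terms contribute comparably)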
                max_norm_objective_gradient = self.optimization_utilities.ComputeMaxNormOfNodalVariable(
                    DF1DALPHA_MAPPED)

                if outer_iteration == 1 and inner_iteration == min(
                        3, self.max_inner_iterations):
                    if self.bead_side == "positive" or self.bead_side == "negative":
                        max_norm_penalty_gradient = 1.0
                    elif self.bead_side == "both":
                        max_norm_penalty_gradient = 2.0

                    penalty_scaling = max_norm_objective_gradient / max_norm_penalty_gradient

                # Compute penalization term
                penalty_value = 0.0
                if self.bead_side == "positive":
                    for node in self.design_surface.Nodes:
                        if not node.Is(BOUNDARY):
                            alpha_i = node.GetSolutionStepValue(ALPHA)
                            penalty_value += penalty_scaling * (alpha_i -
                                                                alpha_i**2)

                            penalty_gradient_i = penalty_scaling * (
                                1 - 2 * alpha_i)
                            node.SetSolutionStepValue(DPDALPHA,
                                                      penalty_gradient_i)

                elif self.bead_side == "negative":
                    for node in self.design_surface.Nodes:
                        if not node.Is(BOUNDARY):
                            alpha_i = node.GetSolutionStepValue(ALPHA)
                            penalty_value += penalty_scaling * (-alpha_i -
                                                                alpha_i**2)

                            penalty_gradient_i = penalty_scaling * (
                                -1 - 2 * alpha_i)
                            node.SetSolutionStepValue(DPDALPHA,
                                                      penalty_gradient_i)

                elif self.bead_side == "both":
                    for node in self.design_surface.Nodes:
                        if not node.Is(BOUNDARY):
                            alpha_i = node.GetSolutionStepValue(ALPHA)
                            penalty_value += penalty_scaling * (-alpha_i**2 +
                                                                1)

                            penalty_gradient_i = penalty_scaling * (-2 *
                                                                    alpha_i)
                            node.SetSolutionStepValue(DPDALPHA,
                                                      penalty_gradient_i)

                # Filter penalty term if specified
                if self.filter_penalty_term:
                    self.mapper.InverseMap(DPDALPHA, DPDALPHA_MAPPED)

                # Compute value of Lagrange function
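                # (standard augmented Lagrangian: L = f + lambda * p + 0.5 * rho * p**2,
                # with p the penalty value and rho the penalty factor)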
                L = objective_value + current_lambda * penalty_value + 0.5 * penalty_factor * penalty_value**2
                if inner_iteration == 1:
                    dL_relative = 0.0
                else:
                    dL_relative = 100 * (L / previous_L - 1)

                # Compute gradient of Lagrange function
                if self.filter_penalty_term:
                    penalty_gradient_variable = DPDALPHA_MAPPED
                else:
                    penalty_gradient_variable = DPDALPHA
                for node in self.design_surface.Nodes:
                    dLdalpha_i = node.GetSolutionStepValue(
                        DF1DALPHA_MAPPED
                    ) + current_lambda * node.GetSolutionStepValue(
                        penalty_gradient_variable)
                    node.SetSolutionStepValue(DLDALPHA, dLdalpha_i)

                # Normalization using infinity norm
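                # (nodes at the bounds or on the boundary are excluded, so the
                # step is normalized only over the free design variables)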
                dLdalpha_for_normalization = {}
                for node in self.design_surface.Nodes:
                    nodal_alpha = node.GetSolutionStepValue(ALPHA)
                    if nodal_alpha == self.lower_bound or nodal_alpha == self.upper_bound or node.Is(
                            BOUNDARY):
                        dLdalpha_for_normalization[node.Id] = 0.0
                    else:
                        dLdalpha_for_normalization[
                            node.Id] = node.GetSolutionStepValue(DLDALPHA)**2

                max_value = math.sqrt(max(dLdalpha_for_normalization.values()))
                if max_value == 0.0:
                    max_value = 1.0

                # Compute updated design variable
                for node in self.design_surface.Nodes:
                    dalpha = -self.step_size * node.GetSolutionStepValue(
                        DLDALPHA) / max_value
                    alpha_new = node.GetSolutionStepValue(ALPHA) + dalpha

                    # Enforce bounds
                    alpha_new = max(alpha_new, self.lower_bound)
                    alpha_new = min(alpha_new, self.upper_bound)

                    # Enforce constraints
                    if node.Is(BOUNDARY):
                        alpha_new = 0.0

                    node.SetSolutionStepValue(ALPHA, alpha_new)

                    alpha_new_vectorized = alpha_new * node.GetValue(
                        BEAD_DIRECTION)
                    node.SetSolutionStepValue(CONTROL_POINT_CHANGE,
                                              alpha_new_vectorized)

                # Map design variables
                self.mapper.Map(ALPHA, ALPHA_MAPPED)

                # Log current optimization step and store values for next iteration
                additional_values_to_log = {}
                additional_values_to_log[
                    "step_size"] = self.algorithm_settings["line_search"][
                        "step_size"].GetDouble()
                additional_values_to_log["outer_iteration"] = outer_iteration
                additional_values_to_log["inner_iteration"] = inner_iteration
                additional_values_to_log["lagrange_value"] = L
                additional_values_to_log[
                    "lagrange_value_relative_change"] = dL_relative
                additional_values_to_log["penalty_value"] = penalty_value
                additional_values_to_log["penalty_lambda"] = current_lambda
                additional_values_to_log["penalty_scaling"] = penalty_scaling
                additional_values_to_log["penalty_factor"] = penalty_factor
                additional_values_to_log[
                    "max_norm_objective_gradient"] = max_norm_objective_gradient

                self.data_logger.LogCurrentValues(total_iteration,
                                                  additional_values_to_log)
                self.data_logger.LogCurrentDesign(total_iteration)

                previous_L = L

                # Convergence check of inner loop
                if total_iteration == self.max_total_iterations:
                    is_max_total_iterations_reached = True
                    break

                if inner_iteration >= self.min_inner_iterations and inner_iteration > 1:
                    # In the first outer iteration, the constraint is not yet active and not yet properly scaled. Therefore, the relative change of the objective is used to check convergence
                    if outer_iteration == 1:
                        if abs(
                                self.data_logger.GetValue(
                                    "rel_change_obj", total_iteration)
                        ) < self.inner_iteration_tolerance:
                            break
                    else:
                        if abs(dL_relative) < self.inner_iteration_tolerance:
                            break

                if penalty_value == 0.0:
                    is_design_converged = True
                    break

                print("\n> Time needed for current optimization step = ",
                      timer.GetLapTime(), "s")
                print("> Time needed for total optimization so far = ",
                      timer.GetTotalTime(), "s")

            # Compute the penalty factor such that the estimated Lagrange multiplier is obtained
            if outer_iteration == 1:
                penalty_factor = self.estimated_lagrange_multiplier / penalty_value

            # Update lambda
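            # (first-order multiplier update of the augmented Lagrangian method: lambda <- lambda + rho * p)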
            current_lambda = current_lambda + penalty_factor * penalty_value

            print("\n> Time needed for current optimization step = ",
                  timer.GetLapTime(), "s")
            print("> Time needed for total optimization so far = ",
                  timer.GetTotalTime(), "s")

            # Check convergence of outer loop
            if outer_iteration == self.max_outer_iterations:
                print(
                    "\n> Maximal outer iterations of optimization problem reached!"
                )
                break

            if is_max_total_iterations_reached:
                print(
                    "\n> Maximal total iterations of optimization problem reached!"
                )
                break

            if is_design_converged:
                print(
                    "\n> Update of design variables is zero. Optimization converged!"
                )
                break
Example 11
    def RunOptimizationLoop(self):
        timer = Timer()
        timer.StartTimer()

        for self.opt_iteration in range(
                1, self.algorithm_settings["max_iterations"].GetInt() + 1):
            print(
                "\n>==================================================================="
            )
            print("> ", timer.GetTimeStamp(),
                  ": Starting optimization iteration ", self.opt_iteration)
            print(
                ">===================================================================\n"
            )

            timer.StartNewLap()

            self.__InitializeNewShape()

            self.__AnalyzeShape()

            self.__PostProcessGradientsObtainedFromAnalysis()

            len_obj, dir_obj, len_eqs, dir_eqs, len_ineqs, dir_ineqs = self.__ConvertAnalysisResultsToLengthDirectionFormat()

            step_length = self.__DetermineMaxStepLength()

            len_bar_obj, len_bar_eqs, len_bar_ineqs = self.__ExpressInStepLengthUnit(
                len_obj, len_eqs, len_ineqs, step_length)

            dX_bar, process_details = self.__DetermineStep(
                len_bar_obj, dir_obj, len_bar_eqs, dir_eqs, len_bar_ineqs,
                dir_ineqs)

            dX = self.__ComputeShapeUpdate(dX_bar, step_length)

            values_to_be_logged = {}
            values_to_be_logged["len_bar_obj"] = len_bar_obj
            values_to_be_logged[
                "len_bar_cons"] = self.__CombineConstraintDataToOrderedList(
                    len_bar_eqs, len_bar_ineqs)
            values_to_be_logged["step_length"] = step_length
            values_to_be_logged["test_norm_dX_bar"] = process_details[
                "test_norm_dX"]
            values_to_be_logged["bi_itrs"] = process_details["bi_itrs"]
            values_to_be_logged["bi_err"] = process_details["bi_err"]
            values_to_be_logged["adj_len_bar_obj"] = process_details[
                "adj_len_obj"]
            values_to_be_logged[
                "adj_len_bar_cons"] = self.__CombineConstraintDataToOrderedList(
                    process_details["adj_len_eqs"],
                    process_details["adj_len_ineqs"])
            values_to_be_logged["norm_dX"] = NormInf3D(dX)

            self.__LogCurrentOptimizationStep(values_to_be_logged)

            print("\n> Time needed for current optimization step = ",
                  timer.GetLapTime(), "s")
            print("> Time needed for total optimization so far = ",
                  timer.GetTotalTime(), "s")
    def __DetermineStep(self, len_obj, dir_obj, len_eqs, dir_eqs, len_ineqs,
                        dir_ineqs):
        print("\n> Starting determination of step...")

        timer = Timer()
        timer.StartTimer()

        # Create a projector object which can perform the projection in the orthogonalized subspace
        projector = Projector(len_obj, dir_obj, len_eqs, dir_eqs, len_ineqs,
                              dir_ineqs, self.algorithm_settings)

        # 1. Run a test projection to check whether there is room for objective improvement,
        # i.e. whether the step length needed to become feasible with an inactive threshold is smaller than 1,
        # so that part of the step can be dedicated to improving the objective
        len_obj_test = 0.01
        inactive_threshold = 100
        test_norm_dX, is_projection_successful = projector.RunProjection(
            len_obj_test, inactive_threshold)

        print("> Time needed for one projection step = ", timer.GetTotalTime(),
              "s")

        # 2. Determine the step following one of two modes, depending on the previously found step length to the feasible domain
        if is_projection_successful:
            if test_norm_dX < 1:  # Minimizing mode
                print("\n> Computing projection case 1...")

                func = lambda len_obj: projector.RunProjection(
                    len_obj, inactive_threshold)

                len_obj_min = len_obj_test
                len_obj_max = 1.3
                bi_target = 1
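                # (the bisectioning adjusts len_obj so that the projected step
                # norm returned by func reaches the target value of 1, i.e. the
                # full step length is used)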
                bi_tolerance = self.algorithm_settings[
                    "bisectioning_tolerance"].GetDouble()
                bi_max_itr = self.algorithm_settings[
                    "bisectioning_max_itr"].GetInt()
                len_obj_result, bi_itrs, bi_err = PerformBisectioning(
                    func, len_obj_min, len_obj_max, bi_target, bi_tolerance,
                    bi_max_itr)

                projection_results = projector.GetDetailedResultsOfLatestProjection()

            else:  # Correction mode
                print("\n> Computing projection case 2...")

                len_obj = self.algorithm_settings[
                    "obj_share_during_correction"].GetDouble()
                func = lambda threshold: projector.RunProjection(
                    len_obj, threshold)

                threshold_min = 0
                threshold_max = 1.3
                bi_target = 1
                bi_tolerance = self.algorithm_settings[
                    "bisectioning_tolerance"].GetDouble()
                bi_max_itr = self.algorithm_settings[
                    "bisectioning_max_itr"].GetInt()
                l_threshold_result, bi_itrs, bi_err = PerformBisectioning(
                    func, threshold_min, threshold_max, bi_target,
                    bi_tolerance, bi_max_itr)

                projection_results = projector.GetDetailedResultsOfLatestProjection()
        else:
            raise RuntimeError(
                "Case of a non-converged test projection is not implemented yet!"
            )

        print("\n> Time needed for determining step = ", timer.GetTotalTime(),
              "s")

        process_details = {
            "test_norm_dX": test_norm_dX,
            "bi_itrs": bi_itrs,
            "bi_err": bi_err,
            "adj_len_obj": projection_results["adj_len_obj"],
            "adj_len_eqs": projection_results["adj_len_eqs"],
            "adj_len_ineqs": projection_results["adj_len_ineqs"]
        }

        return projection_results["dX"], process_details
Example 13
import numpy as np
import multiprocessing as mp
import pandas as pd
from custom_timer import Timer
import data_generator_lib
from tqdm import tqdm


pbar = tqdm(total=388800)

def progress(x):
    pbar.update()


if __name__ == "__main__":
    with Timer("Data Generation"):
        # Set up constant parameters: room dim etc..
        b = Binaural(room_dim=np.r_[3., 3., 2.5],
                     max_order=17,
                     speed_of_sound=343,
                     inter_aural_distance=0.2,
                     mic_height=1)

        # set up parallel processing
        pool = mp.Pool(processes=16)

        # parallel processing of data
        results = [pool.apply_async(data_generator_lib.generate_training_data,
                                    args=(
                                        source_azimuth_x,
                                        source_distance_from_room_centre,
Example 14
def tindar_experiment(experiment_id="default_id",
                      n_list=[10, 30, 100, 200, 300],
                      connectedness_list=[1, 4, 8],
                      solvers=["pulp", "heuristic"],
                      repeat=10,
                      result_directory=PROJECT_DIR + "/data",
                      save_problem_and_solution=False,
                      verbose=True):
    '''Writes results of Tindar experiment to a json file

    Parameters
    ----------
    experiment_id: str
    n_list: list of ints
        how many people in Tindar community
    connectedness_list: list of ints or floats
        controlling how many people are interested in each other
    solvers: list of strings
        pulp and/or heuristic, which solvers should be used to
        compute results
    repeat: int
        number of times a combination of n - connectedness should be
        repeated
    result_directory: str of a path
    save_problem_and_solution: bool
        if true, saves the love_matrix and solution matrix
    verbose: bool
    '''

    result_path = f"{result_directory}/results_experiment_{experiment_id}.json"

    parameters = tuple(itertools.product(n_list, connectedness_list))

    tindar_problems_nested = [[
        TindarGenerator(p[0], p[1]) for _ in range(repeat)
    ] for p in parameters]
    tindar_problems = [
        item for sublist in tindar_problems_nested for item in sublist
    ]
    tindars = [
        Tindar(tindar_problem=tindar_problem)
        for tindar_problem in tindar_problems
    ]

    results = []
    counter = 1
    timer = Timer()
    for solver in solvers:
        for j, tp in enumerate(tindars):
            if verbose:
                print("----------------------------------------------------")
                print(f"Experiment {counter}/{(len(tindars)*len(solvers))}: "
                      f"n={tp.n} , connectedness={tp.connectedness}, "
                      f"solver={solver}")
            timer.start()
            obj, sol, stat = tindar_solution(tp, solver)
            stop = timer.stop()

            result = {
                "experiment_id": experiment_id,
                "tindar_id": j,
                "n": tp.n,
                "connectedness": tp.connectedness,
                "p": tp.p,
                "solver": solver,
                "status": stat,
                "objective_value": obj,
                "time": stop
            }

            if save_problem_and_solution:
                result = {
                    **result, "love_matrix": tp.love_matrix,
                    "solution": sol
                }

            if verbose:
                print(f"{solver} objective value: {obj}")

            results.append(result)

            counter += 1

    with open(result_path, 'w') as fp:
        json.dump(results, fp)
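
A minimal usage sketch for tindar_experiment (a hypothetical call; the argument values are illustrative and assume PROJECT_DIR and the Tindar classes are importable as in the example above):

if __name__ == "__main__":
    # Small smoke-test grid: two community sizes, two connectedness levels,
    # the heuristic solver only, and two repetitions per combination.
    tindar_experiment(experiment_id="smoke_test",
                      n_list=[10, 30],
                      connectedness_list=[1, 4],
                      solvers=["heuristic"],
                      repeat=2,
                      verbose=True)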
Example 15
            if counter > MAX_RUNS or counter > N_SAMPLE:
                break
            print("Processing chunk {}".format(idx))

            _min, _max = np.min(chunk), np.max(chunk)

            # Getting sample data
            sample_raw = eeg.raw.copy().crop(_min, _max, include_tmax=False)
            eog_channels = eeg.eog_channels.copy().crop(_min,
                                                        _max,
                                                        include_tmax=False)

            # Apply baseline correction
            with Timer(
                    block_name='Baseline correction',
                    verbose=True,
            ) as t1:
                sample_raw_baseline = sample_raw.copy()
                sample_raw_baseline.apply_function(baseline_calc)
            timings = timings.append(
                {
                    'task': i,
                    'chunk': idx,
                    'step': t1.block_name,
                    'time': t1.elapsed_secs
                },
                ignore_index=True)
            # eog_channels.apply_function(baseline_calc)

            # Apply bandpass filter
            with Timer(
Example 16
            print("Processing chunk {}".format(idx))
            # if counter > N_SAMPLE:
            # break

            _min, _max = np.min(chunk), np.max(chunk)

            # Getting sample data
            sample_raw = eeg.raw.copy().crop(_min, _max, include_tmax=False)
            eog_channels = eeg.eog_channels.copy().crop(_min,
                                                        _max,
                                                        include_tmax=False)

            # Apply baseline correction
            with Timer(
                    block_name='Baseline correction',
                    verbose=True,
            ):
                sample_raw_baseline = sample_raw.copy()
                sample_raw_baseline.apply_function(baseline_calc)
            # eog_channels.apply_function(baseline_calc)

            # Apply bandpass filter
            with Timer(
                    block_name='Bandpass filter',
                    verbose=True,
            ):
                sample_raw_bandpass = sample_raw_baseline.copy()
                sample_raw_bandpass.apply_function(butter_bandpass_filter)
            # eog_channels.apply_function(butter_bandpass_filter)

            # Apply ICA
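
The EEG examples (15 and 16) use Timer as a context manager imported from a custom_timer module. A minimal sketch consistent with that usage is given below (a hypothetical reconstruction; the real custom_timer module may differ, and the Kratos examples above use a different Timer API):

import time


class Timer:
    """Context manager that times a named block of code."""

    def __init__(self, block_name='block', verbose=False):
        self.block_name = block_name
        self.verbose = verbose
        self.elapsed_secs = None

    def __enter__(self):
        self._start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # elapsed_secs and block_name are the attributes read by the callers
        # above (e.g. t1.elapsed_secs, t1.block_name in Example 15).
        self.elapsed_secs = time.time() - self._start
        if self.verbose:
            print("{}: {:.3f} s".format(self.block_name, self.elapsed_secs))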