Example #1
def default_termination(problem):
    if problem.n_obj > 1:
        termination = MultiObjectiveDefaultTermination()
    else:
        termination = SingleObjectiveDefaultTermination()

    return termination
def create_optimization_run(
        pop_size=pop_size_def,
        n_offsprings=n_offsprings_def,
        sampling=sampling_def,
        crossover=crossover_def,
        mutation=mutation_def,
        eliminate_duplicates=True,
        filename="foo",
        # termination = get_termination("n_gen",50),
        termination=MultiObjectiveDefaultTermination(
            x_tol=x_tol_def,
            cv_tol=cv_tol_def,
            f_tol=f_tol_def,
            nth_gen=nth_gen_def,
            n_last=n_last_def,
            n_max_gen=max_gen,
            n_max_evals=n_max_evals_def,
        ),
        lockdown_policy_control_days=lockdown_policy_control_days_def,
        lockdown_policy_lower_limits=lockdown_policy_lower_limits_def,  # can't use len(l_p_c_d) within function param def
        lockdown_policy_upper_limits=lockdown_policy_upper_limits_def,  # needs to be different from lower limit
        testing_policy_control_days=testing_policy_control_days_def,
        testing_policy_lower_limits=testing_policy_lower_limits_def,
        testing_policy_upper_limits=testing_policy_upper_limits_def,
        max_daily_tests=max_daily_tests_def,
        p_ICU=p_ICU_def,
        C_hos=C_hos_def,
        T_rec=T_rec_def,  # recovery time in years from end of experiment
        **epidemic_model_params):

    model, model_case = create_epidemic_model(**epidemic_model_params)

    #print("DEBUG policy parameters:")
    #print("ld days:", lockdown_policy_control_days)
    #print("ld lolim: ", lockdown_policy_lower_limits)
    #print("ld hilim:", lockdown_policy_upper_limits)
    #print("t days:", testing_policy_control_days)
    #print("t lolim: ", testing_policy_lower_limits)
    #print("t hilim:", testing_policy_upper_limits)

    problem = COVID_policy(
        model, model_case, lockdown_policy_control_days,
        lockdown_policy_lower_limits, lockdown_policy_upper_limits,
        testing_policy_control_days, testing_policy_lower_limits,
        testing_policy_upper_limits, max_daily_tests, p_ICU, C_hos, T_rec)

    # create initial population here

    # random sampling for the initial population (note: this overrides the
    # `sampling` argument passed to create_optimization_run)
    initial_pop_x = get_sampling("real_random")

    algorithm = NSGA2(pop_size=pop_size,
                      n_offsprings=n_offsprings,
                      sampling=initial_pop_x,
                      crossover=crossover,
                      mutation=mutation,
                      eliminate_duplicates=True)

    return problem, algorithm, termination, model, model_case
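A minimal usage sketch, assuming the module-level `*_def` defaults referenced in the signature above are defined elsewhere in the original file; the argument values shown here are placeholders.

from pymoo.optimize import minimize

problem, algorithm, termination, model, model_case = create_optimization_run(
    pop_size=60,            # placeholder values; all other arguments fall back
    n_offsprings=30,        # to the module-level *_def defaults
    filename="example_run",
)

res = minimize(problem,
               algorithm,
               termination,
               seed=1,
               save_history=True,
               verbose=True)

print("Non-dominated objective values:", res.F)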
Example #3
def minimize(problem, algorithm, termination=None, **kwargs):
    """

    Minimization of function of one or more variables, objectives and constraints.

    This is a convenience function for executing an algorithm with default settings that have turned
    out to work well on test problems. However, evolutionary computation relies on customizing the
    meta-algorithm, so configuring the algorithm through the object-oriented interface is recommended
    to improve convergence.

    Parameters
    ----------

    problem : pymoo.problem
        A problem object which is defined using pymoo.

    algorithm : :class:`~pymoo.model.algorithm.Algorithm`
        The algorithm object that should be used for the optimization.

    termination : class or tuple
        The termination criterion that is used to stop the algorithm.


    Returns
    -------
    res : :class:`~pymoo.model.result.Result`
        The optimization result represented as an object.

    """

    # create a copy of the algorithm object to ensure no side-effects
    algorithm = copy.deepcopy(algorithm)

    # set the termination criterion and store it in the algorithm object
    if termination is not None and not isinstance(termination, Termination):
        if isinstance(termination, str):
            termination = get_termination(termination)
        else:
            termination = get_termination(*termination)

    # initialize the method given a problem
    algorithm.initialize(problem, termination=termination, **kwargs)

    if algorithm.termination is None:
        if problem.n_obj > 1:
            algorithm.termination = MultiObjectiveDefaultTermination()
        else:
            algorithm.termination = SingleObjectiveDefaultTermination()

    # actually execute the algorithm
    res = algorithm.solve()

    # store the copied algorithm in the result object
    res.algorithm = algorithm

    return res
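A hedged usage sketch for the `minimize` convenience function above: `termination` may be a `Termination` object, a string, or a tuple that is unpacked into `get_termination`. The imports assume the pymoo 0.4/0.5-era layout used throughout these examples.

from pymoo.factory import get_problem
from pymoo.algorithms.nsga2 import NSGA2   # pymoo >= 0.5: pymoo.algorithms.moo.nsga2

problem = get_problem("zdt1")
algorithm = NSGA2(pop_size=100)

# the tuple is expanded into get_termination("n_gen", 100) inside minimize()
res = minimize(problem, algorithm, termination=("n_gen", 100), seed=1, verbose=False)
print(res.F.shape)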
Example #4
 def termination(self):
     termination = MultiObjectiveDefaultTermination(
         x_tol=self.x_tol,
         cv_tol=self.cv_tol,
         f_tol=self.f_tol,
         nth_gen=self.nth_gen,
         n_last=self.n_last,
         n_max_gen=self.n_max_gen,
         n_max_evals=self.n_max_evals)
     return termination
Example #5
    def __init__(self,
                 CR=0.3,
                 F=None,
                 variant="DE/rand/1/bin",
                 mutation=PM(eta=15),
                 **kwargs):

        super().__init__(CR=CR,
                         F=F,
                         variant=variant,
                         mutation=mutation,
                         display=MultiObjectiveDisplay(),
                         **kwargs)
        self.default_termination = MultiObjectiveDefaultTermination()
def calculate_pareto_front(
    state_space_equation,
    tau,
    y_measured,
    x0,
    xl: Union[np.ndarray, None],
    xu: Union[np.ndarray, None],
    n_var: int,
    n_obj: int,
    normalize_quaternions: bool,
    n_constr=0,
    pop_size=100,
    n_max_gen=1000,
    verbose=True,
    display=MyDisplay(),
) -> Result:

    # calculate pareto frontier
    problem = UUVParameterProblem(
        state_space_equation,
        tau,
        y_measured,
        x0,
        n_var=n_var,
        n_obj=n_obj,
        n_constr=n_constr,
        xl=xl,
        xu=xu,
        normalize_quaternions=normalize_quaternions,
    )
    algorithm = NSGA2(
        pop_size=pop_size,
        # n_offsprings=int(pop_size / 2),
        crossover=SimulatedBinaryCrossover(eta=15, prob=0.9),
        mutation=PolynomialMutation(prob=None, eta=15),
    )
    # alternative termination criteria; only termination_default is used in the minimize() call below
    termination_default = MultiObjectiveDefaultTermination(n_max_gen=n_max_gen)
    termination_design_space = DesignSpaceToleranceTermination(n_max_gen=n_max_gen)
    termination_generations = get_termination("n_gen", 100)

    res = minimize(
        problem,
        algorithm,
        termination_default,
        verbose=verbose,
        display=display,
    )

    return res
Example #7
    def __init__(self,
                 pop_size=100,
                 sampling=FloatRandomSampling(),
                 selection=TournamentSelection(func_comp=binary_tournament),
                 crossover=SBX(eta=15, prob=0.9),
                 mutation=PM(prob=None, eta=20),
                 eliminate_duplicates=True,
                 n_offsprings=None,
                 display=MultiObjectiveDisplay(),
                 **kwargs):
        """
        Adapted from:
        Panichella, A. (2019). An adaptive evolutionary algorithm based on non-euclidean geometry for many-objective
        optimization. GECCO 2019 - Proceedings of the 2019 Genetic and Evolutionary Computation Conference, July, 595–603.
        https://doi.org/10.1145/3321707.3321839

        Parameters
        ----------
        pop_size : {pop_size}
        sampling : {sampling}
        selection : {selection}
        crossover : {crossover}
        mutation : {mutation}
        eliminate_duplicates : {eliminate_duplicates}
        n_offsprings : {n_offsprings}
        """

        super().__init__(pop_size=pop_size,
                         sampling=sampling,
                         selection=selection,
                         crossover=crossover,
                         mutation=mutation,
                         survival=AGEMOEASurvival(),
                         eliminate_duplicates=eliminate_duplicates,
                         n_offsprings=n_offsprings,
                         display=display,
                         advance_after_initial_infill=True,
                         **kwargs)
        self.default_termination = MultiObjectiveDefaultTermination()

        self.tournament_type = 'comp_by_rank_and_crowding'
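The constructor above belongs to AGE-MOEA (Panichella, 2019). A minimal run sketch, assuming the class is exposed as `AGEMOEA` under the pymoo 0.5 module layout:

from pymoo.algorithms.moo.age import AGEMOEA   # assumed pymoo >= 0.5 path
from pymoo.factory import get_problem
from pymoo.optimize import minimize

res = minimize(get_problem("zdt3"),
               AGEMOEA(pop_size=100),
               ("n_gen", 200),   # explicit termination; if omitted, the
                                 # MultiObjectiveDefaultTermination() set above applies
               seed=1,
               verbose=False)
print(len(res.F), "non-dominated solutions found")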
Example #8
    def __init__(self,
                 pop_size=100,
                 sampling=FloatRandomSampling(),
                 selection=TournamentSelection(func_comp=binary_tournament),
                 crossover=SimulatedBinaryCrossover(eta=15, prob=0.9),
                 mutation=PolynomialMutation(prob=None, eta=20),
                 survival=RankAndCrowdingSurvival(),
                 eliminate_duplicates=True,
                 n_offsprings=None,
                 display=MultiObjectiveDisplay(),
                 **kwargs):
        """

        Parameters
        ----------
        pop_size : {pop_size}
        sampling : {sampling}
        selection : {selection}
        crossover : {crossover}
        mutation : {mutation}
        eliminate_duplicates : {eliminate_duplicates}
        n_offsprings : {n_offsprings}

        """

        super().__init__(pop_size=pop_size,
                         sampling=sampling,
                         selection=selection,
                         crossover=crossover,
                         mutation=mutation,
                         survival=survival,
                         eliminate_duplicates=eliminate_duplicates,
                         n_offsprings=n_offsprings,
                         display=display,
                         advance_after_initial_infill=True,
                         **kwargs)
        self.default_termination = MultiObjectiveDefaultTermination()
        self.tournament_type = 'comp_by_dom_and_crowding'
Example #9
tic = time.perf_counter()

problem = MyProblem()

# BRKGA algorithm parameters - can be altered
algorithm = BRKGA(n_elites=250,
                  n_offsprings=800,
                  n_mutants=100,
                  bias=0.3,
                  eliminate_duplicates=True)

# termination criteria - can be altered
termination = MultiObjectiveDefaultTermination(x_tol=1e-8,
                                               cv_tol=1e-6,
                                               f_tol=0.0025,
                                               nth_gen=5,
                                               n_last=300,
                                               n_max_gen=300,
                                               n_max_evals=40000)

#execute problem
res = minimize(problem,
               algorithm,
               termination,
               seed=1,
               pf=problem.pareto_front(use_cache=False),
               save_history=True,
               verbose=True)

# collect the population in each generation
pop_each_gen = [a.pop for a in res.history]
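A hedged follow-up sketch: one common use of the collected populations is tracking a convergence indicator such as hypervolume per generation. The reference point below is a placeholder for a two-objective problem and must dominate all objective vectors of the actual `MyProblem`.

import numpy as np
from pymoo.factory import get_performance_indicator

hv = get_performance_indicator("hv", ref_point=np.array([1.1, 1.1]))  # placeholder ref point
hv_per_gen = [hv.calc(pop.get("F")) for pop in pop_each_gen]  # pymoo >= 0.5: hv.do(...)

toc = time.perf_counter()
print(f"optimization took {toc - tic:0.1f} s; final hypervolume = {hv_per_gen[-1]:.4f}")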
Example #10
def main():
    """

    Optimization module main driver

    """

    # Define initialization objects
    initializer_class = SmellInitialization if config.WARM_START else RandomInitialization
    initializer_object = initializer_class(
        udb_path=config.UDB_PATH,
        population_size=config.POPULATION_SIZE,
        lower_band=config.LOWER_BAND,
        upper_band=config.UPPER_BAND
    )

    # -------------------------------------------
    # Define optimization problems
    problems = list()  # 0: Genetic (Single), 1: NSGA-II (Multi), 2: NSGA-III (Many) objectives problems
    problems.append(
        ProblemSingleObjective(
            n_objectives=config.NUMBER_OBJECTIVES,
            n_refactorings_lowerbound=config.LOWER_BAND,
            n_refactorings_upperbound=config.UPPER_BAND,
            evaluate_in_parallel=False,
        )
    )
    problems.append(
        ProblemMultiObjective(
            n_objectives=config.NUMBER_OBJECTIVES,
            n_refactorings_lowerbound=config.LOWER_BAND,
            n_refactorings_upperbound=config.UPPER_BAND,
            evaluate_in_parallel=False,
        )
    )
    problems.append(
        ProblemManyObjective(
            n_objectives=config.NUMBER_OBJECTIVES,
            n_refactorings_lowerbound=config.LOWER_BAND,
            n_refactorings_upperbound=config.UPPER_BAND,
            evaluate_in_parallel=False,
            verbose_design_metrics=True,
        )
    )

    # Define search algorithms
    algorithms = list()
    # 1: GA
    alg1 = GA(
        pop_size=config.POPULATION_SIZE,
        sampling=PopulationInitialization(initializer_object),
        crossover=AdaptiveSinglePointCrossover(prob=config.CROSSOVER_PROBABILITY),
        # crossover=get_crossover("real_k_point", n_points=2),
        mutation=BitStringMutation(prob=config.MUTATION_PROBABILITY, initializer=initializer_object),
        eliminate_duplicates=ElementwiseDuplicateElimination(cmp_func=is_equal_2_refactorings_list),
        n_gen=config.NGEN,
    )
    algorithms.append(alg1)

    # 2: NSGA-II
    alg2 = NSGA2(
        pop_size=config.POPULATION_SIZE,
        sampling=PopulationInitialization(initializer_object),
        crossover=AdaptiveSinglePointCrossover(prob=config.CROSSOVER_PROBABILITY),
        # crossover=get_crossover("real_k_point", n_points=2),
        mutation=BitStringMutation(prob=config.MUTATION_PROBABILITY, initializer=initializer_object),
        eliminate_duplicates=ElementwiseDuplicateElimination(cmp_func=is_equal_2_refactorings_list),
        n_gen=config.NGEN,
    )
    algorithms.append(alg2)

    # 3: NSGA-III
    # pop_size must be equal or larger than the number of reference directions
    number_of_references_points = config.POPULATION_SIZE - int(config.POPULATION_SIZE * 0.20)
    ref_dirs = get_reference_directions(
        'energy',  # algorithm
        config.NUMBER_OBJECTIVES,  # number of objectives
        number_of_references_points,  # number of reference directions
        seed=1
    )
    alg3 = NSGA3(
        ref_dirs=ref_dirs,
        pop_size=config.POPULATION_SIZE,  # 200
        sampling=PopulationInitialization(initializer_object),
        selection=TournamentSelection(func_comp=binary_tournament),
        crossover=AdaptiveSinglePointCrossover(prob=config.CROSSOVER_PROBABILITY, ),
        # crossover=get_crossover("real_k_point", n_points=2),
        mutation=BitStringMutation(prob=config.MUTATION_PROBABILITY, initializer=initializer_object),
        eliminate_duplicates=ElementwiseDuplicateElimination(cmp_func=is_equal_2_refactorings_list),
        n_gen=config.NGEN,
    )
    algorithms.append(alg3)

    # Termination of algorithms
    my_termination = MultiObjectiveDefaultTermination(
        x_tol=None,
        cv_tol=None,
        f_tol=0.0015,
        nth_gen=5,
        n_last=5,
        n_max_gen=config.MAX_ITERATIONS,  # about 1000 - 1400
        n_max_evals=1e6
    )

    # Do optimization for various problems with various algorithms
    res = minimize(
        problem=problems[config.PROBLEM],
        algorithm=algorithms[config.PROBLEM],
        termination=my_termination,
        seed=1,
        verbose=False,
        copy_algorithm=True,
        copy_termination=True,
        save_history=False,
        callback=LogCallback(),
    )
    # np.save('checkpoint', res.algorithm)

    # Log results
    logger.info(f"***** Algorithm was finished in {res.algorithm.n_gen + config.NGEN} generations *****")
    logger.info(" ")
    logger.info("============ time information ============")
    logger.info(f"Start time: {datetime.fromtimestamp(res.start_time).strftime('%Y-%m-%d %H:%M:%S')}")
    logger.info(f"End time: {datetime.fromtimestamp(res.end_time).strftime('%Y-%m-%d %H:%M:%S')}")
    logger.info(f"Execution time in seconds: {res.exec_time}")
    logger.info(f"Execution time in minutes: {res.exec_time / 60}")
    logger.info(f"Execution time in hours: {res.exec_time / (60 * 60)}")
    # logger.info(f"Number of generations: {res.algorithm.n_gen}")
    # logger.info(f"Number of generations", res.algorithm.termination)

    # Log optimum solutions
    logger.info("============ All opt solutions ============")
    for i, ind in enumerate(res.opt):
        logger.info(f'Opt refactoring sequence {i}:')
        logger.info(ind.X)
        logger.info(f'Opt refactoring sequence corresponding objectives vector {i}:')
        logger.info(ind.F)
        logger.info("-" * 75)

    # Log best refactorings
    logger.info("============ Best refactoring sequences (a set of non-dominated solutions) ============")
    for i, ind in enumerate(res.X):
        logger.info(f'Best refactoring sequence {i}:')
        logger.info(ind)
        logger.info("-" * 75)
    logger.info("============ Best objective values (a set of non-dominated solutions) ============")
    for i, ind_objective in enumerate(res.F):
        logger.info(f'Best refactoring sequence corresponding objectives vector {i}:')
        logger.info(ind_objective)
        logger.info("-" * 75)

    # Save best refactorings
    population_trimmed = []
    objective_values_content = ''
    for chromosome in res.X:
        chromosome_new = []
        if config.PROBLEM == 0:  # i.e., single objective problem
            for gene_ in chromosome:
                chromosome_new.append((gene_.name, gene_.params))
        else:
            for gene_ in chromosome[0]:
                chromosome_new.append((gene_.name, gene_.params))
        population_trimmed.append(chromosome_new)

    for objective_vector in res.F:
        objective_values_content += f'{res.algorithm.n_gen + config.NGEN},'
        if config.PROBLEM == 0:
            objective_values_content += f'{objective_vector},'
        else:
            for objective_ in objective_vector:
                objective_values_content += f'{objective_},'
        objective_values_content += '\n'

    best_refactoring_sequences_path = os.path.join(
        config.PROJECT_LOG_DIR,
        f'best_refactoring_sequences_after_{res.algorithm.n_gen + config.NGEN}gens.json'
    )
    with open(best_refactoring_sequences_path, mode='w', encoding='utf-8') as fp:
        json.dump(population_trimmed, fp, indent=4)

    best_refactoring_sequences_objectives_path = os.path.join(
        config.PROJECT_LOG_DIR,
        f'best_refactoring_sequences_objectives_after_{res.algorithm.n_gen + config.NGEN}gens.csv'
    )
    with open(best_refactoring_sequences_objectives_path, mode='w', encoding='utf-8') as fp:
        fp.write(objective_values_content)

    try:
        pf = res.F
        # dm = HighTradeoffPoints()
        dm = get_decision_making("high-tradeoff")
        I = dm.do(pf)

        logger.info("============ High-tradeoff points refactoring sequences ============")
        for i, ind in enumerate(res.X[I]):
            logger.info(f'High tradeoff points refactoring sequence {i}:')
            logger.info(ind)
            logger.info("-" * 75)
        logger.info("============ High-tradeoff points objective values  ============")
        for i, ind_objective in enumerate(pf[I]):
            logger.info(f'High-tradeoff points refactoring sequence corresponding objectives vector {i}:')
            logger.info(ind_objective)
            logger.info("-" * 75)

        logger.info("High-tradeoff points mean:")
        logger.info(np.mean(pf[I], axis=0))
        logger.info("High-tradeoff points median:")
        logger.info(np.median(pf[I], axis=0))

        # Save high-tradeoff refactorings
        population_trimmed = []
        objective_values_content = ''
        for chromosome in res.X[I]:
            chromosome_new = []
            if config.PROBLEM == 0:  # i.e., single objective problem
                for gene_ in chromosome:
                    chromosome_new.append((gene_.name, gene_.params))
            else:
                for gene_ in chromosome[0]:
                    chromosome_new.append((gene_.name, gene_.params))
            population_trimmed.append(chromosome_new)

        for objective_vector in pf[I]:
            objective_values_content += f'{res.algorithm.n_gen + config.NGEN},'
            if config.PROBLEM == 0:
                objective_values_content += f'{objective_vector},'
            else:
                for objective_ in objective_vector:
                    objective_values_content += f'{objective_},'
            objective_values_content += '\n'

        high_tradeoff_path = os.path.join(
            config.PROJECT_LOG_DIR,
            f'high_tradeoff_points_refactoring_after_{res.algorithm.n_gen + config.NGEN}gens.json'
        )
        with open(high_tradeoff_path, mode='w', encoding='utf-8') as fp:
            json.dump(population_trimmed, fp, indent=4)

        high_tradeoff_path_objectives_path = os.path.join(
            config.PROJECT_LOG_DIR,
            f'high_tradeoff_points_after_{res.algorithm.n_gen + config.NGEN}gens.csv'
        )
        with open(high_tradeoff_path_objectives_path, mode='w', encoding='utf-8') as fp:
            fp.write(objective_values_content)

    except Exception:
        logger.error("No multi-optimal solutions (error in computing high tradeoff points)!")
Example #11
algorithm = NSGA2(
    pop_size=200,  #100,#40, # initial population size
    n_offsprings=100,  #50,#10, # new individuals per generation
    sampling=get_sampling("real_random"),
    crossover=get_crossover("real_sbx", prob=0.9, eta=15),
    mutation=get_mutation("real_pm", eta=20),
    eliminate_duplicates=True)

# =============================================================================
#     #Define termination criteria
# =============================================================================
#https://pymoo.org/interface/termination.html
termination = MultiObjectiveDefaultTermination(
    x_tol=1e-8,
    cv_tol=1e-6,
    f_tol=0.0025,
    nth_gen=5,
    n_last=30,
    n_max_gen=30,  # maximal number of generations
    n_max_evals=100000)

# =============================================================================
#     #Solve the problem (Optimization)
# =============================================================================
#https://pymoo.org/interface/result.html
res = minimize(
    problem,
    algorithm,
    termination,
    seed=1,
    #pf=problem.pareto_front(use_cache=False),
    save_history=True)
Example #12
def main():
    # Define search algorithms
    algorithms = list()
    # 1: GA
    algorithm = GA(
        pop_size=config.POPULATION_SIZE,
        sampling=PureRandomInitialization(),
        crossover=AdaptiveSinglePointCrossover(prob=0.9),
        # crossover=get_crossover("real_k_point", n_points=2),
        mutation=BitStringMutation(prob=0.1),
        eliminate_duplicates=ElementwiseDuplicateElimination(
            cmp_func=is_equal_2_refactorings_list))
    algorithms.append(algorithm)

    # 2: NSGA II
    algorithm = NSGA2(
        pop_size=config.POPULATION_SIZE,
        sampling=PureRandomInitialization(),
        crossover=AdaptiveSinglePointCrossover(prob=0.9),
        # crossover=get_crossover("real_k_point", n_points=2),
        mutation=BitStringMutation(prob=0.1),
        eliminate_duplicates=ElementwiseDuplicateElimination(
            cmp_func=is_equal_2_refactorings_list))
    algorithms.append(algorithm)

    # 3: NSGA III
    # pop_size must be equal or larger than the number of reference directions
    number_of_references_points = config.POPULATION_SIZE - int(
        config.POPULATION_SIZE * 0.20)
    ref_dirs = get_reference_directions(
        'energy',  # algorithm
        8,  # number of objectives
        number_of_references_points,  # number of reference directions
        seed=1)
    algorithm = NSGA3(
        ref_dirs=ref_dirs,
        pop_size=config.POPULATION_SIZE,  # 200
        sampling=PureRandomInitialization(),
        selection=TournamentSelection(func_comp=binary_tournament),
        crossover=AdaptiveSinglePointCrossover(prob=0.8),
        # crossover=get_crossover("real_k_point", n_points=2),
        mutation=BitStringMutation(prob=0.1),
        eliminate_duplicates=ElementwiseDuplicateElimination(
            cmp_func=is_equal_2_refactorings_list))
    algorithms.append(algorithm)

    # -------------------------------------------
    # Define problems
    problems = list()
    problems.append(
        ProblemSingleObjective(n_refactorings_lowerbound=config.LOWER_BAND,
                               n_refactorings_upperbound=config.UPPER_BAND))
    problems.append(
        ProblemMultiObjective(n_refactorings_lowerbound=config.LOWER_BAND,
                              n_refactorings_upperbound=config.UPPER_BAND))
    problems.append(
        ProblemManyObjective(n_refactorings_lowerbound=config.LOWER_BAND,
                             n_refactorings_upperbound=config.UPPER_BAND,
                             evaluate_in_parallel=True))

    # Termination of algorithms
    my_termination = MultiObjectiveDefaultTermination(
        x_tol=None,
        cv_tol=None,
        f_tol=0.0015,
        nth_gen=10,
        n_last=20,
        n_max_gen=config.MAX_ITERATIONS,  # about 1000 - 1400
        n_max_evals=1e6)

    # Do optimization for various problems with various algorithms
    res = minimize(
        problem=problems[2],
        algorithm=algorithms[2],
        termination=my_termination,
        seed=1,
        verbose=False,
        copy_algorithm=True,
        copy_termination=True,
        save_history=False,
    )
    # np.save('checkpoint', res.algorithm)

    # Log results
    logger.info("\n** FINISHED **\n")
    logger.info(
        "Best refactoring sequences (a set of non-dominated solutions):")
    logger.info(res.X)
    logger.info("Best objective values (a set of non-dominated solutions):")
    logger.info(res.F)

    logger.info("=" * 75)
    logger.info("Other solutions:")
    for ind in res.opt:
        logger.info(ind.X)
        logger.info(ind.F)
        logger.info("-" * 50)
    logger.info("=" * 75)

    logger.info(f"Start time: {res.start_time}")
    logger.info(f"End time: {res.end_time}")
    logger.info(f"Execution time in seconds: {res.exec_time}")
    logger.info(f"Execution time in minutes: {res.exec_time / 60}")
    logger.info(f"Execution time in hours: {res.exec_time / (60 * 60)}")
    logger.info(f"Number of generations: {res.algorithm.n_gen}")
    # logger.info(f"Number of generations", res.algorithm.termination)

    pf = res.F
    # dm = HighTradeoffPoints()
    dm = get_decision_making("high-tradeoff")
    try:
        I = dm.do(pf)
        logger.info(f"High tradeoff points: {pf[I][0]}")
        logger.info(
            f"High tradeoff points corresponding refactorings: {res.X[I]}")
        logger.info(
            f"The mean improvement of quality attributes: {np.mean(pf[I][0], axis=0)}"
        )
        logger.info(
            f"The median improvement of quality attributes: {np.median(pf[I][0], axis=0)}"
        )
    except Exception:
        logger.info(
            "No multi optimal solutions (error in computing high tradeoff points)!"
        )
Example #13
job = "constraint_analysis"
T = np.pi / (2 * 0.125)
#inv_G_ls = np.linspace(0.5,5,10) * T
inv_G_ls = [0.001 * T]  #np.logspace(-2, 0, 10)[:5] * T

for num, inv_G in enumerate(inv_G_ls):
    dim = (3, 2)  # dimension of the problem
    s, d = dim
    N = (s - 2) * d
    repeat = 1
    r_min = 0.1

    # -----------------------------------------------------------------------------
    # Optimization parameters:
    termination = MultiObjectiveDefaultTermination(x_tol=1e-8,
                                                   f_tol=1e-8,
                                                   n_max_gen=10)  # max=300

    objs = [
        lambda site_w_gt: OpenQT(s,
                                 d,
                                 np.array(site_w_gt[:(s - 2) * d]),
                                 np.array(site_w_gt[(s - 2) * d:-1]),
                                 Gamma=1 / inv_G,
                                 n_p=3,
                                 g_t=site_w_gt[-1]).T_r(epabs=0.01, limit=100)[
                                     0]
    ]

    constr_ieq = constr(s, d, r_min, bc=True)
Example #14
def minimize(problem, algorithm, termination=None, **kwargs):
    """

    Minimization of function of one or more variables, objectives and constraints.

    This is a convenience function for executing an algorithm with default settings that have turned
    out to work well on test problems. However, evolutionary computation relies on customizing the
    meta-algorithm, so configuring the algorithm through the object-oriented interface is recommended
    to improve convergence.

    Parameters
    ----------

    problem : :class:`~pymoo.model.problem.Problem`
        A problem object which is defined using pymoo.

    algorithm : :class:`~pymoo.model.algorithm.Algorithm`
        The algorithm object that should be used for the optimization.

    termination : :class:`~pymoo.model.termination.Termination` or tuple
        The termination criterion that is used to stop the algorithm.

    seed : integer
        The random seed to be used.

    verbose : bool
        Whether output should be printed or not.

    display : :class:`~pymoo.util.display.Display`
        Each algorithm has a default display object for printouts. However, it can be overwritten if desired.

    callback : :class:`~pymoo.model.callback.Callback`
        A callback object which is called each iteration of the algorithm.

    save_history : bool
        Whether the history should be stored or not.

    Returns
    -------
    res : :class:`~pymoo.model.result.Result`
        The optimization result represented as an object.

    """

    # create a copy of the algorithm object to ensure no side-effects
    algorithm = copy.deepcopy(algorithm)

    # get the termination if provided as a tuple - create an object
    if termination is not None and not isinstance(termination, Termination):
        if isinstance(termination, str):
            termination = get_termination(termination)
        else:
            termination = get_termination(*termination)

    # initialize the algorithm object given a problem
    algorithm.initialize(problem, termination=termination, **kwargs)

    # if no termination could be found add the default termination either for single or multi objective
    if algorithm.termination is None:
        if problem.n_obj > 1:
            algorithm.termination = MultiObjectiveDefaultTermination()
        else:
            algorithm.termination = SingleObjectiveDefaultTermination()

    # actually execute the algorithm
    res = algorithm.solve()

    # store the deep copied algorithm in the result object
    res.algorithm = algorithm

    return res
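A minimal sketch of the fallback branch above: when no termination is passed, `minimize` installs `MultiObjectiveDefaultTermination()` (or the single-objective variant) itself. The problem and algorithm choices here are placeholders.

from pymoo.factory import get_problem
from pymoo.algorithms.nsga2 import NSGA2   # pymoo >= 0.5: pymoo.algorithms.moo.nsga2

res = minimize(get_problem("zdt2"), NSGA2(pop_size=100), seed=1, verbose=False)
print("terminated after", res.algorithm.n_gen, "generations")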