def make_multiobjective_function(sources,
                                 bounds=None,
                                 function_type="worst_case",
                                 interpolation_method="nearest",
                                 bad_sources=None):
    """Make the multiobjective function"""

    if function_type == "multiobjective_counting":
        problem = make_multiobjective_function_counting(
            sources, bounds=bounds, interpolation_method=interpolation_method)
        algorithm = NSGAII(
            problem,
            variator=CompoundOperator(  # TODO look further into this
                SBX(), HUX(), PM(), BitFlip()))
    elif function_type == "multiobjective_competing":
        if bad_sources is None:
            raise ValueError(
                "specify bad_sources for multiobjective_competing")
        problem = make_multiobjective_function_competing(
            sources,
            bounds=bounds,
            bad_sources=bad_sources,
            interpolation_method=interpolation_method)  # TODO remove this
        algorithm = NSGAII(problem)
    else:
        raise ValueError("The type : {} was not valid".format(function_type))

    return algorithm
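
# A minimal usage sketch of the function above (assumption: `sources` and
# `bounds` are supplied by the caller, and the make_multiobjective_function_*
# helpers exist elsewhere in the module, as the code above implies).
from platypus import nondominated

algorithm = make_multiobjective_function(
    sources, bounds=bounds, function_type="multiobjective_counting")
algorithm.run(10000)  # termination condition: 10,000 objective evaluations
for solution in nondominated(algorithm.result):
    print(solution.variables, solution.objectives)
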
Example #2
    def fit(self, X, y):

        opt_start_time = time.time()
        kfold = None
        if isinstance(self.cv, int) and self.cv == 1:
            X_train, X_val, y_train, y_val = train_test_split(
                X, y, test_size=0.2, random_state=self.random_seed, stratify=y)
            logger.info("Not using Cross-Validation. "
                        "Performing single train/test split")
        else:
            is_clf = self.model.is_classifier()
            kfold = check_cv(self.cv, y=y, classifier=is_clf)
            # kfold = StratifiedKFold(
            #    n_splits=self.cv, random_state=self.random_seed, shuffle=True
            # )
            logger.info(f"Using Cross-Validation - {kfold}")

        self.ind = 0

        def train_test_model(parameter):
            # First check if we exceeded allocated time budget
            current_time = time.time()
            elapsed_time = current_time - opt_start_time
            if (self.max_opt_time
                    is not None) and (elapsed_time > self.max_opt_time):
                msg = (
                    f"Max optimization time exceeded. "
                    f"Max Opt time = {self.max_opt_time}, Elapsed Time = {elapsed_time}, "
                    f"NFE Completed - {self.ind}")
                raise MaxBudgetExceededException(msg)

            self.ind = self.ind + 1
            logger.info(f"Training population {self.ind}")

            parameter = self.param_to_dict(
                parameter,
                self.model_helper.param_choices,
                self.model_helper.param_categories,
                self.model_helper.param_type,
            )

            scorers = [get_scorer(scorer) for scorer in self.scoring]
            nscorers = len(scorers)

            try:
                if kfold is None:
                    clf = self.model_helper.create_instance(parameter)
                    clf_trained = clf.fit(X_train, y_train)

                    obj_val = [
                        scorer(clf_trained, X_val, y_val) for scorer in scorers
                    ]

                else:

                    obj_scores = [[] for _ in range(nscorers)]

                    # Perform k-fold cross-validation
                    for train_index, test_index in kfold.split(X, y):
                        if isinstance(X, pd.DataFrame):
                            X_train_split, X_val_split = (
                                X.iloc[train_index],
                                X.iloc[test_index],
                            )
                            y_train_split, y_val_split = (
                                y.iloc[train_index],
                                y.iloc[test_index],
                            )
                        else:
                            X_train_split, X_val_split = X[train_index], X[
                                test_index]
                            y_train_split, y_val_split = y[train_index], y[
                                test_index]

                        clf = self.model_helper.create_instance(parameter)
                        clf_trained = clf.fit(X_train_split, y_train_split)

                        obj_score = [
                            scorer(clf_trained, X_val_split, y_val_split)
                            for scorer in scorers
                        ]
                        for i in range(nscorers):
                            obj_scores[i].append(obj_score[i])

                    # Aggregate CV score
                    obj_val = [np.mean(obj_scores[i]) for i in range(nscorers)]
                    logger.debug(f"Obj k-fold scores - {obj_scores}")

                # By default we are solving a minimization MOO problem
                fitnessValue = [
                    self.best_score[i] - obj_val[i] for i in range(nscorers)
                ]
                logger.info(f"Train fitnessValue - {fitnessValue}")

            except jsonschema.ValidationError as e:
                logger.error(f"Caught JSON schema validation error.\n{e}")
                logger.error("Setting fitness (loss) values to infinity")
                fitnessValue = [np.inf for i in range(nscorers)]
                logger.info(f"Train fitnessValue - {fitnessValue}")

            return fitnessValue

        def time_check_callback(alg):
            current_time = time.time()
            elapsed_time = current_time - opt_start_time
            logger.info(
                f"NFE Complete - {alg.nfe}, Elapsed Time - {elapsed_time}")

        parameter_num = len(self.model_helper.param_choices)
        target_num = len(self.scoring)
        # Adjust max_evals if not a multiple of population size. This is
        # required as Platypus performs evaluations in multiples of
        # population_size.
        adjusted_max_evals = (self.max_evals //
                              self.population_size) * self.population_size
        if adjusted_max_evals != self.max_evals:
            logger.info(
                f"Adjusting max_evals to {adjusted_max_evals} from specified {self.max_evals}"
            )

        problem = Problem(parameter_num, target_num)
        problem.types[:] = self.model_helper.types
        problem.function = train_test_model

        # Set the variator based on types of decision variables
        varg = {}
        first_type = problem.types[0].__class__
        all_type_same = all([isinstance(t, first_type) for t in problem.types])
        # use compound operator for mixed types
        if not all_type_same:
            varg["variator"] = CompoundOperator(SBX(), HUX(), PM(), BitFlip())

        algorithm = NSGAII(
            problem,
            population_size=self.population_size,
            **varg,
        )

        try:
            algorithm.run(adjusted_max_evals, callback=time_check_callback)
        except MaxBudgetExceededException as e:
            logger.warning(
                f"Max optimization time budget exceeded. Optimization exited prematurely.\n{e}"
            )

        solutions = nondominated(algorithm.result)
        # solutions = [s for s in algorithm.result if s.feasible]
        # solutions = algorithm.result

        moo_solutions = []
        for solution in solutions:
            vars = []
            for pnum in range(parameter_num):
                vars.append(problem.types[pnum].decode(
                    solution.variables[pnum]))

            vars_dict = self.param_to_dict(
                vars,
                self.model_helper.param_choices,
                self.model_helper.param_categories,
                self.model_helper.param_type,
            )
            moo_solutions.append(self.Soln(vars_dict, solution.objectives))
            logger.info(f"{vars}, {solution.objectives}")

        self.moo_solutions = moo_solutions

        pareto_models = []
        for solution in self.moo_solutions:
            est = self.model_helper.create_instance(solution.variables)
            est_trained = est.fit(X, y)
            pareto_models.append((solution.variables, est_trained))

        self.pareto_models = pareto_models
        return self
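
# Standalone sketch of the mixed-type variator pattern used in fit() above,
# on a toy problem (an assumption for illustration, not the author's
# model_helper-based search space).
from platypus import (NSGAII, Problem, Real, Binary, CompoundOperator,
                      SBX, HUX, PM, BitFlip, nondominated)

def toy_objectives(vars):
    x, bits = vars[0], vars[1]  # decoded as a float and a list of booleans
    return [x ** 2, (1 - x) ** 2 + sum(bits)]

problem = Problem(2, 2)  # 2 decision variables, 2 objectives
problem.types[:] = [Real(0, 1), Binary(4)]
problem.function = toy_objectives

# Mixed Real/Binary types need a compound variator that combines real-coded
# (SBX, PM) and binary-coded (HUX, BitFlip) operators; with a single type,
# Platypus can fall back to its default variator, as in fit() above.
algorithm = NSGAII(problem, population_size=50,
                   variator=CompoundOperator(SBX(), HUX(), PM(), BitFlip()))
algorithm.run(1000)
pareto = nondominated(algorithm.result)
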
Example #3
 for index, (free_node_num, fix_node_num) in enumerate(
         zip(free_nodes_num, fix_nodes_num)):
     problem = Ansys_GA(mapdl, free_node_num, fix_node_num)
     parent = problem.nvars * parent_mult_value
     #parent = 1
     print(parent)
     history = []
     GA_result_dir = os.path.join(
         PATH, "free_{}_fix_{}".format(free_node_num, fix_node_num))
     os.makedirs(GA_result_dir, exist_ok=True)
     if index == 0:  # the first GA does not inherit genes
         problem = Ansys_GA(mapdl, free_node_num, fix_node_num)
         algorithm = NSGAII(problem,
                            population_size=parent,
                            variator=CompoundOperator(
                                SBX(), HUX(), PM(), BitFlip()))
     else:  # subsequent GAs inherit genes from the previous run
         load_GA_result_dir = os.path.join(
             PATH, "free_{}_fix_{}".format(free_nodes_num[index - 1],
                                           fix_nodes_num[index - 1]))
         load_gene_path = os.path.join(load_GA_result_dir, "parents.pk")
         problem = Ansys_GA(mapdl, free_node_num, fix_node_num)
         prior_problem = Ansys_GA(mapdl, free_nodes_num[index - 1],
                                  fix_nodes_num[index - 1])
         algorithm = FixNode_NSGAII(problem,
                                    prior_problem,
                                    gene_path=load_gene_path,
                                    population_size=parent,
                                    variator=CompoundOperator(
                                        SBX(), HUX(), PM(), BitFlip()))
     for i in tqdm(range(generation)):
    #PATH = os.path.join(save_dir, "test")
    #os.makedirs(PATH, exist_ok=True)

    problem = ConstraintIncrementalNodeIncrease_GA(2, 2)

    history = []

    start = time.time()
    # instantiate the optimization algorithm to run in parallel
    with ProcessPoolEvaluator(8) as evaluator:
        #algorithm = NSGAII(problem, population_size=parent, variator=CompoundOperator(SBX(), HUX(), PM(), BitFlip()), evaluator=evaluator)
        algorithm = GeneticAlgorithm(problem,
                                     population_size=parent,
                                     offspring_size=parent,
                                     variator=CompoundOperator(
                                         SBX(), HUX(), UM(), BitFlip()),
                                     evaluator=evaluator)
        for i in tqdm(range(generation)):
            algorithm.step()
            """
            nondominated_solutions = nondominated(algorithm.result)
            efficiency_results = [s.objectives[0] for s in nondominated_solutions]
            max_efficiency = max(efficiency_results)
            """
            max_efficiency = algorithm.fittest.objectives[0]
            history.append(max_efficiency)

            epochs = np.arange(i + 1) + 1
            result_efficiency = np.array(history)
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
Example #5
def moea(name, solsize, popsize, wscalar_, moea_type, max_gen=float('inf'), timeLimit=float('inf')):
    from platypus import HUX, BitFlip, TournamentSelector
    from platypus import Problem, Binary
    from platypus import NSGAII, NSGAIII, SPEA2
    
    from platyplus.operators import varOr
    from platyplus.algorithms import SMSEMOA
    
    time_start = time.perf_counter()
    logger.info('Running '+moea_type+' in '+name)
    
    prMutation = 0.1
    prVariation = 1-prMutation
    
    vartor = varOr(HUX(), BitFlip(1), prVariation, prMutation)
    
    def evalKnapsack(x):
        return wscalar_.fobj([xi[0] for xi in x])
    
    problem = Problem(wscalar_.N, wscalar_.M)
    problem.types[:] = [Binary(1) for i in range(wscalar_.N)]
    problem.function = evalKnapsack
    
    
    if moea_type in ['NSGAII', 'NSGAII-2', 'NSGAII-4']:
        alg = NSGAII(problem, population_size=popsize,
                     selector=TournamentSelector(1),
                     variator=vartor)
    elif moea_type in ['NSGAIII', 'NSGAIII-2', 'NSGAIII-4']:
        alg = NSGAIII(problem, divisions_outer=3,
                      population_size=popsize,
                      selector=TournamentSelector(1),
                      variator=vartor)
    elif moea_type in ['SPEA2', 'SPEA2-2', 'SPEA2-4']:
        alg = SPEA2(problem, population_size=popsize,
                    selector=TournamentSelector(1),
                    variator=vartor)
    elif moea_type in ['SMSdom']:
        alg = SMSEMOA(problem, population_size=popsize,
                      selector=TournamentSelector(1),
                      variator=vartor,
                      selection_method='nbr_dom')
    elif moea_type in ['SMShv']:
        alg = SMSEMOA(problem, population_size=popsize,
                      selector=TournamentSelector(1),
                      variator=vartor,
                      selection_method='hv_contr')
    else:
        raise ValueError('Unknown moea_type: ' + moea_type)
        
    gen = 1
    while gen<max_gen and time.perf_counter()-time_start<timeLimit:
        alg.step()
        gen+=1
    
    alg.population_size = solsize
    alg.step()

    moeaSols = [evalKnapsack(s.variables) for s in alg.result]

    moea_time = time.perf_counter() - time_start

    logger.info(moea_type + ' in ' + name + ' finished.')
    
    return moeaSols, moea_time
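
# Hypothetical call sketch for moea() above. The `wscalar_` argument is assumed
# to expose N (number of binary variables), M (number of objectives) and
# fobj(bits) -> list of objective values, which is how moea() uses it; the toy
# class below is only an illustration, and the platyplus imports plus the
# module-level `logger` used inside moea() must be available.
class ToyKnapsack:
    N, M = 10, 2
    values = list(range(1, 11))
    weights = list(range(10, 0, -1))

    def fobj(self, x):
        # x is a list of 0/1 flags, one per item
        return [-sum(v for v, xi in zip(self.values, x) if xi),
                sum(w for w, xi in zip(self.weights, x) if xi)]

sols, elapsed = moea("toy", solsize=10, popsize=20,
                     wscalar_=ToyKnapsack(), moea_type="NSGAII", max_gen=50)
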
Example #6
def bar_multi_GA(nx=20,
                 ny=20,
                 volume_frac=0.5,
                 parent=400,
                 generation=100,
                 path="data"):
    PATH = os.path.join(path, "bar_nx_{}_ny_{}".format(nx, ny),
                        "gen_{}_pa_{}".format(generation, parent))
    os.makedirs(PATH, exist_ok=True)
    start = time.time()

    def objective(vars):
        y_1, y_2, y_3, x_4, nodes, widths = convert_var_to_arg(vars)
        edges = make_6_bar_edges(nx, ny, y_1, y_2, y_3, x_4, nodes, widths)
        rho = make_bar_structure(nx, ny, edges)
        volume = np.sum(rho) / (nx * ny)

        return [calc_E(rho), calc_G(rho)], [volume]

    def convert_var_to_arg(vars):
        y_1 = vars[0]
        y_2 = vars[1]
        y_3 = vars[2]
        x_4 = vars[3]
        node_y_indexes = vars[4:4 + 6 * 3]
        node_x_indexes = vars[4 + 6 * 3:4 + 6 * 3 * 2]
        nodes = np.stack([node_x_indexes, node_y_indexes], axis=1)
        widths = vars[4 + 6 * 3 * 2:]
        return y_1, y_2, y_3, x_4, nodes, widths

    # Problem with 4 + 6 * 3 * 2 + 6 * 4 decision variables, 2 objectives, 1 constraint
    problem = Problem(4 + 6 * 3 * 2 + 6 * 4, 2, 1)
    # Set minimization or maximization
    problem.directions[:] = Problem.MAXIMIZE

    # Set the ranges of the decision variables
    x_index_const = Integer(1, nx)  # bounds for the x coordinates
    y_index_const = Integer(1, ny)  # bounds for the y coordinates
    bar_constraint = Real(0, ny / 2)  # bounds for the bar widths
    problem.types[0:3] = y_index_const
    problem.types[3] = x_index_const
    problem.types[4:4 + 6 * 3] = y_index_const
    problem.types[4 + 6 * 3:4 + 6 * 3 * 2] = x_index_const
    problem.types[4 + 6 * 3 * 2:] = bar_constraint

    problem.constraints[:] = "<=" + str(volume_frac)
    problem.function = objective
    problem.directions[:] = Problem.MAXIMIZE
    algorithm = NSGAII(problem,
                       population_size=parent,
                       variator=CompoundOperator(SBX(), HUX(), PM(),
                                                 BitFlip()))
    algorithm.run(generation)

    # Plot the results

    fig = plt.figure()
    plt.scatter([s.objectives[0] for s in algorithm.result],
                [s.objectives[1] for s in algorithm.result],
                c="blue",
                label="infeasible solution")

    plt.scatter([s.objectives[0] for s in algorithm.result if s.feasible],
                [s.objectives[1] for s in algorithm.result if s.feasible],
                c="red",
                label='feasible solution')

    # Extract the nondominated solutions
    nondominated_solutions = nondominated(algorithm.result)
    plt.scatter(
        [s.objectives[0] for s in nondominated_solutions if s.feasible],
        [s.objectives[1] for s in nondominated_solutions if s.feasible],
        c="green",
        label="pareto solution")
    plt.legend(loc='lower left')

    plt.xlabel("$E$")
    plt.ylabel("$G$")
    fig.savefig(os.path.join(PATH, "graph.png"))
    plt.close()

    for solution in [s for s in nondominated_solutions if s.feasible]:
        vars_list = []
        for j in solution.variables[:3]:
            vars_list.append(y_index_const.decode(j))
        vars_list.append(x_index_const.decode(solution.variables[3]))
        for j in solution.variables[4:4 + 6 * 3]:
            vars_list.append(y_index_const.decode(j))
        for j in solution.variables[4 + 6 * 3:4 + 6 * 3 * 2]:
            vars_list.append(x_index_const.decode(j))
        for j in solution.variables[4 + 6 * 3 * 2:]:
            vars_list.append(bar_constraint.decode(j))
        y_1, y_2, y_3, x_4, nodes, widths = convert_var_to_arg(vars_list)
        edges = make_6_bar_edges(nx, ny, y_1, y_2, y_3, x_4, nodes, widths)
        image = make_bar_structure(nx, ny, edges)
        np.save(
            os.path.join(
                PATH, 'E_{}_G_{}.npy'.format(solution.objectives[0],
                                             solution.objectives[1])), image)

    convert_folder_npy_to_image(PATH)

    elapsed_time = time.time() - start

    with open("time.txt", mode='a') as f:
        f.writelines("bar_nx_{}_ny_{}_gen_{}_pa_{}:{}sec\n".format(
            nx, ny, generation, parent, elapsed_time))
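
# Aside on the decoding done above: Platypus Integer variables are stored
# internally as gray-coded bit lists, so solution.variables must go through the
# type's decode() to recover usable values, while Real values decode to
# themselves (toy values below, unrelated to the problem above).
from platypus import Integer, Real

y_type = Integer(1, 20)
w_type = Real(0.0, 10.0)
bits = y_type.encode(7)
assert y_type.decode(bits) == 7
assert w_type.decode(3.5) == 3.5
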
    dir_PATH = os.path.join(save_dir, "制約無10回平均")  # "no constraint, average of 10 runs"
    os.makedirs(dir_PATH, exist_ok=False)

    problem = IncrementalNodeIncrease_GA(2, 2)
    mean_history = []

    start = time.time()
    for z in range(10):
        PATH = os.path.join(dir_PATH, str(z))
        os.makedirs(PATH, exist_ok=False)
        history = []
        # instantiate the optimization algorithm to run in parallel
        with ProcessPoolEvaluator(8) as evaluator:
            #algorithm = NSGAII(problem, population_size=parent, variator=CompoundOperator(SBX(), HUX(), PM(), BitFlip()), evaluator=evaluator)
            algorithm = GeneticAlgorithm(problem, population_size=parent, offspring_size=parent,
                                         variator=CompoundOperator(SBX(), HUX(), UM(), BitFlip()), evaluator=evaluator)
            for i in tqdm(range(generation)):
                algorithm.step()
                """
                nondominated_solutions = nondominated(algorithm.result)
                efficiency_results = [s.objectives[0] for s in nondominated_solutions]
                max_efficiency = max(efficiency_results)
                """
                max_efficiency = algorithm.fittest.objectives[0]
                history.append(max_efficiency)

                epochs = np.arange(i + 1) + 1
                result_efficiency = np.array(history)
                fig = plt.figure()
                ax = fig.add_subplot(1, 1, 1)
                ax.plot(epochs, result_efficiency, label='efficiency')
    for t in range(experient_num):
        PATH = os.path.join(save_dir, "{}".format(t))
        os.makedirs(PATH, exist_ok=True)

        start = time.time()
        # instantiate the optimization algorithm to run in parallel
        for index, (free_node_num, fix_node_num) in enumerate(zip(free_nodes_num, fix_nodes_num)):
            problem = GA_type(free_node_num, fix_node_num)
            parent = problem.nvars * parent_mult_value
            history = []
            GA_result_dir = os.path.join(PATH, "free_{}_fix_{}".format(free_node_num, fix_node_num))
            os.makedirs(GA_result_dir, exist_ok=True)
            with ProcessPoolEvaluator(8) as evaluator:
                if index == 0:  # the first GA does not inherit genes
                    problem = GA_type(free_node_num, fix_node_num)
                    algorithm = Customized_GeneticAlgorithm(problem, population_size=parent, variator=CompoundOperator(SBX(), HUX(), PM(), BitFlip()), evaluator=evaluator)
                else:  # subsequent GAs inherit genes from the previous run
                    load_GA_result_dir = os.path.join(PATH, "free_{}_fix_{}".format(free_nodes_num[index - 1], fix_nodes_num[index - 1]))
                    load_gene_path = os.path.join(load_GA_result_dir, "parents.pk")
                    problem = GA_type(free_node_num, fix_node_num)
                    prior_problem = GA_type(free_nodes_num[index - 1], fix_nodes_num[index - 1])
                    algorithm = algorithm_type(problem, prior_problem, gene_path=load_gene_path, population_size=parent, variator=CompoundOperator(SBX(), HUX(), PM(), BitFlip()), evaluator=evaluator)
                for i in tqdm(range(generation)):
                    algorithm.step()
                    efficiency_results = [s.objectives[0] for s in algorithm.result if s.feasible]  # keep only feasible solutions
                    if len(efficiency_results) != 0:
                        max_efficiency = max(efficiency_results)
                    else:
                        max_efficiency = problem.penalty_value
                    history.append(max_efficiency)
                    epochs = np.arange(i + 1) + 1
Example #9
 # instantiate the optimization algorithm to run in parallel
 for index, (free_node_num,
             fix_node_num) in enumerate(zip(free_nodes_num, fix_nodes_num)):
     problem = VolumeConstraint_GA(free_node_num, fix_node_num)
     parent = problem.nvars * parent_mult_value
     history = []
     GA_result_dir = os.path.join(
         PATH, "free_{}_fix_{}".format(free_node_num, fix_node_num))
     os.makedirs(GA_result_dir, exist_ok=True)
     with ProcessPoolEvaluator(8) as evaluator:
         if index == 0:  # the first GA does not inherit genes
             problem = VolumeConstraint_GA(free_node_num, fix_node_num)
             algorithm = NSGAII(problem,
                                population_size=parent,
                                variator=CompoundOperator(
                                    SBX(), HUX(), PM(), BitFlip()),
                                evaluator=evaluator)
         else:  # subsequent GAs inherit genes from the previous run
             load_GA_result_dir = os.path.join(
                 PATH, "free_{}_fix_{}".format(free_nodes_num[index - 1],
                                               fix_nodes_num[index - 1]))
             load_gene_path = os.path.join(load_GA_result_dir, "parents.pk")
             problem = VolumeConstraint_GA(free_node_num, fix_node_num)
             prior_problem = VolumeConstraint_GA(free_nodes_num[index - 1],
                                                 fix_nodes_num[index - 1])
             algorithm = FixNode_NSGAII(problem,
                                        prior_problem,
                                        gene_path=load_gene_path,
                                        population_size=parent,
                                        variator=CompoundOperator(
                                             SBX(), HUX(), PM(), BitFlip()),
                                         evaluator=evaluator)
Example #10
def run(parent, generation, save_interval, save_dir="GA/result"):
    def objective(vars):
        # TODO: assert that each pair in condition edges_indices has its left index smaller than its right index
        gene_nodes_pos, gene_edges_thickness, gene_adj_element = convert_var_to_arg(vars)
        return [calculate_efficiency(gene_nodes_pos, gene_edges_thickness, gene_adj_element)]

    def make_adj_triu_matrix(adj_element, node_num, condition_edges_indices):
        """隣接情報を示す遺伝子から,edge_indicesを作成する関数
        """
        adj_matrix = np.zeros((node_num, node_num))
        adj_matrix[np.triu_indices(node_num, 1)] = adj_element

        adj_matrix[(condition_edges_indices[:, 0], condition_edges_indices[:, 1])] = 1
        edge_indices = np.stack(np.where(adj_matrix), axis=1)

        return edge_indices

    def make_edge_thick_triu_matrix(gene_edges_thickness, node_num, condition_edges_indices, condition_edges_thickness, edges_indices):
        """edge_thicknessを示す遺伝子から,condition_edge_thicknessを基にedges_thicknessを作成する関数
        """
        tri = np.zeros((node_num, node_num))
        tri[np.triu_indices(node_num, 1)] = gene_edges_thickness

        tri[(condition_edges_indices[:, 0], condition_edges_indices[:, 1])] = condition_edges_thickness
        edges_thickness = tri[(edges_indices[:, 0], edges_indices[:, 1])]

        return edges_thickness

    def convert_var_to_arg(vars):
        nodes_pos = np.array(vars[0:gene_node_pos_num])
        nodes_pos = nodes_pos.reshape([int(gene_node_pos_num / 2), 2])
        edges_thickness = vars[gene_node_pos_num:gene_node_pos_num + gene_edge_thickness_num]
        adj_element = vars[gene_node_pos_num + gene_edge_thickness_num: gene_node_pos_num + gene_edge_thickness_num + gene_edge_indices_num]
        return nodes_pos, edges_thickness, adj_element

    def calculate_efficiency(gene_nodes_pos, gene_edges_thickness, gene_adj_element, np_save_path=False):
        condition_nodes_pos, input_nodes, input_vectors, output_nodes, \
            output_vectors, frozen_nodes, condition_edges_indices, condition_edges_thickness\
            = make_main_node_edge_info(*condition(), condition_edge_thickness=0.2)

        # make edge_indices
        edges_indices = make_adj_triu_matrix(gene_adj_element, node_num, condition_edges_indices)

        # make nodes_pos
        nodes_pos = np.concatenate([condition_nodes_pos, gene_nodes_pos])

        # Extract the subgraph that contains the condition nodes
        G = nx.Graph()
        G.add_nodes_from(np.arange(len(nodes_pos)))
        G.add_edges_from(edges_indices)
        condition_node_list = input_nodes + output_nodes + frozen_nodes

        trigger = 0  # set to 1 when a connected subgraph containing all condition nodes exists
        for c in nx.connected_components(G):
            sg = G.subgraph(c)  # subgraph
            if set(condition_node_list) <= set(sg.nodes):  # does it contain all the condition nodes?
                edges_indices = np.array(sg.edges)
                trigger = 1
                break
        if trigger == 0:  # apply the penalty
            return -10.0

        # make edges_thickness
        edges_thickness = make_edge_thick_triu_matrix(gene_edges_thickness, node_num, condition_edges_indices, condition_edges_thickness, edges_indices)

        env = BarFemGym(nodes_pos, input_nodes, input_vectors,
                        output_nodes, output_vectors, frozen_nodes,
                        edges_indices, edges_thickness, frozen_nodes)
        env.reset()
        efficiency = env.calculate_simulation()
        if np_save_path:
            env.render(save_path=os.path.join(np_save_path, "image.png"))
            np.save(os.path.join(np_save_path, "nodes_pos.npy"), nodes_pos)
            np.save(os.path.join(np_save_path, "edges_indices.npy"), edges_indices)
            np.save(os.path.join(np_save_path, "edges_thickness.npy"), edges_thickness)

        return float(efficiency)

    node_num = 85
    parent = (node_num * 2 + int(node_num * (node_num - 1) / 2) * 2)  # ideally 10 times this value

    PATH = os.path.join(save_dir, "parent_{}_gen_{}".format(parent, generation))
    os.makedirs(PATH, exist_ok=True)

    condition_node_num = 10
    gene_node_pos_num = (node_num - condition_node_num) * 2

    gene_edge_thickness_num = int(node_num * (node_num - 1) / 2)
    gene_edge_indices_num = gene_edge_thickness_num

    # Single-objective problem over the node-position, edge-thickness and adjacency genes
    problem = Problem(gene_node_pos_num + gene_edge_thickness_num + gene_edge_indices_num, 1)

    # Set minimization or maximization
    problem.directions[:] = Problem.MAXIMIZE

    # Set the ranges of the decision variables
    coord_const = Real(0, 1)
    edge_const = Real(0.1, 1)  # lower bound set to 0.1 to avoid bugs
    adj_constraint = Integer(0, 1)

    problem.types[0:gene_node_pos_num] = coord_const
    problem.types[gene_node_pos_num:gene_node_pos_num + gene_edge_thickness_num] = edge_const
    problem.types[gene_node_pos_num + gene_edge_thickness_num: gene_node_pos_num + gene_edge_thickness_num + gene_edge_indices_num] = adj_constraint
    problem.function = objective

    algorithm = NSGAII(problem, population_size=parent,
                       variator=CompoundOperator(SBX(), HUX(), PM(), BitFlip()))

    history = []

    for i in tqdm(range(generation)):
        algorithm.step()
        nondominated_solutions = nondominated(algorithm.result)
        efficiency_results = [s.objectives[0] for s in nondominated_solutions]
        max_efficiency = max(efficiency_results)
        history.append(max_efficiency)

        epochs = np.arange(i + 1) + 1
        result_efficiency = np.array(history)
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(epochs, result_efficiency, label='efficiency')
        ax.set_xlim(1, max(epochs))
        ax.set_xlabel('epoch')
        ax.legend()
        ax.set_title("efficiency curve")
        plt.savefig(os.path.join(PATH, "history.png"))
        plt.close()

        if i % save_interval == 0:
            save_dir = os.path.join(PATH, str(i))
            os.makedirs(save_dir, exist_ok=True)
            max_index = efficiency_results.index(max_efficiency)
            max_solution = nondominated_solutions[max_index]

            vars = []
            vars.extend([coord_const.decode(i) for i in max_solution.variables[0:gene_node_pos_num]])
            vars.extend([edge_const.decode(i) for i in max_solution.variables[gene_node_pos_num:gene_node_pos_num + gene_edge_thickness_num]])
            vars.extend([adj_constraint.decode(i) for i in max_solution.variables[gene_node_pos_num + gene_edge_thickness_num: gene_node_pos_num + gene_edge_thickness_num + gene_edge_indices_num]])
            gene_nodes_pos, gene_edges_thickness, gene_adj_element = convert_var_to_arg(vars)
            calculate_efficiency(gene_nodes_pos, gene_edges_thickness, gene_adj_element, np_save_path=save_dir)

            np.save(os.path.join(save_dir, "history.npy"), history)
 # Do we need to perform scenario reduction using the PCA and k-medoid algorithm?
 if editable_data['Perfrom scenario reduction'] == 'yes':
     print('Performing scenario reduction using the k-medoid algorithm')
     clustring_kmediod_PCA.kmedoid_clusters()
 # Do we need to perform the two-stage stochastic programming using NSGA-II?
 if editable_data['Perform two stage optimization'] == 'yes':
     print('Performing two-stage stochastic optimization')
     problem = NSGA2_design_parallel_discrete.TwoStageOpt()
     with ProcessPoolEvaluator(
             int(editable_data['num_processors'])
     ) as evaluator:  # the program accepts at most 61 processors; I have 8 processors on my PC
         algorithm = NSGAII(problem,
                            population_size=int(
                                editable_data['population_size']),
                            evaluator=evaluator,
                            variator=GAOperator(HUX(), BitFlip()))
         algorithm.run(int(editable_data['num_iterations']))
     NSGA2_design_parallel_discrete.results_extraction(problem, algorithm)
 #Do we need to generate Pareto-front and parallel coordinates plots for the results?
 if editable_data['Visualizing the final results'] == 'yes':
     from Two_Stage_SP.plot_results_design import parallel_plots, ParetoFront_EFs
     file_name = city_DES + '_Discrete_EF_' + str(
         float(editable_data['renewable percentage'])) + '_design_' + str(
             editable_data['num_iterations']) + '_' + str(
                 editable_data['population_size']) + '_' + str(
                     editable_data['num_processors']) + '_processors'
     results_path = os.path.join(sys.path[0], file_name)
     if not os.path.exists(results_path):
         print(
              'The results folder is not available. Please generate the results first'
         )
Example #12
    def run_algorithm(self):
        file_path = self.lineFilePath.text()
        if file_path == "":
            self.show_simple_error("Please choose file!")
            return
        print(file_path)
        # Reading file to NRP instance
        reader: AbstractFileReader = None
        if self.radioClassicFormat.isChecked():
            reader = ClassicFileReader()
        else:
            reader = CommonFileReader()
        try:
            self.nrp_instance: NRPInstance = reader.read_nrp_instance(
                filename=file_path)
        except RuntimeError as ex:
            # e.g. a cycle in the requirement dependencies
            self.show_file_error(ex, str(ex))
            return
        except Exception as ex:
            self.show_file_error(ex)
            return
        # Multi or Single
        nrp_problem: Problem = None
        self.is_last_single = not self.radioMulti.isChecked()
        if self.radioMulti.isChecked():
            nrp_problem = NRP_Problem_MO(self.nrp_instance)
        else:
            nrp_problem = NRP_Problem_SO(self.nrp_instance)

        algorithm: AbstractGeneticAlgorithm = None
        # TODO Move somewhere and add config
        # Crossover probability is 0.8 and mutation probability = 1 / (size of binary vector)
        variator = None
        # TODO try single-point crossover
        variator = GAOperator(HUX(probability=0.8), BitFlip(probability=1))
        selector = TournamentSelector(5)
        #  With or without requirement dependencies
        if self.radioDependYes.isChecked():
            algorithm = NSGAII_Repair(nrp_problem,
                                      repairer=Repairer(
                                          self.nrp_instance.requirements),
                                      variator=variator,
                                      selector=selector)
        else:
            algorithm = NSGAII(nrp_problem,
                               variator=variator,
                               selector=selector)
        #  Take n runs
        try:
            nruns = int(self.lineNumOfRuns.text())
            if nruns < 1 or nruns > 10000000:
                self.show_simple_error(
                    "Number of runs must be between 1 and 10000000!")
                return
        except ValueError:
            self.show_simple_error("Number of runs must be integer!")
            return

        self.wait_start()
        worker = Worker(self.run_and_back, algorithm, nruns)
        self.threadpool.start(worker)