Example #1
    def nn_opt(self, nn):
        with torch.no_grad():

            def obj_cons(x):
                tx = torch.tensor(x)
                out = nn(tx)
                return (out[:self.nobj].numpy().tolist(),
                        out[self.nobj:].numpy().tolist())

            def obj_ucons(x):
                tx = torch.tensor(x)
                return nn(tx).numpy().tolist()

            arch = Archive()
            if self.ncons == 0:
                prob = Problem(self.dim, self.nobj)
                prob.function = obj_ucons
            else:
                prob = Problem(self.dim, self.nobj, self.ncons)
                prob.function = obj_cons
                prob.constraints[:] = "<=0"
            prob.types[:] = [Real(self.lb, self.ub) for _ in range(self.dim)]
            self.algo = NSGAII(prob, population_size=50, archive=arch)
            self.algo.run(5000)

            optimized = self.algo.result
            rand_idx = np.random.randint(len(optimized))
            suggested_x = torch.tensor(optimized[rand_idx].variables)
            suggested_y = nn(suggested_x)
            return suggested_x.view(-1, self.dim), suggested_y.view(
                -1, self.nobj + self.ncons)
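Here self.algo.result is the final population, from which a random member is suggested. If only Pareto-optimal candidates should be sampled, Platypus's nondominated filter can be applied first; a minimal sketch under that assumption:

from platypus import nondominated

pareto = nondominated(self.algo.result)          # keep only nondominated solutions
rand_idx = np.random.randint(len(pareto))
suggested_x = torch.tensor(pareto[rand_idx].variables)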
Example #2
    def fit(self, x, y, bound=None, name=None):

        self.y = np.array(y)
        self.x = x

        TS = 1
        ND = len(y) - 1

        y = y / self.N

        t_start = 0.0
        t_end = ND
        t_inc = TS
        t_range = np.arange(t_start, t_end + t_inc, t_inc)

        Model_Input = (self.S0, self.I0, self.R0)

        # GA Parameters
        number_of_generations = 1000
        ga_population_size = 100
        number_of_objective_targets = 1  # The MSE
        number_of_constraints = 0
        number_of_input_variables = 2  # beta and gamma
        problem = Problem(number_of_input_variables,
                          number_of_objective_targets, number_of_constraints)
        problem.function = functools.partial(self.fitness_function,
                                             y=y,
                                             Model_Input=Model_Input,
                                             t_range=t_range)

        # Define the search range of each decision variable before running
        problem.types[0] = Real(0, 1)  # beta initial range
        problem.types[1] = Real(1 / 14, 1 / 5)  # gamma initial range (bounds given low to high)

        algorithm = NSGAII(problem, population_size=ga_population_size)

        # Running the GA
        algorithm.run(number_of_generations)

        feasible_solutions = [s for s in algorithm.result if s.feasible]

        self.beta = feasible_solutions[0].variables[0]
        self.gamma = feasible_solutions[0].variables[1]

        input_variables = ['beta', 'gamma']
        file_address = 'optimised_coefficients/'
        filename = "ParametrosAjustados_Modelo_{}_{}_{}_Dias.txt".format(
            'SIR_EDO', name, len(x))

        if not os.path.exists(file_address):
            os.makedirs(file_address)

        with open(file_address + filename, "w") as file_optimised_parameters:
            for i in range(len(input_variables)):
                message = '{}:{:.4f}\n'.format(
                    input_variables[i], feasible_solutions[0].variables[i])
                file_optimised_parameters.write(message)
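Platypus calls the bound function with only the decision vector, since the remaining arguments are frozen by functools.partial. A sketch of the shape self.fitness_function presumably has (solve_sir is a hypothetical SIR integrator, not from the original source):

def fitness_function(self, x, y, Model_Input, t_range):
    beta, gamma = x  # the two decision variables
    # solve_sir is a hypothetical helper returning model output over t_range
    predicted = solve_sir(Model_Input, beta, gamma, t_range)
    return [float(np.mean((predicted - y) ** 2))]  # one objective: the MSE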
Example #3
def Pareto_Front_Strategy(Probs, odds, Total_BetMax, Max_individual_bet,
                          Min_individual_bet):

    #NOTE: The order of bets and Probs is as follows: 1st (1) is AWAY win and 2nd (2) is HOME win

    Bets = np.argmax(Probs, 1) + 1

    n = len(Probs)  #number of games

    # Calculate probabilities for ALL the events together (i.e. each outcome). This will be used as weights for our optimisation
    k = n
    num_events = int(pow(2, n))
    Event_Probs = np.zeros(num_events)

    outc = outcomes_HACK(n)

    for i in range(num_events):
        individual_probs = np.diag(Probs[:, outc[i, :] - 1])
        Event_Probs[i] = PoissonBinomialPDF(k, n, individual_probs)

    #Pareto optimization
    problem = Problem(n, 2, 1)  # decision variables, objectives, constraints
    problem.types[:] = Real(
        Min_individual_bet,
        Max_individual_bet)  #lower and upper bounds for all decision variables
    problem.function = functools.partial(ExpectationRisk,
                                         arg1=n,
                                         arg2=odds,
                                         arg3=Event_Probs,
                                         arg4=Bets,
                                         arg5=outc)
    problem.constraints[:] = "<={}".format(
        Total_BetMax)  # inequality constraint: sum of bets no more than Total_BetMax
    algorithm = NSGAII(problem)
    algorithm.run(5000)

    plt.scatter([s.objectives[0] for s in algorithm.result],
                [s.objectives[1] for s in algorithm.result])
    plt.show()

    Stakes = []
    ExpOut, VarOut = None, None  # guard against no solution exceeding the threshold
    Ratio = 0

    for i in range(len(algorithm.result)):
        # objectives are: -data[0] = Expectation, data[1] = Variance

        # Expectation/Variance ratio threshold
        Exp = algorithm.result[i].objectives[0]
        Var = algorithm.result[i].objectives[1]
        curRatio = Exp / Var

        if curRatio > Ratio:
            Ratio = curRatio
            Stakes = algorithm.result[i].variables
            ExpOut = Exp
            VarOut = Var

    return (Stakes, Bets, ExpOut, VarOut)
Example #4
    def update(self, **kwargs):
        """
        Rewrite update
        """
        assert 'X' in kwargs and 'Y' in kwargs
        assert 'eta' in kwargs and 'num_data' in kwargs
        super(USeMO, self).update(**kwargs)

        self.X_dim = self.X.shape[1]
        self.Y_dim = self.Y.shape[1]
        assert self.Y_dim > 1

        # update single acquisition function
        for i in range(self.Y_dim):
            self.single_acq[i].update(model=self.model[i],
                                      eta=self.eta[i],
                                      num_data=self.num_data)
            self.uncertainty_acq[i].update(model=self.model[i],
                                           eta=self.eta[i],
                                           num_data=self.num_data)

        def CMO(x):
            x = np.asarray(x)
            # minimize negative acq
            return [
                -self.single_acq[i](x, convert=False)[0][0]
                for i in range(self.Y_dim)
            ]

        problem = Problem(self.X_dim, self.Y_dim)
        set_problem_types(self.config_space, problem)
        problem.function = CMO

        variator = get_variator(self.config_space)
        algorithm = NSGAII(problem, population_size=100, variator=variator)
        algorithm.run(2500)
        # decode
        for s in algorithm.result:
            s.variables[:] = [
                problem.types[i].decode(s.variables[i])
                for i in range(problem.nvars)
            ]
        cheap_pareto_set = [
            solution.variables for solution in algorithm.result
        ]
        # cheap_pareto_set_unique = []
        # for i in range(len(cheap_pareto_set)):
        #     if not any((cheap_pareto_set[i] == x).all() for x in self.X):
        #         cheap_pareto_set_unique.append(cheap_pareto_set[i])
        cheap_pareto_set_unique = cheap_pareto_set

        single_uncertainty = np.array([
            self.uncertainty_acq[i](np.asarray(cheap_pareto_set_unique),
                                    convert=False) for i in range(self.Y_dim)
        ])  # shape=(Y_dim, N, 1)
        single_uncertainty = single_uncertainty.reshape(self.Y_dim,
                                                        -1)  # shape=(Y_dim, N)
        self.uncertainties = np.prod(single_uncertainty,
                                     axis=0)  # shape=(Y_dim,) todo normalize?
        self.candidates = np.array(cheap_pareto_set_unique)
Example #5
    def make_platypus_objective_function_competing_function(
            self, sources, bad_sources=[]):
        total_ret_func = make_total_lookup_function(
            sources, interpolation_method=self.interpolation_method
        )  # the function to be optimized
        bad_sources_func = make_total_lookup_function(
            bad_sources,
            type="fastest",
            interpolation_method=self.interpolation_method
        )  # the function to be optimized

        def multiobjective_func(x):  # this is the double objective function
            return [total_ret_func(x), bad_sources_func(x)]

        num_inputs = len(sources) * 2  # there is an x, y for each source
        NUM_OUTPUTS = 2  # the default for now
        # define the dimensionality of input and output spaces
        problem = Problem(num_inputs, NUM_OUTPUTS)
        x, y, time = sources[0]  # expand the first source
        min_x = min(x)
        min_y = min(y)
        max_x = max(x)
        max_y = max(y)
        print("min x : {}, max x : {}, min y : {}, max y : {}".format(
            min_x, max_x, min_y, max_y))
        problem.types[::2] = Real(min_x, max_x)  # This is the feasible region
        problem.types[1::2] = Real(min_y, max_y)
        problem.function = multiobjective_func
        # the second function should be maximized rather than minimized
        problem.directions[1] = Problem.MAXIMIZE
        return problem
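The slice assignments problem.types[::2] and problem.types[1::2] interleave x and y bounds across the flat decision vector. The same pattern in a self-contained sketch (bounds and objective are illustrative, not from the original source):

from platypus import NSGAII, Problem, Real

# flat decision vector [x0, y0, x1, y1] for two (x, y) locations
problem = Problem(4, 1)
problem.types[::2] = Real(0.0, 10.0)   # x coordinates at even indices
problem.types[1::2] = Real(-5.0, 5.0)  # y coordinates at odd indices
problem.function = lambda v: [sum(c * c for c in v)]  # toy objective

NSGAII(problem).run(1000)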
Example #6
def platypus_cube(objective,
                  scale,
                  n_trials,
                  n_dim,
                  strategy,
                  with_count=False):

    global feval_count
    feval_count = 0

    def _objective(vars):
        global feval_count
        feval_count += 1
        return objective(list(vars))[0]

    problem = Problem(n_dim, 1, 0)
    problem.types[:] = [Real(-scale, scale)] * n_dim
    # note: the problem declares zero constraints, so this assignment is a no-op
    problem.constraints[:] = "<=0"
    problem.function = _objective

    algorithm = strategy(problem)
    algorithm.run(n_trials)
    feasible_solution_obj = [
        s.objectives[0] for s in algorithm.result if s.feasible
    ]
    best_obj = min(feasible_solution_obj)
    return (best_obj, feval_count) if with_count else best_obj
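A possible invocation of this helper, with an illustrative sphere objective (not from the original source) that returns a one-element list so that objective(...)[0] is the scalar value:

from platypus import NSGAII

def sphere(x):
    return [sum(v * v for v in x)]

best = platypus_cube(sphere, scale=5.0, n_trials=1000, n_dim=3, strategy=NSGAII)
print(best)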
Example #7
def generate_initial_population(pop_size=10, number_of_gd_steps=50):
    p1 = GradientDescentAdam(R, lr=alpha)
    initial_pop = []
    meta_g = None
    vec_len = None

    for x in range(pop_size):
        _, _, _, _, G, S = p1.optimize(number_of_gd_steps, ks=ks)
        v, meta = roll(G, S)
        meta_g = meta
        vec_len = len(v)
        initial_pop.append(v.tolist())

    problem = Problem(vec_len, 1)
    problem.types[:] = Real(0, 1000)
    problem.function = partial(fit, p1, meta_g)

    generator = RandomGenerator()
    population = []

    for i in range(pop_size):
        p = generator.generate(problem)
        p.variables = initial_pop[i]
        problem.evaluate(p)
        population.append(p)

    return problem, population, meta_g, generator
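The returned, already-evaluated population can seed a follow-up search; one option is Platypus's InjectedPopulation generator (a sketch, with an arbitrary run length):

from platypus import NSGAII, InjectedPopulation

problem, population, meta_g, generator = generate_initial_population(pop_size=10)
algorithm = NSGAII(problem,
                   population_size=len(population),
                   generator=InjectedPopulation(population))
algorithm.run(1000)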
Example #8
def project():
    conn = sqlite3.connect('products.db')
    companies = get_companies(conn)

    problem = Problem(len(companies), 2)
    #problem.types[:] = [Integer(1, nlojas) for _ in range(nprodutos)]

    problem.function = schaffer

    algorithm = NSGAII(problem)
    algorithm.run(10000)

    for company in range(len(companies)):
        result = schaffer(companies[company][0], conn)
        print(result)

    conn.close()

    plt.scatter([s.objectives[0] for s in algorithm.result],
                [s.objectives[1] for s in algorithm.result])
    plt.xlim([0, 1.1])
    plt.ylim([0, 1.1])
    plt.xlabel("Preço")
    plt.ylabel("Distancia")
    plt.show()
Example #9
    def make_platypus_objective_function_counting(self,
                                                  sources,
                                                  times_more_detectors=1):
        """
        This balances the number of detectors with the quality of the outcome
        """
        total_ret_func = make_total_lookup_function(
            sources, masked=True)  # the function to be optimized
        counting_func = make_counting_objective()

        def multiobjective_func(x):  # this is the double objective function
            return [total_ret_func(x), counting_func(x)]

        # there is an x, y, and a mask for each source so there must be three
        # times more input variables
        # the upper bound on the number of detectors n times the number of
        # sources
        num_inputs = len(sources) * 3 * times_more_detectors
        NUM_OUTPUTS = 2  # the default for now
        # define the dimensionality of input and output spaces
        problem = Problem(num_inputs, NUM_OUTPUTS)
        x, y, time = sources[0]  # expand the first source
        min_x = min(x)
        min_y = min(y)
        max_x = max(x)
        max_y = max(y)
        print("min x : {}, max x : {}, min y : {}, max y : {}".format(
            min_x, max_x, min_y, max_y))
        problem.types[0::3] = Real(min_x, max_x)  # This is the feasible region
        problem.types[1::3] = Real(min_y, max_y)
        # This appears to be inclusive, so this is really just (0, 1)
        problem.types[2::3] = Binary(1)
        problem.function = multiobjective_func
        return problem
Example #10
def platypus_cube(objective, n_trials, n_dim, with_count=False, method=None):
    global feval_count
    feval_count = 0

    def _objective(vars):
        global feval_count
        feval_count += 1
        return float(objective(
            list(vars)))  # Avoid np.array as Platypus may puke

    problem = Problem(n_dim, 1, 0)
    problem.types[:] = [Real(0.0, 1.0)] * n_dim
    # note: the problem declares zero constraints, so this assignment is a no-op
    problem.constraints[:] = "<=0"
    problem.function = _objective

    strategy_and_args = PLATYPUS_ALGORITHMS[method]
    if isinstance(strategy_and_args, tuple):
        strategy = strategy_and_args[0]
        strategy_args = strategy_and_args[1]
        algorithm = strategy(problem, **strategy_args)
    else:
        strategy = strategy_and_args
        algorithm = strategy(problem)

    algorithm.run(n_trials)
    feasible_solution_obj = sorted([(s.objectives[0], s.variables)
                                    for s in algorithm.result if s.feasible],
                                   reverse=False)
    best_obj, best_x = feasible_solution_obj[0]
    if isinstance(best_x, FixedLengthArray):
        best_x = best_x._data  # CMA-ES returns it this way for some reason
    return (best_obj, best_x, feval_count) if with_count else (best_obj,
                                                               best_x)
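The function looks method up in a PLATYPUS_ALGORITHMS registry mapping a name to either an algorithm class or a (class, kwargs) tuple. A sketch of what such a registry might contain (names and entries are illustrative, not from the original source):

from platypus import NSGAII, GDE3, CMAES

PLATYPUS_ALGORITHMS = {
    "nsgaii": NSGAII,                               # bare strategy class
    "nsgaii50": (NSGAII, {"population_size": 50}),  # (class, kwargs) form
    "gde3": GDE3,
    "cmaes": CMAES,
}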
Example #11
def autoIterate(model,
                river,
                reach,
                rs,
                flow,
                stage,
                nct,
                plot,
                outf,
                metrics,
                correctDatum,
                evals=None,
                si=False):
    """
    Automatically iterate with NSGA-II
    """
    keys = metrics  # ensure same order
    evalf = evaluator(stage, useTests=keys, correctDatum=correctDatum)
    evals = int(
        input("How many evaluations to run? ")) if evals is None else evals
    plotpath = ".".join(outf.split(".")[:-1]) + ".png"
    count = 1
    print("Running automatic calibration")

    def manningEval(vars):
        n = vars[0]
        metrics = minimized(
            nstageSingleRun(model, river, reach, rs, stage, n, keys,
                            correctDatum))
        values = [metrics[key] for key in keys]
        constraints = [-n, n - 1]
        nonlocal count
        print("Completed %d evaluations" % count)
        count += 1
        return values, constraints

    c_type = "<0"
    # 1 decision variable, len(keys) objectives, and 2 constraints
    problem = Problem(1, len(keys), 2)
    problem.types[:] = Real(0.001, 1)  # range of decision variable
    problem.constraints[:] = c_type
    problem.function = manningEval

    algorithm = NSGAII(problem, population_size=nct)
    algorithm.run(evals)
    nondom = nondominated(
        algorithm.result
    )  # nondom: list of Solutions - wanted value is variables[0]
    nondomNs = [sol.variables[0] for sol in nondom]
    results = runSims(model, nondomNs, river, reach, len(stage), range=[rs])
    resultPts = [(nondomNs[ix],
                  [results[ix][rs][jx] for jx in range(1,
                                                       len(stage) + 1)])
                 for ix in range(len(nondomNs))]
    metrics = [(res[0], evalf(res[1]), res[1]) for res in resultPts]
    nDisplay(metrics, flow, stage, plotpath, outf, plot, correctDatum, si)
    return metrics
Example #12
 def generate_problem(self):
     # 1 decision variable, 1 objective, 2 constraints
     problem = Problem(1, 1, 2)
     problem.types[:] = Binary(len(self.requirements))
     problem.directions[:] = Problem.MAXIMIZE
     problem.constraints[0] = "!=0"
     problem.constraints[1] = "<=0"
     problem.function = self.get_problem_function
     return problem
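With two declared constraints, Platypus expects the function to return both objective and constraint lists. A sketch of the shape self.get_problem_function presumably has (the value/cost attributes and self.budget are assumptions):

def get_problem_function(self, x):
    selection = x[0]  # Binary variable: one boolean per requirement
    value = sum(r.value for r, s in zip(self.requirements, selection) if s)
    cost = sum(r.cost for r, s in zip(self.requirements, selection) if s)
    # constraint 0 must satisfy != 0 (pick something); constraint 1 must satisfy <= 0 (budget)
    return [value], [sum(selection), cost - self.budget]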
Example #13
def so_run(population_size):
    so_problem = Problem(1, 1, 1)
    so_problem.types[:] = Binary(len(requirements))
    so_problem.directions[:] = Problem.MAXIMIZE
    so_problem.constraints[:] = "<={}".format(budget)
    so_problem.function = single_objective_nrp
    so_algorithm = GeneticAlgorithm(so_problem, population_size)
    so_algorithm.run(runs)
    x_so = [solution.objectives[0] for solution in so_algorithm.result]
    y_so = [solution.constraints[0] * (-1) for solution in so_algorithm.result]
    return x_so, y_so, so_algorithm
Example #14
def mo_run(population_size):
    mo_problem = Problem(2, 2, 2)
    mo_problem.types[:] = Binary(len(requirements))
    mo_problem.directions[0] = Problem.MAXIMIZE
    mo_problem.directions[1] = Problem.MINIMIZE
    mo_problem.constraints[:] = "<={}".format(budget)
    mo_problem.function = multi_objective_nrp
    mo_nsga = NSGAII(mo_problem, population_size)
    mo_nsga.run(runs)
    x_mo = [solution.objectives[0] for solution in mo_nsga.result]
    y_mo = [solution.objectives[1] * (-1) for solution in mo_nsga.result]
    return x_mo, y_mo, mo_nsga
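Both runs assume NRP objective functions defined elsewhere. A sketch of what single_objective_nrp could look like under the conventions above (score, cost, and requirements are assumed globals):

def single_objective_nrp(x):
    selection = x[0]  # Binary variable decodes to a list of booleans
    profit = sum(score[i] for i, s in enumerate(selection) if s)
    total_cost = sum(cost[i] for i, s in enumerate(selection) if s)
    # one maximized objective; the "<=budget" constraint applies to total_cost
    return [profit], [total_cost]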
Example #15
    def update(self, **kwargs):
        """
        Rewrite update to support pareto front sampling.
        """
        assert 'X' in kwargs and 'Y' in kwargs
        assert 'constraint_perfs' in kwargs
        super(MESMOC, self).update(**kwargs)

        self.X_dim = self.X.shape[1]
        self.Y_dim = self.Y.shape[1]

        self.Multiplemes = [None] * self.Y_dim
        self.Multiplemes_constraints = [None] * self.num_constraints
        for i in range(self.Y_dim):
            self.Multiplemes[i] = MaxvalueEntropySearch(self.model[i], self.X, self.Y[:, i],
                                                        random_state=self.random_state)
            self.Multiplemes[i].Sampling_RFM()
        for i in range(self.num_constraints):
            # Caution dim of self.constraint_perfs!
            self.Multiplemes_constraints[i] = MaxvalueEntropySearch(self.constraint_models[i],
                                                                    self.X, self.constraint_perfs[i])
            self.Multiplemes_constraints[i].Sampling_RFM()

        self.min_samples = []
        self.min_samples_constraints = []
        for j in range(self.sample_num):
            for i in range(self.Y_dim):
                self.Multiplemes[i].weigh_sampling()
            for i in range(self.num_constraints):
                self.Multiplemes_constraints[i].weigh_sampling()

            def CMO(xi):
                xi = np.asarray(xi)
                y = [self.Multiplemes[i].f_regression(xi)[0][0] for i in range(self.Y_dim)]
                y_c = [self.Multiplemes_constraints[i].f_regression(xi)[0][0] for i in range(self.num_constraints)]
                return y, y_c

            problem = Problem(self.X_dim, self.Y_dim, self.num_constraints)
            for k in range(self.X_dim):
                problem.types[k] = Real(self.bounds[k][0], self.bounds[k][1])  # todo other types
            problem.constraints[:] = "<=0"  # todo confirm
            problem.function = CMO
            algorithm = NSGAII(problem)
            algorithm.run(1500)
            cheap_pareto_front = [list(solution.objectives) for solution in algorithm.result]
            cheap_constraints_values = [list(solution.constraints) for solution in algorithm.result]
            # picking the min over the pareto: best case
            min_of_functions = [min(f) for f in list(zip(*cheap_pareto_front))]
            min_of_constraints = [min(f) for f in list(zip(*cheap_constraints_values))]  # todo confirm
            self.min_samples.append(min_of_functions)
            self.min_samples_constraints.append(min_of_constraints)
Example #16
    def _prune(self):
        problem = Problem(len(self.ensemble_), 2)
        problem.types[:] = Integer(0, 1)
        problem.directions[0] = Problem.MAXIMIZE
        problem.directions[1] = Problem.MAXIMIZE
        problem.function = functools.partial(MCE._evaluate_imbalance,
                                             y_predicts=self._y_predict,
                                             y_true=self._y_valid)

        algorithm = NSGAII(problem)
        algorithm.run(10000)

        solutions = unique(nondominated(algorithm.result))
        objectives = [sol.objectives for sol in solutions]

        def extract_variables(variables):
            extracted = [v[0] for v in variables]
            return extracted

        self._ensemble_quality = self.get_group(
            extract_variables(solutions[objectives.index(
                max(objectives, key=itemgetter(0)))].variables),
            self.ensemble_)
        self._ensemble_diversity = self.get_group(
            extract_variables(solutions[objectives.index(
                max(objectives, key=itemgetter(1)))].variables),
            self.ensemble_)
        self._ensemble_balanced = self.get_group(
            extract_variables(solutions[objectives.index(
                min(objectives, key=lambda i: abs(i[0] - i[1])))].variables),
            self.ensemble_)

        pareto_set, fitnesses = self._genetic_optimalisation(
            optimalisation_type='quality_single')
        self._ensemble_quality_single = self.get_group(
            pareto_set[fitnesses.index(max(fitnesses, key=itemgetter(0)))],
            self.ensemble_)
        # pareto_set, fitnesses = self._genetic_optimalisation(optimalisation_type='diversity_single')
        # self._ensemble_diversity_single = self.get_group(pareto_set[fitnesses.index(min(fitnesses, key=itemgetter(0)))],
        #                                                  self.ensemble_)

        pareto_set, fitnesses = self._genetic_optimalisation(
            optimalisation_type='precision_single')
        self._ensemble_precision_single = self.get_group(
            pareto_set[fitnesses.index(max(fitnesses, key=itemgetter(0)))],
            self.ensemble_)
        pareto_set, fitnesses = self._genetic_optimalisation(
            optimalisation_type='recall_single')
        self._ensemble_recall_single = self.get_group(
            pareto_set[fitnesses.index(max(fitnesses, key=itemgetter(0)))],
            self.ensemble_)
Example #17
def cal(args, exeCount):
    start = time.time()
    param_count = 5  # number of parameters

    problem = Problem(param_count, 2)  # number of decision variables, number of objectives
    problem.types[:] = Real(-2.0, 2.0)  # range of each parameter
    problem.function = schaffer
    algorithm = NSGAII(problem)
    algorithm.run(5000)  # number of iterations (function evaluations)

    print('{:-^63}'.format('-'))

    # Organize the data
    # params: coefficients a
    # f1s   : residual vibration [deg]
    # f2s   : energy
    params = np.empty([100, param_count])
    f1s = np.empty([100])
    f2s = np.empty([100])
    for i, solution in enumerate(algorithm.result):
        result = tuple(solution.variables[:] + solution.objectives[:])

        params[i, :] = result[:param_count][:]
        f1s[i] = 180 * result[param_count] / np.pi
        f2s[i] = result[param_count + 1]

    # Display the coefficients a that minimize the residual vibration
    index = np.argmin(f1s)
    print('\n*** Values when the residual vibration is minimal ***')
    print('Residual vibration [deg]\t{}'.format(f1s[index]))
    print('Energy [J]\t{}'.format(f2s[index]))
    print('Coefficients a\t\t{}'.format(params[index, :]))

    np.savetxt('./results/nsga2/{}/nsga2_params_{}.csv'.format(
        args[1], exeCount),
               params[index, :],
               delimiter=',')

    # elapsed time
    print(f'\nelapsed time: {time.time()-start}')

    # write the coefficients a, residual vibration, and energy to a CSV file
    data = np.empty([100, param_count + 2])
    data[:, 0:param_count] = params
    data[:, param_count] = f1s
    data[:, param_count + 1] = f2s
    np.savetxt('./results/nsga2/{}/nsga2_data_{}.csv'.format(
        args[1], exeCount),
               data,
               delimiter=',')
Example #18
 def build_problems(self):
     self.us = [self.u1, self.u2, self.u3]
     self.ws = [self.w1, self.w2, self.w3]
     temp = sum(self.used_components)
     if temp == 1:
         return self.defuzz_not_none()
     u_problem, w_problem = Problem(1, temp), Problem(1, temp)
     u_universe, w_universe = self.universe_not_none()
     u_problem.types[:] = Subset(u_universe, 1)
     w_problem.types[:] = Subset(w_universe, 1)
     u_problem.directions[:] = Problem.MAXIMIZE
     w_problem.directions[:] = Problem.MAXIMIZE
     u_problem.function, w_problem.function = self.u_function, self.w_function
     return u_problem, w_problem
Example #19
    def run_nsgaii_bc():
        def CMO(xi):
            xi = np.asarray(xi)
            y = [branin(xi), Currin(xi)]
            return y

        problem = Problem(2, 2)
        problem.types[:] = Real(0, 1)
        problem.function = CMO
        algorithm = NSGAII(problem)
        algorithm.run(2500)
        cheap_pareto_front = np.array(
            [list(solution.objectives) for solution in algorithm.result])
        return cheap_pareto_front
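branin and Currin are standard two-objective benchmark components on the unit square. The usual definitions, stated here as an assumption about what this snippet imports (Branin is rescaled from [0, 1]^2 to its native domain; Currin is undefined at x[1] == 0):

import numpy as np

def branin(x):
    x1 = 15 * x[0] - 5   # rescale to [-5, 10]
    x2 = 15 * x[1]       # rescale to [0, 15]
    return ((x2 - 5.1 * x1**2 / (4 * np.pi**2) + 5 * x1 / np.pi - 6)**2
            + 10 * (1 - 1 / (8 * np.pi)) * np.cos(x1) + 10)

def Currin(x):
    return ((1 - np.exp(-1 / (2 * x[1])))
            * (2300 * x[0]**3 + 1900 * x[0]**2 + 2092 * x[0] + 60)
            / (100 * x[0]**3 + 500 * x[0]**2 + 4 * x[0] + 20))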
Example #20
def generate_initial_population(number_of_gd_steps=50):
    p1 = GradientDescentAdam(R, lr=alpha)
    initial_pop = []
    scale = 0.01

    noise_scale = 1e-7

    Gv, Sv = p1.construct_starting_point(ks, scale=scale)

    _, _, _, _, G, S = p1.optimize(G=Gv.copy(), S=Sv.copy(), steps=number_of_gd_steps, ks=ks)
    v, meta = roll(G, S)
    initial_pop.append(v.tolist())

    GvNoise = []
    for x in Gv:
        GvNoise.append(x + np.random.randn(*x.shape) * noise_scale)

    SvNoise = []
    for x in Sv:
        n = []
        for y in x:
            n.append(y + np.random.randn(*y.shape) * noise_scale)
        SvNoise.append(n)

    _, _, _, _, G, S = p1.optimize(G=GvNoise.copy(), S=SvNoise.copy(), steps=number_of_gd_steps, ks=ks)
    v, meta = roll(G, S)
    initial_pop.append(v.tolist())

    np.random.rand()
    _, _, _, _, G, S = p1.optimize(number_of_gd_steps, ks=ks)
    v, meta = roll(G, S)
    meta_g = meta
    vec_len = len(v)
    initial_pop.append(v.tolist())


    problem = Problem(vec_len, 1)
    problem.types[:] = Real(0, 1000)
    problem.function = partial(fit, p1, meta_g)

    generator = RandomGenerator()
    population = []

    for i in range(3):
        p = generator.generate(problem)
        p.variables = initial_pop[i]
        problem.evaluate(p)
        population.append(p)

    return problem, population, meta_g, p1
Example #21
    def update(self, **kwargs):
        """
        Rewrite update to support pareto front sampling.
        """
        assert 'X' in kwargs and 'Y' in kwargs
        super(MESMO, self).update(**kwargs)

        self.X_dim = self.X.shape[1]
        self.Y_dim = self.Y.shape[1]

        self.Multiplemes = [None] * self.Y_dim
        for i in range(self.Y_dim):
            self.Multiplemes[i] = MaxvalueEntropySearch(
                self.model[i],
                self.X,
                self.Y[:, i],
                random_state=self.rng.randint(10000))
            self.Multiplemes[i].Sampling_RFM()

        self.min_samples = []
        for j in range(self.sample_num):
            for i in range(self.Y_dim):
                self.Multiplemes[i].weigh_sampling()

            def CMO(xi):
                xi = np.asarray(xi)
                y = [
                    self.Multiplemes[i].f_regression(xi)[0][0]
                    for i in range(self.Y_dim)
                ]
                return y

            problem = Problem(self.X_dim, self.Y_dim)
            set_problem_types(self.config_space, problem)
            problem.function = CMO

            variator = get_variator(self.config_space)
            algorithm = NSGAII(problem, population_size=100, variator=variator)
            algorithm.run(1500)
            cheap_pareto_front = [
                list(solution.objectives) for solution in algorithm.result
            ]
            # picking the min over the pareto: best case
            min_of_functions = [min(f) for f in list(zip(*cheap_pareto_front))]
            self.min_samples.append(min_of_functions)
Example #22
def platypus_cube(objective, scale, n_trials, strategy):

    def _objective(vars):
        u1 = vars[0]
        u2 = vars[1]
        u3 = vars[2]
        return objective([u1, u2, u3])[0]

    problem = Problem(3, 1, 0)
    problem.types[:] = [Real(-scale, scale), Real(-scale, scale), Real(-scale, scale)]
    problem.constraints[:] = "<=0"  # no-op: the problem declares zero constraints
    problem.function = _objective

    algorithm = strategy(problem)
    algorithm.run(n_trials)
    feasible_solution_obj = [s.objectives[0] for s in algorithm.result if s.feasible]
    best_obj = min(feasible_solution_obj)
    return best_obj
Example #23
def make_multiobjective_function_counting(sources,
                                          bounds,
                                          times_more_detectors=1,
                                          interpolation_method="nearest"):
    """
    This balances the number of detectors with the quality of the outcome
    bounds : list[(x_min, x_max), (y_min, y_max), ...]
        The bounds on the feasible region
    """
    objective_function = make_single_objective_function(
        sources, interpolation_method=interpolation_method,
        masked=True)  # the function to be optimized
    counting_function = make_counting_objective()

    def multiobjective_func(x):  # this is the double objective function
        return [objective_function(x), counting_function(x)]

    # there is an x, y, and a mask for each source so there must be three
    # times more input variables
    # the upper bound on the number of detectors n times the number of
    # sources
    parameterized_locations = sources[0].parameterized_locations
    dimensionality = parameterized_locations.shape[1]

    # We add a boolean flag to each location variable
    num_inputs = len(sources) * (dimensionality + 1) * times_more_detectors
    NUM_OUTPUTS = 2  # the default for now
    # define the dimensionality of input and output spaces
    problem = Problem(num_inputs, NUM_OUTPUTS)

    logging.warning(
        f"Creating a multiobjective counting function with dimensionality {dimensionality}"
    )
    logging.warning(f"bounds are {bounds}")

    for i in range(dimensionality):
        # splat "*" notation is expanding the pair which is low, high
        problem.types[i::(dimensionality + 1)] = Real(
            *bounds[i])  # This is the feasible region

    # indicator on whether the source is on
    problem.types[dimensionality::(dimensionality + 1)] = Binary(1)
    problem.function = multiobjective_func
    return problem
Example #24
    def update(self, **kwargs):
        """
        Rewrite update
        """
        assert 'X' in kwargs and 'Y' in kwargs
        assert 'eta' in kwargs and 'num_data' in kwargs
        super(USeMO, self).update(**kwargs)

        self.X_dim = self.X.shape[1]
        self.Y_dim = self.Y.shape[1]
        assert self.Y_dim > 1

        # update single acquisition function
        for i in range(self.Y_dim):
            self.single_acq[i].update(model=self.model[i],
                                      eta=self.eta[i],
                                      num_data=self.num_data)
            self.uncertainty_acq[i].update(model=self.model[i],
                                           eta=self.eta[i],
                                           num_data=self.num_data)

        def CMO(x):
            x = np.asarray(x)
            # minimize negative acq
            return [-self.single_acq[i](x, convert=False)[0][0] for i in range(self.Y_dim)]

        problem = Problem(self.X_dim, self.Y_dim)
        for k in range(self.X_dim):
            problem.types[k] = Real(self.bounds[k][0], self.bounds[k][1])  # todo other types
        problem.function = CMO
        algorithm = NSGAII(problem)  # todo population_size
        algorithm.run(2500)
        cheap_pareto_set = [solution.variables for solution in algorithm.result]
        # cheap_pareto_set_unique = []
        # for i in range(len(cheap_pareto_set)):
        #     if not any((cheap_pareto_set[i] == x).all() for x in self.X):   # todo convert problem? no this step?
        #         cheap_pareto_set_unique.append(cheap_pareto_set[i])
        cheap_pareto_set_unique = cheap_pareto_set

        single_uncertainty = np.array([self.uncertainty_acq[i](np.asarray(cheap_pareto_set_unique), convert=False)
                                       for i in range(self.Y_dim)])  # shape=(Y_dim, N, 1)
        single_uncertainty = single_uncertainty.reshape(self.Y_dim, -1)  # shape=(Y_dim, N)
        self.uncertainties = np.prod(single_uncertainty, axis=0)  # shape=(Y_dim,) todo normalize?
        self.candidates = np.array(cheap_pareto_set_unique)
Example #25
def NSGA_test(data_name, input_dim, output_dim, x_bounds, epoch, population):
    """
    JSON for the bench_mark function:
    "function_name": {
        "hypervolume":,
        "v_ref": [],
        "w_ref": [],
        "x_bounds": [],
        "input_dim":,
        "output_dim":
    }
    """

    problem = Problem(input_dim, output_dim)
    problem.types[:] = [
        Real(x_bounds[i][0], x_bounds[i][1]) for i in range(input_dim)
    ]
    problem.function = eval(data_name)
    algorithm = NSGAII(problem, population_size=population)
    start = time.perf_counter()
    algorithm.run(epoch)
    end = time.perf_counter() - start
    v_ref = []
    w_ref = []
    w_ref_norm = []
    print(len(algorithm.result))
    pareto_frontier = np.zeros((len(algorithm.result), output_dim))
    pareto_frontier_norm = np.zeros((len(algorithm.result), output_dim))
    for i in range(output_dim):
        frontier_i = np.array([s.objectives[i] for s in algorithm.result])
        pareto_frontier[:, i] = frontier_i
        min_i = np.min(frontier_i)
        max_i = np.max(frontier_i)
        frontier_i_norm = (frontier_i - min_i) / (max_i - min_i)
        pareto_frontier_norm[:, i] = frontier_i_norm
        v_ref.append(min_i)
        w_ref.append(max_i)
        w_ref_norm.append(1)

    hyp = Hypervolume(minimum=v_ref, maximum=w_ref)
    HV = hyp(algorithm.result)
    return HV, end
Example #26
def make_multiobjective_function_competing(sources,
                                           bounds=None,
                                           bad_sources=(),
                                           interpolation_method="nearest"):
    """Create an objective function with a false alarm"""

    # Create the two functions
    objective_function = make_single_objective_function(
        sources, interpolation_method=interpolation_method
    )  # the function to be optimized
    bad_objective_function = make_single_objective_function(
        bad_sources,
        function_type="worst_case_TTA",
        interpolation_method=interpolation_method
    )  # the function to be optimized

    def multiobjective_func(x):  # this is the double objective function
        return [objective_function(x), bad_objective_function(x)]

    parameterized_locations = sources[0].parameterized_locations
    dimensionality = parameterized_locations.shape[1]

    num_inputs = len(sources) * dimensionality
    NUM_OUTPUTS = 2  # the default for now
    # define the dimensionality of input and output spaces
    problem = Problem(num_inputs, NUM_OUTPUTS)

    logging.warning(
        f"Creating a multiobjective competing function with dimensionality {dimensionality}"
    )
    logging.warning(f"bounds are {bounds}")
    for i in range(dimensionality):
        # splat "*" notation is expanding the pair which is low, high
        problem.types[i::dimensionality] = Real(
            *bounds[i])  # This is the feasible region

    problem.function = multiobjective_func
    # the second function should be maximized rather than minimized
    problem.directions[1] = Problem.MAXIMIZE
    return problem
Example #27
def platypus_grid(w_, garden_index, grids):
    # first compute convex hull of grids
    # parametrize problem by just the center of the grid
    points_list = []
    for i in range(grids.shape[0]):
        p = Point(grids[i, :])
        points_list.append(p)

    mtp = MultiPoint(points_list)
    cvx_hull = mtp.convex_hull
    gminx, gminy, gmaxx, gmaxy = cvx_hull.bounds

    problem = Problem(3, 2)
    problem.types[0] = Real(gminx, gmaxx)
    problem.types[1] = Real(gminy, gmaxy)
    problem.types[2] = Real(0, 180)
    problem.function = platypus_obj
    algorithm = NSGAII(problem)

    algorithm.run(100)
    print(algorithm.result)

    return algorithm.result
Example #28
def get_currentratio(data_name, pareto_front_list, w_ref, input_dim,
                     output_dim, x_bounds):
    problem = Problem(input_dim, output_dim)
    problem.types[:] = [
        Real(x_bounds[i][0], x_bounds[i][1]) for i in range(input_dim)
    ]
    problem.function = eval(data_name)
    algorithm = NSGAII(problem, population_size=50)
    algorithm.run(10000)
    true_pareto_front = np.zeros((len(algorithm.result), output_dim))
    for i in range(len(algorithm.result)):
        true_pareto_front[i, :] = algorithm.result[i].objectives[:]
    # print(true_pareto_front)
    true_w_ref = np.max(true_pareto_front, axis=0) + 1.0e-2
    for i in range(w_ref.shape[0]):
        if true_w_ref[i] > w_ref[i]:
            w_ref[i] = true_w_ref[i]
    true_HV = utils.calc_hypervolume(true_pareto_front, w_ref)

    ratio_list = []
    for pareto_front in pareto_front_list:
        hv_i = utils.calc_hypervolume(pareto_front, w_ref)
        ratio_list.append(hv_i / true_HV)
    return ratio_list
Example #29
from platypus import GeneticAlgorithm, Problem, Constraint, Binary, nondominated, unique

# This simple example has an optimal value of 15 when picking items 1 and 4.
items = 7
capacity = 9
weights = [2, 3, 6, 7, 5, 9, 4]
profits = [6, 5, 8, 9, 6, 7, 3]
    
def knapsack(x):
    selection = x[0]
    total_weight = sum([weights[i] if selection[i] else 0 for i in range(items)])
    total_profit = sum([profits[i] if selection[i] else 0 for i in range(items)])
    
    return total_profit, total_weight

problem = Problem(1, 1, 1)
problem.types[0] = Binary(items)
problem.directions[0] = Problem.MAXIMIZE
problem.constraints[0] = Constraint("<=", capacity)
problem.function = knapsack

algorithm = GeneticAlgorithm(problem)
algorithm.run(10000)

for solution in unique(nondominated(algorithm.result)):
    print(solution.variables, solution.objectives)
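Since the decision variable is Binary(items), solution.variables[0] decodes to a list of booleans; a short sketch for reporting the chosen item indices:

for solution in unique(nondominated(algorithm.result)):
    chosen = [i for i, picked in enumerate(solution.variables[0]) if picked]
    print("items:", chosen, "profit:", solution.objectives[0])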
Example #30
    # snippet truncated in the source: this is the tail of the constraint-building function
    c = c1 + [c2] + [c3]
    return [f], c


take_input()
dic = {}
from platypus import NSGAII, Problem, Real

for rmax__ in frange(0, rmaxi, 0.01):
    for bmax__ in frange(0, bmaxi, 0.01):
        rmax, bmax = rmax__, bmax__

        problem = Problem(5, 1, n + 2)
        problem.types[:] = Real(0, 2000)
        problem.constraints[:] = "<=0"
        problem.function = belegundu

        algorithm = NSGAII(problem)
        algorithm.run(100 * 100)

        # index of the solution with the largest first objective
        index = np.argmax([s.objectives[0] for s in algorithm.result])

        dic[algorithm.result[index].objectives[0]] = \
            algorithm.result[index].variables

print "============== OUTPUTS =============="
'''
print 'Solution:'
for solution in algorithm.result:
	 print(solution.objectives), solution.variables
'''
Example #31
    def fit(self, X, y):

        opt_start_time = time.time()
        kfold = None
        if isinstance(self.cv, int) and self.cv == 1:
            X_train, X_val, y_train, y_val = train_test_split(
                X, y, test_size=0.2, random_state=self.random_seed, stratify=y)
            logger.info("Not using Cross-Validation. "
                        "Performing single train/test split")
        else:
            is_clf = self.model.is_classifier()
            kfold = check_cv(self.cv, y=y, classifier=is_clf)
            # kfold = StratifiedKFold(
            #    n_splits=self.cv, random_state=self.random_seed, shuffle=True
            # )
            logger.info(f"Using Cross-Validation - {kfold}")

        self.ind = 0

        def train_test_model(parameter):
            # First check if we exceeded allocated time budget
            current_time = time.time()
            elapsed_time = current_time - opt_start_time
            if (self.max_opt_time
                    is not None) and (elapsed_time > self.max_opt_time):
                msg = (
                    f"Max optimization time exceeded. "
                    f"Max Opt time = {self.max_opt_time}, Elapsed Time = {elapsed_time}, "
                    f"NFE Completed - {self.ind}")
                raise MaxBudgetExceededException(msg)

            self.ind = self.ind + 1
            logger.info(f"Training population {self.ind}")

            parameter = self.param_to_dict(
                parameter,
                self.model_helper.param_choices,
                self.model_helper.param_categories,
                self.model_helper.param_type,
            )

            scorers = [get_scorer(scorer) for scorer in self.scoring]
            nscorers = len(scorers)

            try:
                if kfold is None:
                    clf = self.model_helper.create_instance(parameter)
                    clf_trained = clf.fit(X_train, y_train)

                    obj_val = [
                        scorer(clf_trained, X_val, y_val) for scorer in scorers
                    ]

                else:

                    obj_scores = [[] for _ in range(nscorers)]

                    # Perform k-fold cross-validation
                    for train_index, test_index in kfold.split(X, y):
                        if isinstance(X, pd.DataFrame):
                            X_train_split, X_val_split = (
                                X.iloc[train_index],
                                X.iloc[test_index],
                            )
                            y_train_split, y_val_split = (
                                y.iloc[train_index],
                                y.iloc[test_index],
                            )
                        else:
                            X_train_split, X_val_split = X[train_index], X[
                                test_index]
                            y_train_split, y_val_split = y[train_index], y[
                                test_index]

                        clf = self.model_helper.create_instance(parameter)
                        clf_trained = clf.fit(X_train_split, y_train_split)

                        obj_score = [
                            scorer(clf_trained, X_val_split, y_val_split)
                            for scorer in scorers
                        ]
                        for i in range(nscorers):
                            obj_scores[i].append(obj_score[i])

                    # Aggregate CV score
                    obj_val = [np.mean(obj_scores[i]) for i in range(nscorers)]
                    logger.debug(f"Obj k-fold scores - {obj_scores}")

                # By default we are solving a minimization MOO problem
                fitnessValue = [
                    self.best_score[i] - obj_val[i] for i in range(nscorers)
                ]
                logger.info(f"Train fitnessValue - {fitnessValue}")

            except jsonschema.ValidationError as e:
                logger.error(f"Caught JSON schema validation error.\n{e}")
                logger.error("Setting fitness (loss) values to infinity")
                fitnessValue = [np.inf for i in range(nscorers)]
                logger.info(f"Train fitnessValue - {fitnessValue}")

            return fitnessValue

        def time_check_callback(alg):
            current_time = time.time()
            elapsed_time = current_time - opt_start_time
            logger.info(
                f"NFE Complete - {alg.nfe}, Elapsed Time - {elapsed_time}")

        parameter_num = len(self.model_helper.param_choices)
        target_num = len(self.scoring)
        # Adjust max_evals if not a multiple of population size. This is
        # required as Platypus performs evaluations in multiples of
        # population_size.
        adjusted_max_evals = (self.max_evals //
                              self.population_size) * self.population_size
        if adjusted_max_evals != self.max_evals:
            logger.info(
                f"Adjusting max_evals to {adjusted_max_evals} from specified {self.max_evals}"
            )

        problem = Problem(parameter_num, target_num)
        problem.types[:] = self.model_helper.types
        problem.function = train_test_model

        # Set the variator based on types of decision variables
        varg = {}
        first_type = problem.types[0].__class__
        all_type_same = all([isinstance(t, first_type) for t in problem.types])
        # use compound operator for mixed types
        if not all_type_same:
            varg["variator"] = CompoundOperator(SBX(), HUX(), PM(), BitFlip())

        algorithm = NSGAII(
            problem,
            population_size=self.population_size,
            **varg,
        )

        try:
            algorithm.run(adjusted_max_evals, callback=time_check_callback)
        except MaxBudgetExceededException as e:
            logger.warning(
                f"Max optimization time budget exceeded. Optimization exited prematurely.\n{e}"
            )

        solutions = nondominated(algorithm.result)
        # solutions = [s for s in algorithm.result if s.feasible]
        # solutions = algorithm.result

        moo_solutions = []
        for solution in solutions:
            vars = []
            for pnum in range(parameter_num):
                vars.append(problem.types[pnum].decode(
                    solution.variables[pnum]))

            vars_dict = self.param_to_dict(
                vars,
                self.model_helper.param_choices,
                self.model_helper.param_categories,
                self.model_helper.param_type,
            )
            moo_solutions.append(self.Soln(vars_dict, solution.objectives))
            logger.info(f"{vars}, {solution.objectives}")

        self.moo_solutions = moo_solutions

        pareto_models = []
        for solution in self.moo_solutions:
            est = self.model_helper.create_instance(solution.variables)
            est_trained = est.fit(X, y)
            pareto_models.append((solution.variables, est_trained))

        self.pareto_models = pareto_models
        return self
Example #32
import functools
from platypus import NSGAII, Problem, Real

def problem_with_args(x, arg1, arg2=5):
    print("x:", x)
    print("arg1:", arg1)
    print("arg2:", arg2)

    return [x[0]**2, (x[0]-2)**2]

problem = Problem(1, 2)
problem.types[:] = Real(-10, 10)
problem.function = functools.partial(problem_with_args, arg1=2)

algorithm = NSGAII(problem)
algorithm.run(100)
Example #33
from platypus import NSGAII, Problem, Real

def schaffer(x):
    return [x[0]**2, (x[0]-2)**2]

problem = Problem(1, 2)
problem.types[:] = Real(-10, 10)
problem.function = schaffer

algorithm = NSGAII(problem)
algorithm.run(10000)
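After run, algorithm.result holds the final population; a short sketch for inspecting the Pareto front it approximates:

from platypus import nondominated

for solution in nondominated(algorithm.result):
    print(solution.variables[0], solution.objectives)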
Example #34
from platypus import NSGAII, Problem, Real

def belegundu(vars):
    x = vars[0]
    y = vars[1]
    return [-2*x + y, 2*x + y], [-x + y - 1, x + y - 7]

problem = Problem(2, 2, 2)
problem.types[:] = [Real(0, 5), Real(0, 3)]
problem.constraints[:] = "<=0"
problem.function = belegundu

algorithm = NSGAII(problem)
algorithm.run(10000)
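Because this problem is constrained, it is worth filtering on feasibility before reading off objectives; a minimal sketch:

feasible = [s for s in algorithm.result if s.feasible]
print(len(feasible), "feasible of", len(algorithm.result), "solutions")
for s in feasible[:5]:
    print(s.variables, s.objectives)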
Example #35
File: tsp.py  Project: quaquel/Platypus

import math
from platypus import GeneticAlgorithm, Permutation, Problem, nondominated, unique

# NOTE: the head of the coordinate list is truncated in the source
cities = [
        (4493, 7102), (3600, 6950), (3100, 7250), (4700, 8450), (5400, 8450),
        (5610, 10053), (4492, 10052), (3600, 10800), (3100, 10950), (4700, 11650),
        (5400, 11650), (6650, 10800), (7300, 10950), (7300, 7250), (6650, 6950),
        (7300, 3300), (6650, 2300), (5400, 1600), (8350, 2300), (7850, 3300),
        (9450, 5750), (10150, 5750), (10358, 7103), (9243, 7102), (8350, 6950),
        (7850, 7250), (9450, 8450), (10150, 8450), (10360, 10053), (9242, 10052),
        (8350, 10800), (7850, 10950), (9450, 11650), (10150, 11650), (11400, 10800),
        (12050, 10950), (12050, 7250), (11400, 6950), (12050, 3300), (11400, 2300),
        (10150, 1600), (13100, 2300), (12600, 3300), (14200, 5750), (14900, 5750),
        (15108, 7103), (13993, 7102), (13100, 6950), (12600, 7250), (14200, 8450),
        (14900, 8450), (15110, 10053), (13992, 10052), (13100, 10800), (12600, 10950),
        (14200, 11650), (14900, 11650), (16150, 10800), (16800, 10950), (16800, 7250),
        (16150, 6950), (16800, 3300), (16150, 2300), (14900, 1600), (19800, 800),
        (19800, 10000), (19800, 11900), (19800, 12200), (200, 12200), (200, 1100),
        (200, 800)]

def dist(x, y):
    return round(math.sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2))
    
def tsp(x):
    tour = x[0]
    return sum([dist(cities[tour[i]], cities[tour[(i + 1) % len(cities)]]) for i in range(len(tour))])

problem = Problem(1, 1)
problem.types[0] = Permutation(range(len(cities)))
problem.directions[0] = Problem.MINIMIZE
problem.function = tsp
 
algorithm = GeneticAlgorithm(problem)
algorithm.run(100000, callback = lambda a : print(a.nfe, unique(nondominated(algorithm.result))[0].objectives[0]))