def make_platypus_objective_function_counting(self,
                                                  sources,
                                                  times_more_detectors=1):
        """
        This balances the number of detectors with the quality of the outcome
        """
        total_ret_func = make_total_lookup_function(
            sources, masked=True)  # the function to be optimized
        counting_func = make_counting_objective()

        def multiobjective_func(x):  # the two-objective function
            return [total_ret_func(x), counting_func(x)]

        # there is an x, a y, and a mask for each detector, so there are three
        # input variables per detector; the detector budget is
        # times_more_detectors times the number of sources
        num_inputs = len(sources) * 3 * times_more_detectors
        NUM_OUTPUTS = 2  # the default for now
        # define the dimensionality of the input and output spaces
        problem = Problem(num_inputs, NUM_OUTPUTS)
        x, y, time = sources[0]  # expand the first source
        min_x = min(x)
        min_y = min(y)
        max_x = max(x)
        max_y = max(y)
        print("min x : {}, max x : {}, min y : {}, max y : {}".format(
            min_x, max_x, min_y, max_y))
        problem.types[0::3] = Real(min_x, max_x)  # This is the feasible region
        problem.types[1::3] = Real(min_y, max_y)
        # a single-bit mask toggling whether each detector is active
        problem.types[2::3] = Binary(1)
        problem.function = multiobjective_func
        return problem

    def make_platypus_objective_function_competing_function(
            self, sources, bad_sources=None):
        if bad_sources is None:  # avoid a mutable default argument
            bad_sources = []
        total_ret_func = make_total_lookup_function(
            sources, interpolation_method=self.interpolation_method
        )  # the function to be optimized
        bad_sources_func = make_total_lookup_function(
            bad_sources,
            type="fastest",
            interpolation_method=self.interpolation_method
        )  # the function to be optimized

        def multiobjective_func(x):  # the two-objective function
            return [total_ret_func(x), bad_sources_func(x)]

        num_inputs = len(sources) * 2  # there is an x and a y for each source
        NUM_OUTPUTS = 2  # the default for now
        # define the dimensionality of the input and output spaces
        problem = Problem(num_inputs, NUM_OUTPUTS)
        x, y, time = sources[0]  # expand the first source
        min_x = min(x)
        min_y = min(y)
        max_x = max(x)
        max_y = max(y)
        print("min x : {}, max x : {}, min y : {}, max y : {}".format(
            min_x, max_x, min_y, max_y))
        problem.types[::2] = Real(min_x, max_x)  # This is the feasible region
        problem.types[1::2] = Real(min_y, max_y)
        problem.function = multiobjective_func
        # the second function should be maximized rather than minimized
        problem.directions[1] = Problem.MAXIMIZE
        return problem
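
A hypothetical end-to-end use of the counting problem above (a sketch: `opt`, `sources`, and the evaluation budget are placeholders for whatever the surrounding class provides; `nondominated` is the Platypus helper used in later examples):

from platypus import NSGAII, nondominated

problem = opt.make_platypus_objective_function_counting(sources)
algorithm = NSGAII(problem)
algorithm.run(10000)
for solution in nondominated(algorithm.result):
    # variables come in (x, y, mask) triples; a Binary(1) variable decodes
    # to a one-element list of booleans
    xs = solution.variables[0::3]
    ys = solution.variables[1::3]
    active = [m[0] for m in solution.variables[2::3]]
    print(solution.objectives, list(zip(xs, ys, active)))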
Example #3
    def fit(self, x, y, bound=None, name=None):

        self.y = np.array(y)
        self.x = x

        TS = 1
        ND = len(y) - 1

        y = y / self.N

        t_start = 0.0
        t_end = ND
        t_inc = TS
        t_range = np.arange(t_start, t_end + t_inc, t_inc)

        Model_Input = (self.S0, self.I0, self.R0)

        # GA Parameters
        number_of_generations = 1000
        ga_population_size = 100
        number_of_objective_targets = 1  # The MSE
        number_of_constraints = 0
        number_of_input_variables = 2  # beta and gamma
        problem = Problem(number_of_input_variables,
                          number_of_objective_targets, number_of_constraints)
        problem.function = functools.partial(self.fitness_function,
                                             y=y,
                                             Model_Input=Model_Input,
                                             t_range=t_range)

        problem.types[0] = Real(0, 1)  # beta range
        problem.types[1] = Real(1 / 14, 1 / 5)  # gamma range (lower bound first)

        algorithm = NSGAII(problem, population_size=ga_population_size)

        # Running the GA
        algorithm.run(number_of_generations)

        feasible_solutions = [s for s in algorithm.result if s.feasible]
        # pick the feasible solution with the smallest MSE
        best_solution = min(feasible_solutions, key=lambda s: s.objectives[0])

        self.beta = best_solution.variables[0]
        self.gamma = best_solution.variables[1]

        input_variables = ['beta', 'gamma']
        file_address = 'optimised_coefficients/'
        filename = "ParametrosAjustados_Modelo_{}_{}_{}_Dias.txt".format(
            'SIR_EDO', name, len(x))

        if not os.path.exists(file_address):
            os.makedirs(file_address)
        with open(file_address + filename, "w") as file_optimised_parameters:
            for i in range(len(input_variables)):
                message = '{}:{:.4f}\n'.format(
                    input_variables[i], best_solution.variables[i])
                file_optimised_parameters.write(message)
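
The GA above optimizes `self.fitness_function`, which is not shown in this excerpt. A minimal stand-in, assuming a forward-Euler SIR integration and a single MSE objective (the real method may differ):

import numpy as np

def fitness_function_sketch(variables, y, Model_Input, t_range):
    """Hypothetical objective: integrate SIR with the candidate (beta, gamma)
    and return the MSE against the observed infected fraction y."""
    beta, gamma = variables
    S, I, R = Model_Input
    predicted = []
    for _ in t_range:
        predicted.append(I)
        new_infections = beta * S * I
        new_recoveries = gamma * I
        S = S - new_infections
        I = I + new_infections - new_recoveries
        R = R + new_recoveries
    return [float(np.mean((np.asarray(predicted) - np.asarray(y)) ** 2))]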
Example #4
 def __init__(self):
     super(my_mo_problem, self).__init__(
         self.nbOfPipes + self.nbOfPumps + 3 * self.nbOfTanks, 2, 1)
     self.types[:] = [Real(0, 9)] * self.nbOfPipes + [
         Real(0, self.n_curves - 1)
     ] * self.nbOfPumps + [Real(25, 100)] * self.nbOfTanks + [
         Real(25, 40)
     ] * self.nbOfTanks + [Real(5, 10)] * self.nbOfTanks
     self.constraints[:] = "<=0"
     self.directions[:] = Problem.MINIMIZE
Example #5
def Pareto_Front_Strategy(Probs, odds, Total_BetMax, Max_individual_bet,
                          Min_individual_bet):

    #NOTE: The order of bets and Probs is as follows: 1st (1) is AWAY win and 2nd (2) is HOME win

    Bets = np.argmax(Probs, 1) + 1

    n = len(Probs)  #number of games

    # Calculate probabilities for ALL the events together (i.e. each outcome). This will be used as weights for our optimisation
    k = n
    num_events = int(pow(2, n))
    Event_Probs = np.zeros(num_events)

    outc = outcomes_HACK(n)

    for i in range(num_events):
        individual_probs = np.diag(Probs[:, outc[i, :] - 1])
        Event_Probs[i] = PoissonBinomialPDF(k, n, individual_probs)

    # Pareto optimization
    problem = Problem(n, 2, 1)  # decision variables, objectives, constraints
    problem.types[:] = Real(
        Min_individual_bet,
        Max_individual_bet)  # lower and upper bounds for all decision variables
    problem.function = functools.partial(ExpectationRisk,
                                         arg1=n,
                                         arg2=odds,
                                         arg3=Event_Probs,
                                         arg4=Bets,
                                         arg5=outc)
    problem.constraints[:] = "<=" + str(
        Total_BetMax
    ) + ""  #inequality constraints: sum of bets no more than BetMax
    algorithm = NSGAII(problem)
    algorithm.run(5000)

    plt.scatter([s.objectives[0] for s in algorithm.result],
                [s.objectives[1] for s in algorithm.result])
    plt.show()

    Stakes = []
    ExpOut = None
    VarOut = None
    Ratio = 0

    for i in range(len(algorithm.result)):
        # objectives: [0] = expectation, [1] = variance

        # expectation/variance ratio threshold
        Exp = algorithm.result[i].objectives[0]
        Var = algorithm.result[i].objectives[1]
        curRatio = Exp / Var

        if curRatio > Ratio:
            Ratio = curRatio
            Stakes = algorithm.result[i].variables
            ExpOut = Exp
            VarOut = Var

    return (Stakes, Bets, ExpOut, VarOut)
Example #6
    def nn_opt(self, nn):
        with torch.no_grad():

            def obj_cons(x):
                tx = torch.tensor(x)
                out = nn(tx)
                return out[:self.nobj].numpy().tolist(), out[self.nobj:].numpy(
                ).tolist()

            def obj_ucons(x):
                tx = torch.tensor(x)
                return nn(tx).numpy().tolist()

            arch = Archive()
            if self.ncons == 0:
                prob = Problem(self.dim, self.nobj)
                prob.function = obj_ucons
            else:
                prob = Problem(self.dim, self.nobj, self.ncons)
                prob.function = obj_cons
                prob.constraints[:] = "<=0"
            prob.types[:] = [Real(self.lb, self.ub) for i in range(self.dim)]
            self.algo = NSGAII(prob, population_size=50, archive=arch)
            self.algo.run(5000)

            optimized = self.algo.result
            rand_idx = np.random.randint(len(optimized))
            suggested_x = torch.tensor(optimized[rand_idx].variables)
            suggested_y = nn(suggested_x)
            return suggested_x.view(-1, self.dim), suggested_y.view(
                -1, self.nobj + self.ncons)
Example #7
def platypus_cube(objective,
                  scale,
                  n_trials,
                  n_dim,
                  strategy,
                  with_count=False):

    global feval_count
    feval_count = 0

    def _objective(vars):
        global feval_count
        feval_count += 1
        return objective(list(vars))[0]

    problem = Problem(n_dim, 1, 0)
    problem.types[:] = [Real(-scale, scale)] * n_dim
    problem.constraints[:] = "<=0"
    problem.function = _objective

    algorithm = strategy(problem)
    algorithm.run(n_trials)
    feasible_solution_obj = [
        s.objectives[0] for s in algorithm.result if s.feasible
    ]
    best_obj = min(feasible_solution_obj)
    return (best_obj, feval_count) if with_count else best_obj
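
A minimal invocation sketch for the helper above; `sphere` is a stand-in objective which, like the expected `objective`, returns a list whose first entry is the value to minimize:

from platypus import NSGAII

def sphere(x):
    return [sum(v * v for v in x)]

best = platypus_cube(sphere, scale=5.0, n_trials=1000, n_dim=3, strategy=NSGAII)
print(best)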
Example #8
def generate_initial_population(pop_size=10, number_of_gd_steps=50):
    p1 = GradientDescentAdam(R, lr=alpha)
    initial_pop = []
    meta_g = None
    vec_len = None

    for x in range(pop_size):
        _, _, _, _, G, S = p1.optimize(number_of_gd_steps, ks=ks)
        v, meta = roll(G, S)
        meta_g = meta
        vec_len = len(v)
        initial_pop.append(v.tolist())

    problem = Problem(vec_len, 1)
    problem.types[:] = Real(0, 1000)
    problem.function = partial(fit, p1, meta_g)

    generator = RandomGenerator()
    population = []

    for i in range(pop_size):
        p = generator.generate(problem)
        p.variables = initial_pop[i]
        problem.evaluate(p)
        population.append(p)

    return problem, population, meta_g, generator
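
The function returns an evaluated seed population but leaves wiring it into an algorithm to the caller. One option is a generator that yields the seeds first and then falls back to random sampling; a sketch against the `Generator` interface that `RandomGenerator` implements (recent Platypus versions also ship an `InjectedPopulation` generator for this purpose):

from platypus import Generator, NSGAII, RandomGenerator

class SeededGenerator(Generator):
    """Yield pre-built solutions first, then fall back to random sampling."""

    def __init__(self, seeds):
        super(SeededGenerator, self).__init__()
        self._seeds = list(seeds)
        self._random = RandomGenerator()

    def generate(self, problem):
        return self._seeds.pop() if self._seeds else self._random.generate(problem)

# problem, population, meta_g, generator = generate_initial_population()
# algorithm = NSGAII(problem, generator=SeededGenerator(population))
# algorithm.run(1000)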
Example #9
def platypus_cube(objective, n_trials, n_dim, with_count=False, method=None):
    global feval_count
    feval_count = 0

    def _objective(vars):
        global feval_count
        feval_count += 1
        return float(objective(
            list(vars)))  # Avoid np.array as Platypus may puke

    problem = Problem(n_dim, 1, 0)
    problem.types[:] = [Real(0.0, 1.0)] * n_dim
    problem.constraints[:] = "<=0"
    problem.function = _objective

    strategy_and_args = PLATYPUS_ALGORITHMS[method]
    if isinstance(strategy_and_args, tuple):
        strategy = strategy_and_args[0]
        strategy_args = strategy_and_args[1]
        algorithm = strategy(problem, **strategy_args)
    else:
        strategy = strategy_and_args
        algorithm = strategy(problem)

    algorithm.run(n_trials)
    feasible_solution_obj = sorted([(s.objectives[0], s.variables)
                                    for s in algorithm.result if s.feasible],
                                   reverse=False)
    best_obj, best_x = feasible_solution_obj[0]
    if isinstance(best_x, FixedLengthArray):
        best_x = best_x._data  # CMA-ES returns it this way for some reason
    return (best_obj, best_x, feval_count) if with_count else (best_obj,
                                                               best_x)
Example #10
 def get_bounds(self):
     types = []
     bounds = np.zeros([self.n * self.m, 2])
     for i in range(self.n * self.m):
         bounds[i][0] = 0
         bounds[i][1] = 200
         types.append(Real(bounds[i][0], bounds[i][1]))
     return types
Example #11
def autoIterate(model,
                river,
                reach,
                rs,
                flow,
                stage,
                nct,
                plot,
                outf,
                metrics,
                correctDatum,
                evals=None,
                si=False):
    """
    Automatically iterate with NSGA-II
    """
    keys = metrics  # ensure same order
    evalf = evaluator(stage, useTests=keys, correctDatum=correctDatum)
    evals = int(
        input("How many evaluations to run? ")) if evals is None else evals
    plotpath = ".".join(outf.split(".")[:-1]) + ".png"
    count = 1
    print("Running automatic calibration")

    def manningEval(vars):
        n = vars[0]
        metrics = minimized(
            nstageSingleRun(model, river, reach, rs, stage, n, keys,
                            correctDatum))
        values = [metrics[key] for key in keys]
        constraints = [-n, n - 1]  # feasible iff 0 < n < 1
        nonlocal count
        print("Completed %d evaluations" % count)
        count += 1
        return values, constraints

    c_type = "<0"
    problem = Problem(
        1, len(keys),
        2)  # 1 decision variable, len(keys) objectives, and 2 constraints
    problem.types[:] = Real(0.001, 1)  # range of decision variable
    problem.constraints[:] = c_type
    problem.function = manningEval

    algorithm = NSGAII(problem, population_size=nct)
    algorithm.run(evals)
    nondom = nondominated(
        algorithm.result
    )  # nondom: list of Solutions - wanted value is variables[0]
    nondomNs = [sol.variables[0] for sol in nondom]
    results = runSims(model, nondomNs, river, reach, len(stage), range=[rs])
    resultPts = [(nondomNs[ix],
                  [results[ix][rs][jx] for jx in range(1,
                                                       len(stage) + 1)])
                 for ix in range(len(nondomNs))]
    metrics = [(res[0], evalf(res[1]), res[1]) for res in resultPts]
    nDisplay(metrics, flow, stage, plotpath, outf, plot, correctDatum, si)
    return metrics
Example #12
    def __init__(self, Amin, M, J, splits, objectives, TFmax=7):
        """
        @param Amin: minimum number of antecedents per rule
        @param M: maximum number of rules per individual
        @param J: matrix representing the initial rulebase
        @param splits: fuzzy partitions for input features
        @param objectives: array of objectives (2 or more). The first one typically expresses the performance
                        for predictions; solutions will be sorted according to this objective (best-to-worst)
        @param TFmax: maximum number of fuzzy sets per feature (i.e. maximum granularity)
        """
        self.Amin = Amin
        self.M = M
        self.Mmax = J.shape[0]
        self.Mmin = len(set(J[:, -1]))
        self.M = np.clip(self.M, self.Mmin, self.Mmax)
        self.F = J.shape[1] - 1
        self.TFmax = TFmax

        self.J = J
        self.initialBfs = splits
        self.G = np.array([len(split) for split in splits])

        self.objectives = objectives
        self.crb_l = 2 * self.M
        self.gran_l = self.F
        self.cdb_l = self.gran_l * self.TFmax

        # Creation of the problem and assignment of the types
        #      CRB: rule part
        #      Granularities: number of fuzzy sets
        #      CDB: database part
        super(RCSProblem, self).__init__(self.crb_l + self.gran_l + self.cdb_l,
                                         len(objectives))
        self.types[0:self.crb_l:2] = [Real(0, self.M) for _ in range(self.M)]
        self.types[1:self.crb_l:2] = [Binary(self.F) for _ in range(self.M)]
        self.types[self.crb_l:self.crb_l +
                   self.gran_l] = [Real(2, self.TFmax) for _ in range(self.F)]
        self.types[self.crb_l +
                   self.gran_l:] = [Real(0, 1) for _ in range(self.cdb_l)]

        # Maximize objectives by minimizing opposite
        self.directions[:] = [Problem.MINIMIZE for _ in range(len(objectives))]

        self.train_x = None
        self.train_y = None
Example #13
 def __init__(self, universe: Universe, buying_power: float):
     super(OptPortfolio, self).__init__(universe.count, 1, 1)
     self.universe = universe
     self.universe_historical_data = gen_universe_hist_data(
         universe.universe_set, "Adj. Close")
     self.buying_power = buying_power
     self.types[:] = Real(0, 1)
     self.constraints[:] = "<=0"
     self.directions[:] = Problem.MAXIMIZE
Example #14
def platypus_cube(objective, scale, n_trials, strategy):

    def _objective(vars):
        u1 = vars[0]
        u2 = vars[1]
        u3 = vars[2]
        return objective([u1,u2,u3])[0]

    problem = Problem(3, 1, 0)
    problem.types[:] = [Real(-scale, scale), Real(-scale, scale), Real(-scale, scale)]
    problem.constraints[:] = "<=0"
    problem.function = _objective

    algorithm = strategy(problem)
    algorithm.run(n_trials)
    feasible_solution_obj = [s.objectives[0] for s in algorithm.result if s.feasible]
    best_obj = min(feasible_solution_obj)
    return best_obj
Example #15
 def __init__(self, host):
     
     self.host = host 
     n = host.number_of_params()
     nconstraints = len(host.train_args['label']) + 1
     print('#number of constraints ', nconstraints)
     super(InterestingnessProblem, self).__init__(n, 2, nconstraints)
     
     self.types[:] = [Real(-10, 10) for _ in range(n)]
     self.constraints[:] = "<=0"
Example #16
    def __init__(self,
                 stream_network,
                 starting_water_price=800,
                 total_units_needed_factor=0.99,
                 objectives=2,
                 min_proportion=0,
                 simplified=False,
                 plot_output_folder=None,
                 *args):
        """

		:param decision_variables: when this is set to None, it will use the number of HUCs as the number of decision
			variables
		:param objectives:  default is two (total needs met, and min by species)
		:param min_proportion: What is the minimum proportion of flow that we can allocate to any single segment? Raising
				this value (min 0, max 0.999999999) prevents the model from extracting all its water in one spot.
		:param args:
		"""

        self.stream_network = stream_network
        self.stream_network.economic_benefit_calculator = economic_components.EconomicBenefit(
            starting_water_price,
            total_units_needed=self.get_needed_water(
                total_units_needed_factor))
        if simplified:
            self.decision_variables = 365
            self.simplified = True
        else:
            self.decision_variables = len(
                stream_network.stream_segments
            ) * 365  # we need a decision variable for every stream segment and day - we'll reshape them later
            self.simplified = False

        self.iterations = []
        self.objective_1 = []
        self.objective_2 = []

        self.best_obj1 = 0
        self._best_obj2_for_obj1 = 0
        self.best_obj2 = 0

        self.plot_output_folder = plot_output_folder

        log.info("Number of Decision Variables: {}".format(
            self.decision_variables))
        super(StreamNetworkProblem,
              self).__init__(self.decision_variables, objectives,
                             *args)  # pass any arguments through

        self.directions[:] = Problem.MAXIMIZE  # we want to maximize all of our objectives
        self.types[:] = Real(
            min_proportion,
            1)  # we now construe this as a proportion instead of a raw value

        self.eflows_nfe = 0
Example #17
def set_problem_types(config_space, problem, instance_features=None):
    """
    set problem.types for algorithms in platypus (NSGAII, ...)
    """

    if instance_features is not None:
        raise NotImplementedError
    for i, param in enumerate(config_space.get_hyperparameters()):
        if isinstance(param, (CategoricalHyperparameter)):
            n_cats = len(param.choices)
            problem.types[i] = Integer(0, n_cats - 1)
        elif isinstance(param, (OrdinalHyperparameter)):
            n_cats = len(param.sequence)
            problem.types[i] = Integer(0, n_cats - 1)
        elif isinstance(param, UniformFloatHyperparameter):
            problem.types[i] = Real(0, 1)
        elif isinstance(param, UniformIntegerHyperparameter):
            problem.types[i] = Real(0, 1)
        else:
            raise TypeError("Unsupported hyperparameter type %s" % type(param))
Example #18
    def set_types(self):
        """
			Sets the type of each decision variable and makes it the max, should be in the same order that we
			assign flows out later, so the max values should allign with the allocations that come in.
		:return:
		"""
        allocation_index = 0
        hucs = self.hucs
        for huc in hucs:
            self.types[allocation_index] = Real(0, huc.max_possible_flow)
            allocation_index += 1
Example #19
    def update(self, **kwargs):
        """
        Rewrite update
        """
        assert 'X' in kwargs and 'Y' in kwargs
        assert 'eta' in kwargs and 'num_data' in kwargs
        super(USeMO, self).update(**kwargs)

        self.X_dim = self.X.shape[1]
        self.Y_dim = self.Y.shape[1]
        assert self.Y_dim > 1

        # update single acquisition function
        for i in range(self.Y_dim):
            self.single_acq[i].update(model=self.model[i],
                                      eta=self.eta[i],
                                      num_data=self.num_data)
            self.uncertainty_acq[i].update(model=self.model[i],
                                           eta=self.eta[i],
                                           num_data=self.num_data)

        def CMO(x):
            x = np.asarray(x)
            # minimize negative acq
            return [
                -self.single_acq[i](x, convert=False)[0][0]
                for i in range(self.Y_dim)
            ]

        problem = Problem(self.X_dim, self.Y_dim)
        for k in range(self.X_dim):
            problem.types[k] = Real(self.bounds[k][0],
                                    self.bounds[k][1])  # todo other types
        problem.function = CMO
        algorithm = NSGAII(problem)  # todo population_size
        algorithm.run(2500)
        cheap_pareto_set = [
            solution.variables for solution in algorithm.result
        ]
        # cheap_pareto_set_unique = []
        # for i in range(len(cheap_pareto_set)):
        #     if not any((cheap_pareto_set[i] == x).all() for x in self.X):
        #         cheap_pareto_set_unique.append(cheap_pareto_set[i])
        cheap_pareto_set_unique = cheap_pareto_set

        single_uncertainty = np.array([
            self.uncertainty_acq[i](np.asarray(cheap_pareto_set_unique),
                                    convert=False) for i in range(self.Y_dim)
        ])  # shape=(Y_dim, N, 1)
        single_uncertainty = single_uncertainty.reshape(self.Y_dim,
                                                        -1)  # shape=(Y_dim, N)
        self.uncertainties = np.prod(single_uncertainty,
                                     axis=0)  # shape=(Y_dim,) todo normalize?
        self.candidates = np.array(cheap_pareto_set_unique)
Example #20
def cal(args, exeCount):
    start = time.time()
    param_count = 5  # number of parameters

    problem = Problem(param_count, 2)  # number of decision variables, number of objectives
    problem.types[:] = Real(-2.0, 2.0)  # parameter range
    problem.function = schaffer
    algorithm = NSGAII(problem)
    algorithm.run(5000)  # number of evaluations

    print('{:-^63}'.format('-'))

    # Organize the data
    # params: coefficients a
    # f1s   : residual vibration [deg]
    # f2s   : energy
    n_results = len(algorithm.result)
    params = np.empty([n_results, param_count])
    f1s = np.empty([n_results])
    f2s = np.empty([n_results])
    for i, solution in enumerate(algorithm.result):
        result = tuple(solution.variables + solution.objectives[:])

        params[i, :] = result[:param_count]
        f1s[i] = 180 * result[param_count] / np.pi
        f2s[i] = result[param_count + 1]

    # Show the values of a at which the residual vibration is minimal
    index = np.argmin(f1s)
    print('\n*** Values at minimum residual vibration ***')
    print('residual vibration [deg]\t{}'.format(f1s[index]))
    print('energy [J]\t{}'.format(f2s[index]))
    print('coefficients a\t\t{}'.format(params[index, :]))

    np.savetxt('./results/nsga2/{}/nsga2_params_{}.csv'.format(
        args[1], exeCount),
               params[index, :],
               delimiter=',')

    # elapsed time
    print(f'\nelapsed time: {time.time()-start}')

    # Write the coefficients a, residual vibration, and energy to a CSV file
    data = np.empty([n_results, param_count + 2])
    data[:, 0:param_count] = params
    data[:, param_count] = f1s
    data[:, param_count + 1] = f2s
    np.savetxt('./results/nsga2/{}/nsga2_data_{}.csv'.format(
        args[1], exeCount),
               data,
               delimiter=',')
Example #21
    def update(self, **kwargs):
        """
        Rewrite update to support pareto front sampling.
        """
        assert 'X' in kwargs and 'Y' in kwargs
        assert 'constraint_perfs' in kwargs
        super(MESMOC, self).update(**kwargs)

        self.X_dim = self.X.shape[1]
        self.Y_dim = self.Y.shape[1]

        self.Multiplemes = [None] * self.Y_dim
        self.Multiplemes_constraints = [None] * self.num_constraints
        for i in range(self.Y_dim):
            self.Multiplemes[i] = MaxvalueEntropySearch(self.model[i], self.X, self.Y[:, i],
                                                        random_state=self.random_state)
            self.Multiplemes[i].Sampling_RFM()
        for i in range(self.num_constraints):
            # Caution dim of self.constraint_perfs!
            self.Multiplemes_constraints[i] = MaxvalueEntropySearch(self.constraint_models[i],
                                                                    self.X, self.constraint_perfs[i])
            self.Multiplemes_constraints[i].Sampling_RFM()

        self.min_samples = []
        self.min_samples_constraints = []
        for j in range(self.sample_num):
            for i in range(self.Y_dim):
                self.Multiplemes[i].weigh_sampling()
            for i in range(self.num_constraints):
                self.Multiplemes_constraints[i].weigh_sampling()

            def CMO(xi):
                xi = np.asarray(xi)
                y = [self.Multiplemes[i].f_regression(xi)[0][0] for i in range(self.Y_dim)]
                y_c = [self.Multiplemes_constraints[i].f_regression(xi)[0][0] for i in range(self.num_constraints)]
                return y, y_c

            problem = Problem(self.X_dim, self.Y_dim, self.num_constraints)
            for k in range(self.X_dim):
                problem.types[k] = Real(self.bounds[k][0], self.bounds[k][1])  # todo other types
            problem.constraints[:] = "<=0"  # todo confirm
            problem.function = CMO
            algorithm = NSGAII(problem)
            algorithm.run(1500)
            cheap_pareto_front = [list(solution.objectives) for solution in algorithm.result]
            cheap_constraints_values = [list(solution.constraints) for solution in algorithm.result]
            # picking the min over the pareto: best case
            min_of_functions = [min(f) for f in list(zip(*cheap_pareto_front))]
            min_of_constraints = [min(f) for f in list(zip(*cheap_constraints_values))]  # todo confirm
            self.min_samples.append(min_of_functions)
            self.min_samples_constraints.append(min_of_constraints)
Example #22
def generate_initial_population(number_of_gd_steps=50):
    p1 = GradientDescentAdam(R, lr=alpha)
    initial_pop = []
    scale = 0.01

    noise_scale = 1e-7

    Gv, Sv = p1.construct_starting_point(ks, scale=scale)

    _, _, _, _, G, S = p1.optimize(G=Gv.copy(), S=Sv.copy(), steps=number_of_gd_steps, ks=ks)
    v, meta = roll(G, S)
    initial_pop.append(v.tolist())

    GvNoise = []
    for x in Gv:
        GvNoise.append(x + np.random.randn(*x.shape) * noise_scale)

    SvNoise = []
    for x in Sv:
        n = []
        for y in x:
            n.append(y + np.random.randn(*y.shape) * noise_scale)
        SvNoise.append(n)

    _, _, _, _, G, S = p1.optimize(G=GvNoise.copy(), S=SvNoise.copy(), steps=number_of_gd_steps, ks=ks)
    v, meta = roll(G, S)
    initial_pop.append(v.tolist())

    np.random.rand()  # return value unused; this only advances the global RNG state
    _, _, _, _, G, S = p1.optimize(number_of_gd_steps, ks=ks)
    v, meta = roll(G, S)
    meta_g = meta
    vec_len = len(v)
    initial_pop.append(v.tolist())


    problem = Problem(vec_len, 1)
    problem.types[:] = Real(0, 1000)
    problem.function = partial(fit, p1, meta_g)

    generator = RandomGenerator()
    population = []

    for i in range(3):
        p = generator.generate(problem)
        p.variables = initial_pop[i]
        problem.evaluate(p)
        population.append(p)

    return problem, population, meta_g, p1
Example #23
def platypus_grid(w_, garden_index, grids):
    # first compute convex hull of grids
    # parametrize problem by just the center of the grid
    points_list = []
    for i in range(grids.shape[0]):
        p = Point(grids[i, :])
        points_list.append(p)

    mtp = MultiPoint(points_list)
    cvx_hull = mtp.convex_hull
    gminx, gminy, gmaxx, gmaxy = cvx_hull.bounds

    problem = Problem(3, 2)
    problem.types[0] = Real(gminx, gmaxx)
    problem.types[1] = Real(gminy, gmaxy)
    problem.types[2] = Real(0, 180)
    problem.function = platypus_obj
    algorithm = NSGAII(problem)

    algorithm.run(100)
    print(algorithm.result)

    return algorithm.result
Example #24
 def __init__(self,
              template_objects,
              regularization_samples,
              generalization_generator,
              classifier=None,
              range=20):
     super(GOL, self).__init__(1, 2)
     self.types[:] = Real(0, 500)
     self.template_objects = template_objects
     self.regularization_samples = regularization_samples
     self.generalization_generator = generalization_generator
     self.classifier = classifier
     self.range = range
     self.energy = None
Example #25
    def run_nsgaii_bc():
        def CMO(xi):
            xi = np.asarray(xi)
            y = [branin(xi), Currin(xi)]
            return y

        problem = Problem(2, 2)
        problem.types[:] = Real(0, 1)
        problem.function = CMO
        algorithm = NSGAII(problem)
        algorithm.run(2500)
        cheap_pareto_front = np.array(
            [list(solution.objectives) for solution in algorithm.result])
        return cheap_pareto_front
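
`branin` and `Currin` are not defined in this excerpt; the definitions commonly paired with the unit-square bounds above are the rescaled Branin and the Currin exponential function (an assumption about this codebase; note Currin is singular at x[1] = 0):

import numpy as np

def branin(x):
    # Branin rescaled from [-5, 10] x [0, 15] onto the unit square
    x1 = 15 * x[0] - 5
    x2 = 15 * x[1]
    return ((x2 - 5.1 * x1 ** 2 / (4 * np.pi ** 2) + 5 * x1 / np.pi - 6) ** 2
            + 10 * (1 - 1 / (8 * np.pi)) * np.cos(x1) + 10)

def Currin(x):
    # Currin exponential function on the unit square
    return ((1 - np.exp(-1 / (2 * x[1])))
            * (2300 * x[0] ** 3 + 1900 * x[0] ** 2 + 2092 * x[0] + 60)
            / (100 * x[0] ** 3 + 500 * x[0] ** 2 + 4 * x[0] + 20))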
Example #26
    def run(self, n=10000):
        comp = [2000, 2000, 1000, 1000, 1000]
        for c in comp:
            x, y = self.generateLHS(c)
            self.ranComp(x, y)
            logger.debug(f"-- finish {c} --")

        total = sum(comp)
        num = n - total
        logger.debug(num)
        r = [Real(x[0], x[1]) for x in self.ran]
        mo = MOEA_D(des=self.dim, obj=self.obj, f=self.f, r=r)
        mo.run(n=num)
Example #27
    def __init__(self,
                 one_shot_objects,
                 regularization_samples,
                 generalization_generator,
                 classifier=None):
        super(GOL, self).__init__(1, 2)
        self.types[:] = Real(0, 500)
        self.one_shot_objects = one_shot_objects
        self.regularization_samples = regularization_samples
        self.generalization_generator = generalization_generator
        self.classifier = classifier
        self.gen_energy = None
        self.epoch_count = 0

        # Plot the initial distribution of the GOL model parameters
        self._plot_distributions(self.epoch_count)
Example #28
 def __init__(self,
              num_variables,
              num_objectives,
              X,
              T,
              n_hidden,
              sparse_degree=0.05,
              num_constraints=0):
     super(Objectives, self).__init__(
         num_variables, num_objectives, num_constraints
     )  # the number of decision variables, objectives, and constraints
     self.types[:] = Real(
         -1., 1.)  # specify the type (coding scheme) of the decision variables
     self.X, self.T = X, T
     self.n_hidden = n_hidden
     self.sparse_degree = sparse_degree
Example #29
def make_multiobjective_function_counting(sources,
                                          bounds,
                                          times_more_detectors=1,
                                          interpolation_method="nearest"):
    """
    This balances the number of detectors with the quality of the outcome
    bounds : list[(x_min, x_max), (y_min, y_max), ...]
        The bounds on the feasible region
    """
    objective_function = make_single_objective_function(
        sources, interpolation_method=interpolation_method,
        masked=True)  # the function to be optimized
    counting_function = make_counting_objective()

    def multiobjective_func(x):  # the two-objective function
        return [objective_function(x), counting_function(x)]

    # each detector contributes one variable per spatial dimension plus a
    # boolean mask; the detector budget is times_more_detectors times the
    # number of sources
    parameterized_locations = sources[0].parameterized_locations
    dimensionality = parameterized_locations.shape[1]

    # We add a boolean flag to each location variable
    num_inputs = len(sources) * (dimensionality + 1) * times_more_detectors
    NUM_OUTPUTS = 2  # the default for now
    # define the dimensionality of the input and output spaces
    problem = Problem(num_inputs, NUM_OUTPUTS)

    logging.warning(
        f"Creating a multiobjective counting function with dimensionality {dimensionality}"
    )
    logging.warning(f"bounds are {bounds}")

    for i in range(dimensionality):
        # splat "*" notation is expanding the pair which is low, high
        problem.types[i::(dimensionality + 1)] = Real(
            *bounds[i])  # This is the feasible region

    # indicator on whether the source is on
    problem.types[dimensionality::(dimensionality + 1)] = Binary(1)
    problem.function = multiobjective_func
    return problem
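
A hypothetical decoding of one solution from the problem above, given the layout of `dimensionality` Reals followed by a one-bit mask per detector:

def decode_detectors(solution, dimensionality):
    """Return the locations of the detectors whose mask bit is set."""
    step = dimensionality + 1
    detectors = []
    for i in range(0, len(solution.variables), step):
        location = solution.variables[i:i + dimensionality]
        active = solution.variables[i + dimensionality][0]  # Binary(1) -> [bool]
        if active:
            detectors.append(location)
    return detectors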
Example #30
def NSGA_test(data_name, input_dim, output_dim, x_bounds, epoch, population):
    """
    JSON entry for a benchmark function:
    "function_name":{
        "hypervolume":,
        "v_ref":[],
        "w_ref":[],
        "x_bounds":[],
        "input_dim":,
        "output_dim":
    }
    """

    problem = Problem(input_dim, output_dim)
    problem.types[:] = [
        Real(x_bounds[i][0], x_bounds[i][1]) for i in range(input_dim)
    ]
    problem.function = eval(data_name)  # resolve the benchmark function by name
    algorithm = NSGAII(problem, population_size=population)
    start = time.perf_counter()
    algorithm.run(epoch)
    end = time.perf_counter() - start
    v_ref = []
    w_ref = []
    w_ref_norm = []
    print(len(algorithm.result))
    pareto_frontier = np.zeros((len(algorithm.result), output_dim))
    pareto_frontier_norm = np.zeros((len(algorithm.result), output_dim))
    for i in range(output_dim):
        frontier_i = np.array([s.objectives[i] for s in algorithm.result])
        pareto_frontier[:, i] = frontier_i
        min_i = np.min(frontier_i)
        max_i = np.max(frontier_i)
        frontier_i_norm = (frontier_i - min_i) / (max_i - min_i)
        pareto_frontier_norm[:, i] = frontier_i_norm
        v_ref.append(min_i)
        w_ref.append(max_i)
        w_ref_norm.append(1)

    hyp = Hypervolume(minimum=v_ref, maximum=w_ref)
    HV = hyp(algorithm.result)
    return HV, end
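
A hypothetical invocation; `eval(data_name)` requires the benchmark (here a stand-in named `schaffer`) to be defined at module scope, with `x_bounds` holding one (low, high) pair per input dimension:

HV, elapsed = NSGA_test("schaffer", input_dim=2, output_dim=2,
                        x_bounds=[(-2.0, 2.0), (-2.0, 2.0)],
                        epoch=5000, population=100)
print("hypervolume: {:.4f}, time: {:.2f}s".format(HV, elapsed))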