Example #1
def calibrate(run_counts):
    """Run the optimization model to select the best parameter sets.

    Parameters
    ----------
    run_counts: int
        number of runs (passed to algorithm.run)

    Returns
    -------
    optimal_params: list
        the non-dominated (Pareto-optimal) parameter sets
    """
    algorithm = NSGAII(XajCalibrate(), population_size=500,
                       variator=GAOperator(SBX(0.95, 20.0), PM(2, 25.0)))
    algorithm.run(run_counts)

    # Keep only the non-dominated solutions.
    nondominated_solutions = nondominated(algorithm.result)

    # Plot the results using matplotlib.
    # With more than three objectives a dimensionality reduction would be
    # needed for visualization; here only two objectives are plotted.
    plt.scatter([s.objectives[0] for s in nondominated_solutions],
                [s.objectives[1] for s in nondominated_solutions])
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel("$mare$")
    plt.ylabel("$nse$")
    plt.show()

    # Return the optimal parameter sets.
    optimal_params = []
    for nondominated_solution in nondominated_solutions:
        optimal_params.append(nondominated_solution.variables)
    return optimal_params
Example #2
def project():
    conn = sqlite3.connect('products.db')
    companies = get_companies(conn)

    problem = Problem(len(companies), 2)
    #problem.types[:] = [Integer(1, nlojas) for _ in range(nprodutos)]

    problem.function = schaffer

    algorithm = NSGAII(problem)
    algorithm.run(10000)

    for company in range(len(companies)):
        result = schaffer(companies[company][0], conn)
        print(result)

    conn.close()

    plt.scatter([s.objectives[0] for s in algorithm.result],
                [s.objectives[1] for s in algorithm.result])
    plt.xlim([0, 1.1])
    plt.ylim([0, 1.1])
    plt.xlabel("Preço")
    plt.ylabel("Distancia")
    plt.show()
Example #3
def main():
    os = OneShotObjects()
    rs = RegularizationSamples(os, num_reg_samples_per_object=10)
    gg = GeneralizationGenerator(os,
                                 rs,
                                 num_objects=os.num_objects,
                                 num_gen_samples_per_object=1000)
    cs = Classifier(num_classes=os.num_objects, range=os.range)

    gol_model = GOL(os, rs, gg, cs)

    opt_algorithm = NSGAII(gol_model)
    opt_algorithm.run(2)

    for solution in opt_algorithm.result:
        print(solution.objectives)

    print(np.shape(opt_algorithm.result))

    # Uncomment this code block to save the Pareto solutions to a file
    '''
    with open('pareto_solutions.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        for s in opt_algorithm.result:
            writer.writerow((s.objectives[0], s.objectives[1]))
    '''

    plt.scatter([s.objectives[0] for s in opt_algorithm.result],
                [s.objectives[1] for s in opt_algorithm.result])
    #plt.xlim([0, 500.1])
    #plt.ylim([0, 1.1])
    plt.xlabel(r"$J(\Theta, \hat{x}(t))$")
    plt.ylabel(r"$a(\Theta, \hat{x}(t))$")
    plt.show()
Example #4
def Pareto_Front_Strategy(Probs, odds, Total_BetMax, Max_individual_bet,
                          Min_individual_bet):

    #NOTE: The order of bets and Probs is as follows: 1st (1) is AWAY win and 2nd (2) is HOME win

    Bets = np.argmax(Probs, 1) + 1

    n = len(Probs)  #number of games

    # Calculate probabilities for ALL the events together (i.e. each outcome). This will be used as weights for our optimisation
    k = n
    num_events = int(pow(2, n))
    Event_Probs = np.zeros(num_events)

    outc = outcomes_HACK(n)

    for i in range(num_events):
        individual_probs = np.diag(Probs[:, outc[i, :] - 1])
        Event_Probs[i] = PoissonBinomialPDF(k, n, individual_probs)

    #Pareto optimization
    problem = Problem(n, 2, 1)  #vars, problem dim, constraints
    problem.types[:] = Real(
        Min_individual_bet,
        Max_individual_bet)  #lower and upper bounds for all decision variables
    problem.function = functools.partial(ExpectationRisk,
                                         arg1=n,
                                         arg2=odds,
                                         arg3=Event_Probs,
                                         arg4=Bets,
                                         arg5=outc)
    problem.constraints[:] = "<=" + str(Total_BetMax)  # inequality constraint: sum of bets no more than Total_BetMax
    algorithm = NSGAII(problem)
    algorithm.run(5000)

    plt.scatter([s.objectives[0] for s in algorithm.result],
                [s.objectives[1] for s in algorithm.result])
    plt.show()

    Stakes = []
    Ratio = 0
    ExpOut = None  # remain None if no solution has a positive Exp/Var ratio
    VarOut = None

    for i in range(len(algorithm.result)):
        # objectives are: -data[0] = Expectation, data[1] = Variance

        # Expectation/Variance ratio threshold
        Exp = algorithm.result[i].objectives[0]
        Var = algorithm.result[i].objectives[1]
        curRatio = Exp / Var

        if curRatio > Ratio:
            Ratio = curRatio
            Stakes = algorithm.result[i].variables
            ExpOut = Exp
            VarOut = Var

    return (Stakes, Bets, ExpOut, VarOut)
Example #5
def main():
    to = TemplateObjects()
    rs = RegularizationSamples(to, num_reg_samples_per_object=10)
    gg = GeneralizationGenerator(to,
                                 rs,
                                 num_objects=to.num_objects,
                                 num_gen_samples_per_object=100)
    cs = Classifier(num_classes=to.num_objects)

    #model = GOL(to, rs, gg, cs)
    #model.train()

    algorithm = NSGAII(GOL(to, rs, gg, cs))
    algorithm.run(1)

    for solution in algorithm.result:
        print(solution.objectives)

    print(np.shape(algorithm.result))

    plt.scatter([s.objectives[0] for s in algorithm.result],
                [s.objectives[1] for s in algorithm.result])
    plt.xlim([0, 500.1])
    plt.ylim([0, 1.1])
    plt.xlabel("$f_1(x)$")
    plt.ylabel("$f_2(x)$")
    plt.show()
Example #6
    def update(self, **kwargs):
        """
        Rewrite update
        """
        assert 'X' in kwargs and 'Y' in kwargs
        assert 'eta' in kwargs and 'num_data' in kwargs
        super(USeMO, self).update(**kwargs)

        self.X_dim = self.X.shape[1]
        self.Y_dim = self.Y.shape[1]
        assert self.Y_dim > 1

        # update single acquisition function
        for i in range(self.Y_dim):
            self.single_acq[i].update(model=self.model[i],
                                      eta=self.eta[i],
                                      num_data=self.num_data)
            self.uncertainty_acq[i].update(model=self.model[i],
                                           eta=self.eta[i],
                                           num_data=self.num_data)

        def CMO(x):
            x = np.asarray(x)
            # minimize negative acq
            return [
                -self.single_acq[i](x, convert=False)[0][0]
                for i in range(self.Y_dim)
            ]

        problem = Problem(self.X_dim, self.Y_dim)
        set_problem_types(self.config_space, problem)
        problem.function = CMO

        variator = get_variator(self.config_space)
        algorithm = NSGAII(problem, population_size=100, variator=variator)
        algorithm.run(2500)
        # decode
        for s in algorithm.result:
            s.variables[:] = [
                problem.types[i].decode(s.variables[i])
                for i in range(problem.nvars)
            ]
        cheap_pareto_set = [
            solution.variables for solution in algorithm.result
        ]
        # cheap_pareto_set_unique = []
        # for i in range(len(cheap_pareto_set)):
        #     if not any((cheap_pareto_set[i] == x).all() for x in self.X):
        #         cheap_pareto_set_unique.append(cheap_pareto_set[i])
        cheap_pareto_set_unique = cheap_pareto_set

        single_uncertainty = np.array([
            self.uncertainty_acq[i](np.asarray(cheap_pareto_set_unique),
                                    convert=False) for i in range(self.Y_dim)
        ])  # shape=(Y_dim, N, 1)
        single_uncertainty = single_uncertainty.reshape(self.Y_dim,
                                                        -1)  # shape=(Y_dim, N)
        self.uncertainties = np.prod(single_uncertainty,
                                     axis=0)  # shape=(Y_dim,) todo normalize?
        self.candidates = np.array(cheap_pareto_set_unique)
Example #7
    def fit(self, x, y, bound=None, name=None):

        self.y = np.array(y)
        self.x = x

        TS = 1
        ND = len(y) - 1

        y = y / self.N

        t_start = 0.0
        t_end = ND
        t_inc = TS
        t_range = np.arange(t_start, t_end + t_inc, t_inc)

        Model_Input = (self.S0, self.I0, self.R0)

        # GA Parameters
        number_of_generations = 1000
        ga_population_size = 100
        number_of_objective_targets = 1  # The MSE
        number_of_constraints = 0
        number_of_input_variables = 2  # beta and gamma
        problem = Problem(number_of_input_variables,
                          number_of_objective_targets, number_of_constraints)
        problem.function = functools.partial(self.fitness_function,
                                             y=y,
                                             Model_Input=Model_Input,
                                             t_range=t_range)

        algorithm = NSGAII(problem, population_size=ga_population_size)

        problem.types[0] = Real(0, 1)  # beta initial range
        problem.types[1] = Real(1 / 14, 1 / 5)  # gamma initial range (Real expects the lower bound first)

        # Running the GA
        algorithm.run(number_of_generations)

        feasible_solutions = [s for s in algorithm.result if s.feasible]

        self.beta = feasible_solutions[0].variables[0]
        self.gamma = feasible_solutions[0].variables[1]

        input_variables = ['beta', 'gamma']
        file_address = 'optimised_coefficients/'
        filename = "ParametrosAjustados_Modelo_{}_{}_{}_Dias.txt".format(
            'SIR_EDO', name, len(x))

        if not os.path.exists(file_address):
            os.makedirs(file_address)

        # Write the optimised coefficients, overwriting any previous file.
        with open(file_address + filename, "w") as file_optimised_parameters:
            for i in range(len(input_variables)):
                message = '{}:{:.4f}\n'.format(
                    input_variables[i], feasible_solutions[0].variables[i])
                file_optimised_parameters.write(message)
Example #8
def main():
    to = TemplateObjects()
    rs = RegularizationSamples(to, num_reg_samples_per_object=10)
    gg = GeneralizationGenerator(to,
                                 rs,
                                 num_objects=to.num_objects,
                                 num_gen_samples_per_object=100)
    cs = Classifier(num_classes=to.num_objects)

    #model = GOL(to, rs, gg, cs)
    #model.train()

    algorithm = NSGAII(GOL(to, rs, gg, cs))
    algorithm.run(2)

    for solution in algorithm.result:
        print(solution.objectives)

    print(np.shape(algorithm.result))

    with open('pareto_solutions.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        for s in algorithm.result:
            writer.writerow((s.objectives[0], s.objectives[1]))
Example #9
def test_platypus_nsgaii_process_pool(two_reservoir_problem):
    """Undertake a single step of the NSGAII algorithm with a ProcessPool."""
    with ProcessPoolEvaluator(2) as evaluator:
        algorithm = NSGAII(two_reservoir_problem.problem,
                           population_size=50,
                           evaluator=evaluator)
        algorithm.run(10)
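
For context, a self-contained sketch of the same pattern is shown below; it uses the built-in DTLZ2 problem as a stand-in for the test fixture's two-reservoir problem, so the problem choice and run length are illustrative rather than taken from the original test.

from platypus import NSGAII, DTLZ2, ProcessPoolEvaluator

if __name__ == "__main__":  # guard needed when worker processes are spawned
    # DTLZ2 stands in for the fixture's problem; 2 worker processes as in the test.
    problem = DTLZ2()
    with ProcessPoolEvaluator(2) as evaluator:
        algorithm = NSGAII(problem, population_size=50, evaluator=evaluator)
        algorithm.run(1000)
    print(len(algorithm.result), "solutions found")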
Example #10
def validate_flow_methods(model_run_name="upper_cosumnes_subset_2010",
                          show_plot=True):
    problem = run_optimize_new(run_problem=False,
                               model_run_name=model_run_name)["problem"]

    measurements = numpy.linspace(0, 1, 101)
    for measurement in measurements:
        log.info(measurement)
        initial_flows = optimize.SimpleInitialFlowsGenerator(measurement)

        runner = NSGAII(
            problem, generator=initial_flows, population_size=1
        )  # shouldn't matter what algorithm we use - we only do 1 NFE
        runner.run(
            1)  # run it for 1 NFE just to see what these initial flows do

    plt.plot(problem.iterations, problem.objective_1)

    plt.xlabel("Percent of Available Flow")
    plt.ylabel("Environmental Benefit")

    plt.savefig(os.path.join(settings.BASE_DIR, "data", "results",
                             "validation_plot_{}.png".format(model_run_name)),
                dpi=300)

    if show_plot:
        plt.show()

    plt.close()

    return {"x": problem.iterations, "y": problem.objective_1}
Example #11
def get_critical_nodes():
    # algorithm = NSGAII(problem=BOCNDP(), generator=DfsGenerator())
    algorithm = NSGAII(problem=BOCNDP(), generator=DegreeGenerator())
    # algorithm = NSGAII(problem=BOCNDP(), generator=RandomWalkGenerator())
    algorithm.run(10000)

    print(algorithm.result[0].objectives)
    return algorithm.result[0].objectives
Example #12
def _solve(args):
    problem, iterations, use_max = args
    algorithm = NSGAII(problem)
    algorithm.run(iterations)
    feasible_solutions = [
        s.objectives[0] for s in algorithm.result if s.feasible
    ]
    return max(feasible_solutions) if use_max else min(feasible_solutions)
Example #13
def get_critical_nodes():
    algorithm = NSGAII(BOCNDP(),
                       selector=TournamentSelector(dominance=NashDominance()),
                       archive=NashArchive())
    algorithm.run(100)

    print(algorithm.result[0].objectives)
    return algorithm.result[0].objectives
Example #14
def autoIterate(model,
                river,
                reach,
                rs,
                flow,
                stage,
                nct,
                plot,
                outf,
                metrics,
                correctDatum,
                evals=None,
                si=False):
    """
    Automatically iterate with NSGA-II
    """
    keys = metrics  # ensure same order
    evalf = evaluator(stage, useTests=keys, correctDatum=correctDatum)
    evals = int(
        input("How many evaluations to run? ")) if evals is None else evals
    plotpath = ".".join(outf.split(".")[:-1]) + ".png"
    count = 1
    print("Running automatic calibration")

    def manningEval(vars):
        n = vars[0]
        metrics = minimized(
            nstageSingleRun(model, river, reach, rs, stage, n, keys,
                            correctDatum))
        values = [metrics[key] for key in keys]
        constraints = [-n, n - 1]
        nonlocal count
        print("Completed %d evaluations" % count)
        count += 1
        return values, constraints

    c_type = "<0"
    problem = Problem(
        1, len(keys),
        2)  # 1 decision variable, len(keys) objectives, and 2 constraints
    problem.types[:] = Real(0.001, 1)  # range of decision variable
    problem.constraints[:] = c_type
    problem.function = manningEval

    algorithm = NSGAII(problem, population_size=nct)
    algorithm.run(evals)
    nondom = nondominated(
        algorithm.result
    )  # nondom: list of Solutions - wanted value is variables[0]
    nondomNs = [sol.variables[0] for sol in nondom]
    results = runSims(model, nondomNs, river, reach, len(stage), range=[rs])
    resultPts = [(nondomNs[ix],
                  [results[ix][rs][jx] for jx in range(1,
                                                       len(stage) + 1)])
                 for ix in range(len(nondomNs))]
    metrics = [(res[0], evalf(res[1]), res[1]) for res in resultPts]
    nDisplay(metrics, flow, stage, plotpath, outf, plot, correctDatum, si)
    return metrics
Example #15
def generate_point(nobjs, population_size):
    # define the problem definition
    problem = DTLZ2(nobjs=nobjs)
    # instantiate the optimization algorithm
    algorithm = NSGAII(problem, population_size=population_size)
    algorithm.run(1000)
    # mutate_point = RandomGenerator().generate(problem=problem)
    return algorithm.result[0].objectives._data
Example #16
def get_critical_nodes():
    algorithm = NSGAII(CNDP(),
                       selector=TournamentSelector(dominance=BergeDominance()),
                       archive=BergeArchive())
    algorithm.run(1000)

    fitness = algorithm.result[0].objectives[0]
    print(fitness)

    return fitness
Example #17
def get_critical_nodes():
    algorithm = NSGAII(BOCNDP())
    # algorithm = EpsMOEA(BOCNDP(), epsilons=[0.05])
    # algorithm = SPEA2(BOCNDP())
    # algorithm = IBEA(BOCNDP())
    # algorithm = PAES(BOCNDP())
    # algorithm = EpsNSGAII(BOCNDP(), epsilons=[0.05])
    algorithm.run(10000)

    print(algorithm.result[0].objectives)
    return algorithm.result[0].objectives
Example #18
def mo_run(population_size):
    mo_problem = Problem(2, 2, 2)
    mo_problem.types[:] = Binary(len(requirements))
    mo_problem.directions[0] = Problem.MAXIMIZE
    mo_problem.directions[1] = Problem.MINIMIZE
    mo_problem.constraints[:] = "<={}".format(budget)
    mo_problem.function = multi_objective_nrp
    mo_nsga = NSGAII(mo_problem, population_size)
    mo_nsga.run(runs)
    x_mo = [solution.objectives[0] for solution in mo_nsga.result]
    y_mo = [solution.objectives[1] * (-1) for solution in mo_nsga.result]
    return x_mo, y_mo, mo_nsga
Example #19
def cal(args, exeCount):
    start = time.time()
    param_count = 5  # number of parameters

    problem = Problem(param_count, 2)  # number of decision variables, number of objectives
    problem.types[:] = Real(-2.0, 2.0)  # parameter range
    problem.function = schaffer
    algorithm = NSGAII(problem)
    algorithm.run(5000)  # number of evaluations

    print('{:-^63}'.format('-'))

    # Organize the data
    # params: coefficients a
    # f1s   : residual vibration [deg]
    # f2s   : energy
    params = np.empty([100, param_count])
    f1s = np.empty([100])
    f2s = np.empty([100])
    for i, solution in enumerate(algorithm.result):
        result = tuple(solution.variables + solution.objectives[:])

        params[i, :] = result[:param_count][:]
        f1s[i] = 180 * result[param_count] / np.pi
        f2s[i] = result[param_count + 1]

    # Show the coefficients a that minimize the residual vibration
    index = np.argmin(f1s)
    print('\n*** Values at the minimum residual vibration ***')
    print('Residual vibration [deg]\t{}'.format(f1s[index]))
    print('Energy [J]\t{}'.format(f2s[index]))
    print('Coefficients a\t\t{}'.format(params[index, :]))

    np.savetxt('./results/nsga2/{}/nsga2_params_{}.csv'.format(
        args[1], exeCount),
               params[index, :],
               delimiter=',')

    # Elapsed time
    print(f'\nelapsed time: {time.time()-start}')

    # Write coefficients a, residual vibration, and energy to a CSV file
    data = np.empty([100, param_count + 2])
    data[:, 0:param_count] = params
    data[:, param_count] = f1s
    data[:, param_count + 1] = f2s
    np.savetxt('./results/nsga2/{}/nsga2_data_{}.csv'.format(
        args[1], exeCount),
               data,
               delimiter=',')
Example #20
    def update(self, **kwargs):
        """
        Rewrite update to support pareto front sampling.
        """
        assert 'X' in kwargs and 'Y' in kwargs
        assert 'constraint_perfs' in kwargs
        super(MESMOC, self).update(**kwargs)

        self.X_dim = self.X.shape[1]
        self.Y_dim = self.Y.shape[1]

        self.Multiplemes = [None] * self.Y_dim
        self.Multiplemes_constraints = [None] * self.num_constraints
        for i in range(self.Y_dim):
            self.Multiplemes[i] = MaxvalueEntropySearch(self.model[i], self.X, self.Y[:, i],
                                                        random_state=self.random_state)
            self.Multiplemes[i].Sampling_RFM()
        for i in range(self.num_constraints):
            # Caution dim of self.constraint_perfs!
            self.Multiplemes_constraints[i] = MaxvalueEntropySearch(self.constraint_models[i],
                                                                    self.X, self.constraint_perfs[i])
            self.Multiplemes_constraints[i].Sampling_RFM()

        self.min_samples = []
        self.min_samples_constraints = []
        for j in range(self.sample_num):
            for i in range(self.Y_dim):
                self.Multiplemes[i].weigh_sampling()
            for i in range(self.num_constraints):
                self.Multiplemes_constraints[i].weigh_sampling()

            def CMO(xi):
                xi = np.asarray(xi)
                y = [self.Multiplemes[i].f_regression(xi)[0][0] for i in range(self.Y_dim)]
                y_c = [self.Multiplemes_constraints[i].f_regression(xi)[0][0] for i in range(self.num_constraints)]
                return y, y_c

            problem = Problem(self.X_dim, self.Y_dim, self.num_constraints)
            for k in range(self.X_dim):
                problem.types[k] = Real(self.bounds[k][0], self.bounds[k][1])  # todo other types
            problem.constraints[:] = "<=0"  # todo confirm
            problem.function = CMO
            algorithm = NSGAII(problem)
            algorithm.run(1500)
            cheap_pareto_front = [list(solution.objectives) for solution in algorithm.result]
            cheap_constraints_values = [list(solution.constraints) for solution in algorithm.result]
            # picking the min over the pareto: best case
            min_of_functions = [min(f) for f in list(zip(*cheap_pareto_front))]
            min_of_constraints = [min(f) for f in list(zip(*cheap_constraints_values))]  # todo confirm
            self.min_samples.append(min_of_functions)
            self.min_samples_constraints.append(min_of_constraints)
Example #21
    def run_nsgaii_bc():
        def CMO(xi):
            xi = np.asarray(xi)
            y = [branin(xi), Currin(xi)]
            return y

        problem = Problem(2, 2)
        problem.types[:] = Real(0, 1)
        problem.function = CMO
        algorithm = NSGAII(problem)
        algorithm.run(2500)
        cheap_pareto_front = np.array(
            [list(solution.objectives) for solution in algorithm.result])
        return cheap_pareto_front
Example #22
def generate_portfolios(buying_power: float, user_id: int, start_date: str,
                        end_date: str, num_portfs: int):
    # Setup stock universe
    universe = ReadUniverse(start_date, end_date)
    filtered_universe = filter_universe_trend(universe, start_date, end_date)
    if filtered_universe is None or filtered_universe.count == 0:
        print(
            "Error filtering, please ensure correct start and end dates, and that they're valid trading days."
        )
        return None
    # Initialize the problem class and the genetic algorithm, then run for 1000 evaluations
    problem = OptPortfolio(filtered_universe, buying_power)
    algorithm = NSGAII(problem)
    algorithm.run(1000)

    # Only keep solutions that are within constraints
    feasible_solutions = [s for s in algorithm.result if s.feasible]
    ports = num_portfs

    # If the number of feasible solutions is less than the number requested, use the length as the number to return
    if len(feasible_solutions) < num_portfs:
        ports = len(feasible_solutions)

    # Decode solutions back to integers for the number of requested portfolios
    solutions = [[
        problem.typeInt.decode(i) for i in feasible_solutions[j].variables
    ] for j in range(ports)]

    # Create allocation dictionary using ticker name as key, assuming uniform weights, for each solution
    portfolios = []
    for sol in solutions:
        alloc = {}
        shares = {}
        assets = []
        for i, asset in enumerate(filtered_universe.UniverseSet):
            if (sol[i] == 1):
                alloc[asset.Ticker] = 0.1
                shares[asset.Ticker] = (buying_power * 0.1) / asset.LastPrice
                assets.append(asset)

        portf = Portfolio(UserID=user_id,
                          BuyingPower=buying_power,
                          assets=assets,
                          AssetAlloc=alloc,
                          AssetShares=shares)
        portfolios.append(portf)

    return portfolios
Example #23
def main():
    # get config vars
    config = cli.init()
    population_size_nsgaii = config['POPULATION_SIZE_NSGAII']
    number_of_runs_nsgaii = config['NUMBER_OF_RUNS_NSGAII']
    number_of_runs_ga = config['NUMBER_OF_RUNS_GA']
    population_size_ga = config['POPULATION_SIZE_GA']
    config_path = config['TEST_DATA_PATH']
    ga_weights = config['GA_WEIGHTS']
    budget_constraint = config['BUDGET_CONSTRAINT']
    # parse and get specific data
    data = test_data.parse(config_path)
    requirements = data[0]
    clients = data[1]
    # run NSGA-II multi-objective algorithm
    print(datetime.datetime.now())
    print('Running NSGA-II...')
    NRP_multi = NRP_MOO(requirements, clients, budget_constraint)
    algorithm = NSGAII(NRP_multi.generate_problem(),
                       population_size=population_size_nsgaii)
    algorithm.run(number_of_runs_nsgaii)
    NSGAII_solutions = unique(nondominated(algorithm.result))
    # run GA single-objective algorithm with different weights
    GA_solutions = []
    for ga_weight in ga_weights:
        print(datetime.datetime.now())
        print('Running GA for weights ' + str(ga_weight) + ' and ' +
              str(1 - ga_weight) + '...')
        NRP_single = NRP_SOO(requirements, clients, budget_constraint,
                             ga_weight, 1 - ga_weight)
        algorithm = GeneticAlgorithm(NRP_single.generate_problem(),
                                     population_size=population_size_ga)
        algorithm.run(number_of_runs_ga)
        GA_solutions.extend(unique(nondominated(algorithm.result)))
    # run random algorithm
    print(datetime.datetime.now())
    print('Generating random solution...')
    NRP_random = NRP_Random(requirements, clients, budget_constraint)
    random_solutions = NRP_random.generate_solutions()
    print('done!')
    # draw graphs
    results.draw_graphs([
        results.get_graph_data_nsga_ii(NSGAII_solutions),
        results.get_graph_data_ga(GA_solutions, requirements, clients,
                                  budget_constraint),
        results.get_graph_data_ga(random_solutions, requirements, clients,
                                  budget_constraint)
    ])
Example #24
    def fit(self, X, y):
        X, y = copy.deepcopy(X), copy.deepcopy(y)
        self.y = y
        y_bin = y
        self.X, self.y_bin = X, y
        # # start evolving in MOEA
        num_variables = (self.X.shape[1] + 1) * self.n_hidden
        algorithm = NSGAII(Objectives(num_variables,
                                      2,
                                      self.X,
                                      y_bin,
                                      self.n_hidden,
                                      sparse_degree=self.sparse_degree),
                           population_size=self.n_pop)

        # MOEAD(Objectives(num_variables, 2, self.X, y_bin, self.n_hidden, sparse_degree=self.sparse_degree),
        #               population_size=self.n_pop, neighborhood_size=int(self.n_pop/10))  # delta=0.5, eta=0.8
        algorithm.run(self.max_iter)
        self.evo_result = algorithm.result
        print('total solutions:', len(algorithm.result))
        nondom_result = nondominated(algorithm.result)
        print('nondominated solutions:', len(nondom_result))
        self.nondom_solution = nondom_result
        self.W = []
        self.B = []
        for i in range(len(nondom_result)):
            s = nondom_result[i]
            W = np.asarray(s.variables).reshape(self.X.shape[1] + 1,
                                                self.n_hidden)
            X_ = np.append(self.X, np.ones((self.X.shape[0], 1)), axis=1)
            H = expit(np.dot(X_, W))
            B = np.dot(linalg.pinv(H), y_bin)
            self.W.append(W)
            self.B.append(B)
            real_degree = H.mean(axis=0)  # n_hidden dim
            avg_activation = real_degree.mean()
            print('NO.', i, '  obj:', s.objectives, 'AVG activation:',
                  avg_activation)
        self.W = np.asarray(self.W)
        self.B = np.asarray(self.B)
        # # best W/B
        best_index = self.get_best_index()
        self.best_W = self.W[best_index]
        self.best_B = self.B[best_index]
        return self
Example #25
    def update(self, **kwargs):
        """
        Rewrite update to support pareto front sampling.
        """
        assert 'X' in kwargs and 'Y' in kwargs
        super(MESMO, self).update(**kwargs)

        self.X_dim = self.X.shape[1]
        self.Y_dim = self.Y.shape[1]

        self.Multiplemes = [None] * self.Y_dim
        for i in range(self.Y_dim):
            self.Multiplemes[i] = MaxvalueEntropySearch(
                self.model[i],
                self.X,
                self.Y[:, i],
                random_state=self.rng.randint(10000))
            self.Multiplemes[i].Sampling_RFM()

        self.min_samples = []
        for j in range(self.sample_num):
            for i in range(self.Y_dim):
                self.Multiplemes[i].weigh_sampling()

            def CMO(xi):
                xi = np.asarray(xi)
                y = [
                    self.Multiplemes[i].f_regression(xi)[0][0]
                    for i in range(self.Y_dim)
                ]
                return y

            problem = Problem(self.X_dim, self.Y_dim)
            set_problem_types(self.config_space, problem)
            problem.function = CMO

            variator = get_variator(self.config_space)
            algorithm = NSGAII(problem, population_size=100, variator=variator)
            algorithm.run(1500)
            cheap_pareto_front = [
                list(solution.objectives) for solution in algorithm.result
            ]
            # picking the min over the pareto: best case
            min_of_functions = [min(f) for f in list(zip(*cheap_pareto_front))]
            self.min_samples.append(min_of_functions)
Example #26
    def update(self, **kwargs):
        """
        Rewrite update
        """
        assert 'X' in kwargs and 'Y' in kwargs
        assert 'eta' in kwargs and 'num_data' in kwargs
        super(USeMO, self).update(**kwargs)

        self.X_dim = self.X.shape[1]
        self.Y_dim = self.Y.shape[1]
        assert self.Y_dim > 1

        # update single acquisition function
        for i in range(self.Y_dim):
            self.single_acq[i].update(model=self.model[i],
                                      eta=self.eta[i],
                                      num_data=self.num_data)
            self.uncertainty_acq[i].update(model=self.model[i],
                                           eta=self.eta[i],
                                           num_data=self.num_data)

        def CMO(x):
            x = np.asarray(x)
            # minimize negative acq
            return [-self.single_acq[i](x, convert=False)[0][0] for i in range(self.Y_dim)]

        problem = Problem(self.X_dim, self.Y_dim)
        for k in range(self.X_dim):
            problem.types[k] = Real(self.bounds[k][0], self.bounds[k][1])  # todo other types
        problem.function = CMO
        algorithm = NSGAII(problem)  # todo population_size
        algorithm.run(2500)
        cheap_pareto_set = [solution.variables for solution in algorithm.result]
        # cheap_pareto_set_unique = []
        # for i in range(len(cheap_pareto_set)):
        #     if not any((cheap_pareto_set[i] == x).all() for x in self.X):   # todo convert problem? no this step?
        #         cheap_pareto_set_unique.append(cheap_pareto_set[i])
        cheap_pareto_set_unique = cheap_pareto_set

        single_uncertainty = np.array([self.uncertainty_acq[i](np.asarray(cheap_pareto_set_unique), convert=False)
                                       for i in range(self.Y_dim)])  # shape=(Y_dim, N, 1)
        single_uncertainty = single_uncertainty.reshape(self.Y_dim, -1)  # shape=(Y_dim, N)
        self.uncertainties = np.prod(single_uncertainty, axis=0)  # shape=(Y_dim,) todo normalize?
        self.candidates = np.array(cheap_pareto_set_unique)
Example #27
def NSGA_test(data_name, input_dim, output_dim, x_bounds, epoch, population):
    """
    JSON entry for each benchmark function:
    "function_name":{
        "hypervolume":,
        "v_ref":[],
        "w_ref":[],
        "x_bounds":[],
        "input_dim":,
        "output_dim":
    }
    """

    problem = Problem(input_dim, output_dim)
    problem.types[:] = [
        Real(x_bounds[i][0], x_bounds[i][1]) for i in range(input_dim)
    ]
    problem.function = eval(data_name)
    algorithm = NSGAII(problem, population_size=population)
    start = time.perf_counter()
    algorithm.run(epoch)
    end = time.perf_counter() - start
    v_ref = []
    w_ref = []
    w_ref_norm = []
    print(len(algorithm.result))
    pareto_frontier = np.zeros((len(algorithm.result), output_dim))
    pareto_frontier_norm = np.zeros((len(algorithm.result), output_dim))
    for i in range(output_dim):
        frontier_i = np.array([s.objectives[i] for s in algorithm.result])
        pareto_frontier[:, i] = frontier_i
        min_i = np.min(frontier_i)
        max_i = np.max(frontier_i)
        frontier_i_norm = (frontier_i - min_i) / (max_i - min_i)
        pareto_frontier_norm[:, i] = frontier_i_norm
        v_ref.append(min_i)
        w_ref.append(max_i)
        w_ref_norm.append(1)

    hyp = Hypervolume(minimum=v_ref, maximum=w_ref)
    HV = hyp(algorithm.result)
    return HV, end
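
For illustration, a benchmark entry with the keys described in the docstring might look like the sketch below; the function name, bounds, and reference points are made-up placeholders, not values from the original benchmark JSON.

import json

# Hypothetical benchmark entry using the keys from the docstring above.
# "my_function" would have to name a two-objective callable that eval()
# can resolve inside NSGA_test; all values here are placeholders.
bench = {
    "my_function": {
        "hypervolume": 0.0,
        "v_ref": [0.0, 0.0],
        "w_ref": [1.0, 1.0],
        "x_bounds": [[0.0, 1.0], [0.0, 1.0]],
        "input_dim": 2,
        "output_dim": 2,
    }
}
entry = bench["my_function"]
print(json.dumps(entry, indent=4))

# The entry's fields map onto the NSGA_test arguments, e.g.:
# NSGA_test("my_function", entry["input_dim"], entry["output_dim"],
#           entry["x_bounds"], epoch=5000, population=100)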
Example #28
def multi_objective(requirements, budget):
    problem = ReleaseProblem(requirements,
                             budget * sum(req.cost for req in requirements))
    alg = NSGAII(problem)
    alg.run(10000)

    plt.scatter([s.objectives[0] for s in alg.result],
                [s.objectives[1] for s in alg.result])
    plt.xlim([
        min(s.objectives[0] for s in alg.result),
        max(s.objectives[0] for s in alg.result)
    ])
    plt.ylim([
        max(s.objectives[1] for s in alg.result),
        min(s.objectives[1] for s in alg.result)
    ])
    plt.xlabel("Value")
    plt.ylabel("Cost")
    plt.show()
    return alg.result
Example #29
def generate_portfolio(universe: Universe, buying_power: float, user_id: int):
    algorithm = NSGAII(OptPortfolio(universe, buying_power))
    algorithm.run(1000)

    feasible_solutions = [s for s in algorithm.result if s.feasible]

    sol = feasible_solutions[0]

    alloc = {}
    for i in range(universe.count):
        alloc[universe.universe_set[i].ticker] = sol.variables[i]

    print(alloc)

    portf = Portfolio(user_id=user_id,
                      buying_power=buying_power * (1 - sum(sol.variables)),
                      assets=universe.universe_set,
                      asset_alloc=alloc)

    return portf
Example #30
def single_objective(requirements, budget):
    weights = np.arange(0.1, 1, 0.05)
    results = []

    for weight in weights:
        problem = SingleObjReleaseProblem(
            requirements, budget * sum(req.cost for req in requirements),
            weight)
        alg = NSGAII(problem)
        alg.run(10000)

        results.append(alg.result[0:10])

    cost_value_list = []
    for result_list in results:
        for result in result_list:
            sum_value = 0
            sum_cost = 0

            for i in range(0, len(result.variables) - 1):
                if result.variables[i][0]:
                    sum_value += requirements[i].value
                    sum_cost += requirements[i].cost
            cost_value_list.append([sum_value, sum_cost])

    plt.scatter([r[0] for r in cost_value_list],
                [r[1] for r in cost_value_list])
    plt.xlim([
        min(r[0] for r in cost_value_list) - 1,
        max(r[0] for r in cost_value_list) + 1
    ])
    plt.ylim([
        max(r[1] for r in cost_value_list) + 1,
        min(r[1] for r in cost_value_list) - 1
    ])
    plt.xlabel("Value")
    plt.ylabel("Cost")
    plt.show()
    return results
Example #31
from platypus import NSGAII, Problem, Real

class Schaffer(Problem):

    def __init__(self):
        super(Schaffer, self).__init__(1, 2)
        self.types[:] = Real(-10, 10)
    
    def evaluate(self, solution):
        x = solution.variables[:]
        solution.objectives[:] = [x[0]**2, (x[0]-2)**2]

algorithm = NSGAII(Schaffer())
algorithm.run(10000)
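
As a minimal follow-up sketch (assuming matplotlib is installed), the Pareto front produced by the run above can be filtered and plotted with the same calls used in the earlier examples:

import matplotlib.pyplot as plt
from platypus import nondominated

# Keep only the non-dominated solutions and plot the two objectives.
solutions = nondominated(algorithm.result)
plt.scatter([s.objectives[0] for s in solutions],
            [s.objectives[1] for s in solutions])
plt.xlabel("$f_1(x)$")
plt.ylabel("$f_2(x)$")
plt.show()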