Example 1
    def add(self, config: Configuration, perf: List[Perf]):
        if self.num_objs is None:
            self.num_objs = len(perf)
            self.mo_incumbent_value = [MAXINT] * self.num_objs
            # one independent incumbent list per objective (avoid the [list()] * n aliasing bug)
            self.mo_incumbents = [list() for _ in range(self.num_objs)]

        assert self.num_objs == len(perf)

        if config in self.data:
            self.logger.warning('Repeated configuration detected!')
            return

        self.data[config] = perf
        self.config_counter += 1

        # update pareto
        remove_config = []
        for pareto_config, pareto_perf in self.pareto.items():  # todo efficient way?
            if all(pp <= p for pp, p in zip(pareto_perf, perf)):
                break
            elif all(p <= pp for pp, p in zip(pareto_perf, perf)):
                remove_config.append(pareto_config)
        else:
            self.pareto[config] = perf
            self.logger.info('Update pareto: %s, %s.' %
                             (str(config), str(perf)))

        for conf in remove_config:
            self.logger.info('Remove from pareto: %s, %s.' %
                             (str(conf), str(self.pareto[conf])))
            self.pareto.pop(conf)

        # update mo_incumbents
        for i in range(self.num_objs):
            if len(self.mo_incumbents[i]) > 0:
                if perf[i] < self.mo_incumbent_value[i]:
                    self.mo_incumbents[i].clear()
                if perf[i] <= self.mo_incumbent_value[i]:
                    self.mo_incumbents[i].append((config, perf[i], perf))
                    self.mo_incumbent_value[i] = perf[i]
            else:
                self.mo_incumbent_value[i] = perf[i]
                self.mo_incumbents[i].append((config, perf[i], perf))

        # Calculate current hypervolume if reference point is provided
        if self.ref_point is not None:
            pareto_front = self.get_pareto_front()
            if pareto_front:
                hv = Hypervolume(
                    ref_point=self.ref_point).compute(pareto_front)
            else:
                hv = 0
            print('-' * 30)
            print('Current HV is %f' % hv)
            self.hv_data.append(hv)
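
The for/else construct in the Pareto update above is easy to misread: the else branch of the for loop runs only when the loop finishes without hitting break, i.e. when no stored Pareto point dominates the new perf. Below is a minimal, self-contained sketch of the same pattern (a plain dict instead of the history container, minimization assumed); update_pareto is a hypothetical helper, not part of the library.

def update_pareto(pareto, config, perf):
    dominated = []
    for p_config, p_perf in list(pareto.items()):
        if all(pp <= p for pp, p in zip(p_perf, perf)):
            break  # the new point is (weakly) dominated: keep the front unchanged
        elif all(p <= pp for pp, p in zip(p_perf, perf)):
            dominated.append(p_config)  # a stored point is dominated: drop it below
    else:
        pareto[config] = perf  # reached only if the loop did not break
    for p_config in dominated:
        pareto.pop(p_config)

pareto = {}
update_pareto(pareto, 'a', [2, 2])
update_pareto(pareto, 'b', [1, 3])  # incomparable with 'a': both are kept
update_pareto(pareto, 'c', [1, 1])  # dominates 'a' and 'b': both are removed
# pareto == {'c': [1, 1]}
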
Example 2
 def compute_hypervolume(self, ref_point=None):
     if ref_point is None:
         ref_point = self.ref_point
     assert ref_point is not None
     pareto_front = self.get_pareto_front()
     if pareto_front:
         hv = Hypervolume(ref_point=ref_point).compute(pareto_front)
     else:
         hv = 0
     return hv
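
Hypervolume(ref_point=...).compute(front) above returns the volume of objective space that the (minimization) Pareto front dominates, bounded by the reference point. As a reference for what that number means, here is a minimal two-objective sketch computed with a plain sweep; hypervolume_2d is an illustrative toy re-implementation, not the library's algorithm.

def hypervolume_2d(front, ref_point):
    # keep only points strictly better than the reference point in both objectives
    pts = sorted(p for p in front if p[0] < ref_point[0] and p[1] < ref_point[1])
    hv, prev_y = 0.0, ref_point[1]
    for x, y in pts:        # sweep along the first objective
        if y < prev_y:      # skip points dominated within the front
            hv += (ref_point[0] - x) * (prev_y - y)
            prev_y = y
    return hv

print(hypervolume_2d([[1, 3], [2, 2], [3, 1]], ref_point=[4, 4]))  # 6.0
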
Example 3
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(x):
        res = problem.evaluate(x)
        return np.array(res['objs']).reshape(1, -1)

    # random seed
    np.random.seed(seed)

    # Initial evaluations
    X_init = gpflowopt.design.LatinHyperCube(initial_runs, domain).generate()
    # X_init = gpflowopt.design.RandomDesign(initial_runs, domain).generate()
    # clip the initial design slightly inside the bounds to avoid numerical issues at the boundary
    if hasattr(problem, 'lb') and hasattr(problem, 'ub'):
        eps = 1e-8
        X_init = np.maximum(X_init, problem.lb + eps)
        X_init = np.minimum(X_init, problem.ub - eps)
    Y_init = np.vstack([objective_function(X_init[i, :]) for i in range(X_init.shape[0])])

    # One model for each objective
    objective_models = [gpflow.gpr.GPR(X_init.copy(), Y_init[:, [i]].copy(),
                                       gpflow.kernels.Matern52(domain.size, ARD=True))
                        for i in range(Y_init.shape[1])]
    for model in objective_models:
        model.likelihood.variance = 0.01

    hvpoi = gpflowopt.acquisition.HVProbabilityOfImprovement(objective_models)
    # First setup the optimization strategy for the acquisition function
    # Combining MC step followed by L-BFGS-B
    acquisition_opt = gpflowopt.optim.StagedOptimizer([gpflowopt.optim.MCOptimizer(domain, optimizer_mc_times),
                                                       gpflowopt.optim.SciPyOptimizer(domain)])

    # Then run the BayesianOptimizer for (max_runs - initial_runs) iterations
    optimizer = BayesianOptimizer_modified(domain, hvpoi, optimizer=acquisition_opt, verbose=True)
    result = optimizer.optimize(objective_function, n_iter=max_runs-initial_runs)

    # Save result
    # pf = optimizer.acquisition.pareto.front.value
    # pf, dom = gpflowopt.pareto.non_dominated_sort(hvpoi.data[1])
    pf = gpflowopt.pareto.Pareto(optimizer.acquisition.data[1]).front.value
    X, Y = optimizer.acquisition.data
    time_list = [0.] * initial_runs + optimizer.time_list
    hv_diffs = []
    for i in range(Y.shape[0]):
        # hv = gpflowopt.pareto.Pareto(Y[:i+1]).hypervolume(problem.ref_point)    # ref_point problem
        hv = Hypervolume(problem.ref_point).compute(Y[:i+1])
        hv_diff = problem.max_hv - hv
        hv_diffs.append(hv_diff)

    # plot for debugging
    if plot_mode == 1:
        plot_pf(problem, problem_str, mth, pf, Y_init)

    return hv_diffs, pf, X, Y, time_list
Example 4
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(config):
        res = problem.evaluate_config(config)
        res['config'] = config
        return res

    bo = SMBO(objective_function, cs,
              num_objs=problem.num_objs,
              num_constraints=0,
              surrogate_type=surrogate_type,            # default: gp
              acq_type=acq_type,                        # default: ehvi
              acq_optimizer_type=acq_optimizer_type,    # default: random_scipy
              initial_runs=initial_runs,                # default: 2 * (problem.dim + 1)
              init_strategy=init_strategy,              # default: sobol
              max_runs=max_runs,
              ref_point=problem.ref_point,
              time_limit_per_trial=time_limit_per_trial,
              task_id=task_id,
              random_state=seed)

    # bo.run()
    hv_diffs = []
    config_list = []
    perf_list = []
    time_list = []
    global_start_time = time.time()
    for i in range(max_runs):
        config, trial_state, objs, trial_info = bo.iterate()
        global_time = time.time() - global_start_time
        print(seed, i, objs, config, trial_state, trial_info, 'time=', global_time)
        hv = Hypervolume(problem.ref_point).compute(bo.get_history().get_pareto_front())
        hv_diff = problem.max_hv - hv
        print(seed, i, 'hypervolume =', hv)
        print(seed, i, 'hv diff =', hv_diff)
        hv_diffs.append(hv_diff)
        config_list.append(config)
        perf_list.append(objs)
        time_list.append(global_time)
    pf = np.asarray(bo.get_history().get_pareto_front())

    # plot for debugging
    if plot_mode == 1:
        Y_init = None
        plot_pf(problem, problem_str, mth, pf, Y_init)

    return hv_diffs, pf, config_list, perf_list, time_list
Example 5
                                                   hvpoi,
                                                   optimizer=acquisition_opt,
                                                   verbose=True)
            result = optimizer.optimize(multi_objective_func,
                                        n_iter=max_runs - init_num)

            pf = optimizer.acquisition.pareto.front.value
            # pf, dom = gpflowopt.pareto.non_dominated_sort(hvpoi.data[1])
            # print(hvpoi.data[1])

            # Save result
            data = optimizer.acquisition.data  # data=(X, Y)
            hv_diffs = []
            for i in range(data[1].shape[0]):
                # hv = gpflowopt.pareto.Pareto(data[1][:i+1]).hypervolume(referencePoint)    # ref_point problem
                hv = Hypervolume(referencePoint).compute(data[1][:i + 1])
                hv_diff = real_hv - hv
                hv_diffs.append(hv_diff)
            print(seed, mth, 'pareto num:', pf.shape[0])
            print(seed, 'real hv =', real_hv)
            print(seed, 'hv_diffs:', hv_diffs)

            timestamp = time.strftime('%Y-%m-%d-%H-%M-%S',
                                      time.localtime(time.time()))
            dir_path = 'logs/mo_benchmark_%s_%d/%s/' % (problem_str, max_runs,
                                                        mth)
            file = 'benchmark_%s_%04d_%s.pkl' % (mth, seed, timestamp)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            with open(os.path.join(dir_path, file), 'wb') as f:
                save_item = (hv_diffs, pf, data)
Example 6
        # initial_configurations=X_init, initial_runs=10,
        time_limit_per_trial=time_limit_per_trial,
        task_id='mo',
        random_state=seed)
    bo.config_advisor.optimizer.random_chooser.prob = rand_prob  # set rand_prob, default 0
    bo.config_advisor.acquisition_function.sample_num = sample_num  # set sample_num
    bo.config_advisor.acquisition_function.random_state = seed  # set random_state
    bo.config_advisor.optimizer.num_mc = 10000  # MESMO optimizer only
    bo.config_advisor.optimizer.num_opt = 10  # MESMO optimizer only
    print(mth, '===== start =====')
    # bo.run()
    hv_diffs = []
    for i in range(max_runs):
        config, trial_state, objs, trial_info = bo.iterate()
        print(i, objs, config)
        hv = Hypervolume(referencePoint).compute(
            bo.get_history().get_pareto_front())
        print(i, 'hypervolume =', hv)
        hv_diff = real_hv - hv
        hv_diffs.append(hv_diff)
        print(i, 'hv diff =', hv_diff)

    # Print result
    pf = np.asarray(bo.get_history().get_pareto_front())
    print(mth, 'pareto num:', pf.shape[0])
    print('real hv =', real_hv)
    print('hv_diffs:', hv_diffs)

    # Evaluate the random search.
    bo_r = SMBO(multi_objective_func,
                cs,
                num_objs=num_objs,
Example 7
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(config):
        res = problem.evaluate_config(config)
        res['config'] = config
        res['objs'] = np.asarray(res['objs']).tolist()
        res['constraints'] = np.asarray(res['constraints']).tolist()
        return res

    bo = SMBO(
        objective_function,
        cs,
        num_objs=problem.num_objs,
        num_constraints=problem.num_constraints,
        surrogate_type=surrogate_type,  # default: gp
        acq_type=acq_type,  # default: ehvic
        acq_optimizer_type=acq_optimizer_type,  # default: random_scipy
        initial_runs=initial_runs,  # default: 2 * (problem.dim + 1)
        init_strategy=init_strategy,  # default: sobol
        max_runs=max_runs,
        ref_point=problem.ref_point,
        time_limit_per_trial=time_limit_per_trial,
        task_id=task_id,
        random_state=seed)

    # bo.run()
    hv_diffs = []
    config_list = []
    perf_list = []
    time_list = []
    global_start_time = time.time()
    for i in range(max_runs):
        config, trial_state, origin_objs, trial_info = bo.iterate()
        global_time = time.time() - global_start_time
        constraints = [
            bo.config_advisor.constraint_perfs[j][-1]
            for j in range(problem.num_constraints)
        ]
        if any(c > 0 for c in constraints):
            # constraint violated: penalize with a large objective value
            objs = [9999999.0] * problem.num_objs
        else:
            objs = origin_objs
        print(seed, i, origin_objs, objs, constraints, config, trial_state,
              trial_info, 'time=', global_time)
        # make sure there is no repeated or failed config
        assert len(bo.config_advisor.constraint_perfs[0]) == i + 1
        config_list.append(config)
        perf_list.append(objs)
        time_list.append(global_time)
        hv = Hypervolume(problem.ref_point).compute(perf_list)
        hv_diff = problem.max_hv - hv
        hv_diffs.append(hv_diff)
        print(seed, i, 'hypervolume =', hv)
        print(seed, i, 'hv diff =', hv_diff)
    pf = np.asarray(bo.get_history().get_pareto_front())

    # plot for debugging
    if plot_mode == 1:
        Y_init = None
        plot_pf(problem, problem_str, mth, pf, Y_init)

    return hv_diffs, pf, config_list, perf_list, time_list
Example 8
bounds = problem.bounds


def CMO(xi):
    xi = np.asarray(xi)
    res = problem.evaluate(xi)
    return res['objs'], res['constraints']


t0 = time.time()
nsgaii_problem = Problem(dim, num_objs, num_constraints)
for k in range(dim):
    nsgaii_problem.types[k] = Real(bounds[k][0], bounds[k][1])
nsgaii_problem.constraints[:] = "<=0"
nsgaii_problem.function = CMO
algorithm = NSGAII(nsgaii_problem, population_size=1000)
algorithm.run(20000)  # 20000 function evaluations

cheap_pareto_front = np.array(
    [list(solution.objectives) for solution in algorithm.result])
cheap_constraints_values = np.array(
    [list(solution.constraints) for solution in algorithm.result])
print('pf shape =', cheap_pareto_front.shape, cheap_constraints_values.shape)

hv = Hypervolume(problem.ref_point).compute(cheap_pareto_front)
t1 = time.time()
print('ref point =', problem.ref_point)
print('nsgaii hv =', hv)
print('time =', t1 - t0)
plot_pf(problem, problem_str, 'nsgaii', cheap_pareto_front, None)
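
The example above computes the hypervolume of the full NSGA-II result set. Since the constraint values are already collected and the "<=0" feasibility convention is set on the problem, one could also restrict the front to feasible solutions before computing the hypervolume. A minimal sketch, reusing the variables above (feasible_front and hv_feasible are illustrative names, not part of the original script):

feasible_mask = np.all(cheap_constraints_values <= 0, axis=1)
feasible_front = cheap_pareto_front[feasible_mask]
print('feasible pf shape =', feasible_front.shape)
hv_feasible = Hypervolume(problem.ref_point).compute(feasible_front) if len(feasible_front) else 0
print('nsgaii feasible hv =', hv_feasible)
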
Example 9
 if file.startswith('benchmark_%s_' % (mth)) and file.endswith('.pkl'):
     with open(os.path.join(dir_path, file), 'rb') as f:
         save_item = pkl.load(f)
         hv_diffs, pf, data = save_item
     if recalc == 1:
         if mth.startswith('gpflowopt'):
             y = data[1]
         else:  # todo
             y = np.array(list(data.values()))
         hv_diffs = []
         history_container = MOHistoryContainer(None)
         for i in range(y.shape[0]):
             history_container.add(i, y[i].tolist())
             # use the Pareto front to avoid points worse than referencePoint
             pf = history_container.get_pareto_front()
             hv = Hypervolume(referencePoint).compute(pf)
             hv_diff = real_hv - hv
             hv_diffs.append(hv_diff)
     elif recalc == 2:  # only calculate final result
         if mth.startswith('gpflowopt'):
             y = data[1]
         else:  # todo
             y = np.array(list(data.values()))
         history_container = MOHistoryContainer(None)
         for i in range(y.shape[0]):
             history_container.add(i, y[i].tolist())
         pf = history_container.get_pareto_front()
         hv = Hypervolume(referencePoint).compute(pf)
         hv_diff = real_hv - hv
         hv_diffs = [hv_diff] * max_runs
     if len(hv_diffs) != max_runs: