Code Example #1: multi-objective Bayesian optimization with gpflowopt (HVPoI acquisition)
# Assumed imports; 'problem', 'domain', 'initial_runs', 'max_runs',
# 'optimizer_mc_times', 'plot_mode', 'BayesianOptimizer_modified',
# 'Hypervolume' and 'plot_pf' are defined at module level (see sketch below).
import numpy as np
import gpflow
import gpflowopt


def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(x):
        res = problem.evaluate(x)
        return np.array(res['objs']).reshape(1, -1)

    # random seed
    np.random.seed(seed)

    # Initial evaluations
    X_init = gpflowopt.design.LatinHyperCube(initial_runs, domain).generate()
    # X_init = gpflowopt.design.RandomDesign(initial_runs, domain).generate()
    # clip the design into the interior of the box to avoid numerical issues at the bounds
    if hasattr(problem, 'lb') and hasattr(problem, 'ub'):
        eps = 1e-8
        X_init = np.maximum(X_init, problem.lb + eps)
        X_init = np.minimum(X_init, problem.ub - eps)
    Y_init = np.vstack([objective_function(X_init[i, :]) for i in range(X_init.shape[0])])

    # One model for each objective
    objective_models = [gpflow.gpr.GPR(X_init.copy(), Y_init[:, [i]].copy(),
                                       gpflow.kernels.Matern52(domain.size, ARD=True))
                        for i in range(Y_init.shape[1])]
    for model in objective_models:
        model.likelihood.variance = 0.01

    hvpoi = gpflowopt.acquisition.HVProbabilityOfImprovement(objective_models)
    # First set up the optimization strategy for the acquisition function:
    # a Monte Carlo sampling step followed by L-BFGS-B (SciPy)
    acquisition_opt = gpflowopt.optim.StagedOptimizer([gpflowopt.optim.MCOptimizer(domain, optimizer_mc_times),
                                                       gpflowopt.optim.SciPyOptimizer(domain)])

    # Then run the BayesianOptimizer for (max_runs - initial_runs) iterations
    optimizer = BayesianOptimizer_modified(domain, hvpoi, optimizer=acquisition_opt, verbose=True)
    result = optimizer.optimize(objective_function, n_iter=max_runs-initial_runs)

    # Save result
    # pf = optimizer.acquisition.pareto.front.value
    # pf, dom = gpflowopt.pareto.non_dominated_sort(hvpoi.data[1])
    pf = gpflowopt.pareto.Pareto(optimizer.acquisition.data[1]).front.value
    X, Y = optimizer.acquisition.data
    time_list = [0.] * initial_runs + optimizer.time_list
    hv_diffs = []
    for i in range(Y.shape[0]):
        # hv = gpflowopt.pareto.Pareto(Y[:i+1]).hypervolume(problem.ref_point)    # alternative; disabled due to a ref_point issue
        hv = Hypervolume(problem.ref_point).compute(Y[:i+1])
        hv_diff = problem.max_hv - hv
        hv_diffs.append(hv_diff)

    # plot for debugging
    if plot_mode == 1:
        plot_pf(problem, problem_str, mth, pf, Y_init)

    return hv_diffs, pf, X, Y, time_list
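
The snippet assumes module-level setup that is not shown. Below is a minimal sketch of what it might look like; the dimension and the constants are illustrative assumptions, not values from the original.

# Hypothetical module-level setup assumed by Code Example #1 (illustrative values).
import operator
from functools import reduce

import gpflowopt

dim = 3                       # assumed input dimension
initial_runs = 2 * (dim + 1)  # assumed size of the initial Latin hypercube design
max_runs = 100                # assumed total evaluation budget
optimizer_mc_times = 1000     # assumed number of MC candidates before L-BFGS-B
plot_mode = 0                 # set to 1 to plot the Pareto front for debugging

# continuous box domain over [0, 1]^dim
domain = reduce(operator.add,
                [gpflowopt.domain.ContinuousParameter('x%d' % i, 0.0, 1.0)
                 for i in range(dim)])

# 'problem' (exposing .evaluate, .ref_point, .max_hv, .lb, .ub), 'Hypervolume',
# 'BayesianOptimizer_modified' and 'plot_pf' are project-specific objects that
# must also be defined at module level.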
Code Example #2: multi-objective optimization via the SMBO interface (GP surrogate and EHVI acquisition by default)
# Assumed imports; 'SMBO', 'Hypervolume', 'problem', 'cs' (the configuration
# space) and the run constants are defined at module level (see sketch below).
import time

import numpy as np


def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(config):
        res = problem.evaluate_config(config)
        res['config'] = config
        return res

    bo = SMBO(
        objective_function,
        cs,
        num_objs=problem.num_objs,
        num_constraints=0,
        surrogate_type=surrogate_type,  # default: gp
        acq_type=acq_type,  # default: ehvi
        acq_optimizer_type=acq_optimizer_type,  # default: random_scipy
        initial_runs=initial_runs,  # default: 2 * (problem.dim + 1)
        init_strategy=init_strategy,  # default: sobol
        max_runs=max_runs,
        ref_point=problem.ref_point,
        time_limit_per_trial=time_limit_per_trial,
        task_id=task_id,
        random_state=seed)

    # step manually instead of calling bo.run() so the hypervolume
    # can be recorded after every iteration
    # bo.run()
    hv_diffs = []
    time_list = []
    global_start_time = time.time()
    for i in range(max_runs):
        config, trial_state, _, objs = bo.iterate()
        global_time = time.time() - global_start_time
        print(seed, i, objs, config, trial_state, 'time=', global_time)
        hv = Hypervolume(problem.ref_point).compute(
            bo.get_history().get_pareto_front())
        hv_diff = problem.max_hv - hv
        print(seed, i, 'hypervolume =', hv)
        print(seed, i, 'hv diff =', hv_diff)
        hv_diffs.append(hv_diff)
        time_list.append(global_time)
    config_list = bo.get_history().configurations
    perf_list = bo.get_history().perfs
    pf = np.asarray(bo.get_history().get_pareto_front())

    # plot for debugging
    if plot_mode == 1:
        Y_init = None
        plot_pf(problem, problem_str, mth, pf, Y_init)

    return hv_diffs, pf, config_list, perf_list, time_list
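
This snippet likewise depends on module-level setup. A minimal sketch follows, assuming the ConfigSpace library for the search space cs; the dimension, budget, and identifiers are illustrative assumptions, while the defaults mirror the inline comments above.

# Hypothetical module-level setup assumed by Code Example #2 (illustrative values).
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter

dim = 3  # assumed input dimension
cs = ConfigurationSpace()
cs.add_hyperparameters(
    [UniformFloatHyperparameter('x%d' % i, 0.0, 1.0) for i in range(dim)])

surrogate_type = 'gp'                # default surrogate
acq_type = 'ehvi'                    # default acquisition
acq_optimizer_type = 'random_scipy'  # default acquisition optimizer
init_strategy = 'sobol'              # default initial design
initial_runs = 2 * (dim + 1)         # default initial design size
max_runs = 100                       # assumed total evaluation budget
time_limit_per_trial = 600           # assumed per-trial timeout (seconds)
task_id = 'mo_benchmark'             # assumed identifier
plot_mode = 0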
Code Example #3: multi-objective Bayesian optimization with BoTorch (qEHVI acquisition)
# Assumed imports for an older BoTorch (~0.3.x) API matching the call
# signatures used below; 'problem', 'problem_bounds', 'initial_runs',
# 'max_runs', 'MC_SAMPLES', 'refit', 'hv', 'plot_mode' and the helper
# functions are defined at module level (see sketch below).
import time

import numpy as np
import torch
from botorch import fit_gpytorch_model
from botorch.acquisition.multi_objective.monte_carlo import \
    qExpectedHypervolumeImprovement
from botorch.sampling.samplers import SobolQMCNormalSampler
from botorch.utils.multi_objective.box_decomposition import \
    NondominatedPartitioning
from botorch.utils.multi_objective.pareto import is_non_dominated
from botorch.utils.transforms import unnormalize


def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(x: torch.Tensor):
        # unnormalize from the unit cube to the problem bounds, then negate
        # the objectives: BoTorch maximizes, the benchmark problems minimize
        x = unnormalize(x, bounds=problem_bounds)
        x = x.cpu().numpy().astype(np.float64)
        res = problem.evaluate(x)
        objs = [-y for y in res['objs']]
        return objs

    hv_diffs = []
    time_list = []
    global_start_time = time.time()

    # random seed
    np.random.seed(seed)
    torch.manual_seed(seed)

    # call helper functions to generate initial training data and initialize model
    train_x, train_obj = generate_initial_data(initial_runs,
                                               objective_function, time_list,
                                               global_start_time)
    mll, model = initialize_model(train_x, train_obj)

    # keep the initial design for plotting
    X_init = train_x.cpu().numpy().astype(np.float64)
    Y_init = -1 * train_obj.cpu().numpy().astype(np.float64)
    # hypervolume difference after each of the initial evaluations
    for i in range(initial_runs):
        train_obj_i = train_obj[:i + 1]
        # compute pareto front
        pareto_mask = is_non_dominated(train_obj_i)
        pareto_y = train_obj_i[pareto_mask]
        # compute hypervolume
        volume = hv.compute(pareto_y)
        hv_diff = problem.max_hv - volume
        hv_diffs.append(hv_diff)

    # run (max_runs - initial_runs) rounds of BayesOpt after the initial random batch
    for iteration in range(initial_runs + 1, max_runs + 1):
        t0 = time.time()
        try:
            # fit the models
            fit_gpytorch_model(mll)

            # define the qEHVI acquisition modules using a QMC sampler
            sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES)
            # partition non-dominated space into disjoint rectangles
            partitioning = NondominatedPartitioning(
                num_outcomes=problem.num_objs, Y=train_obj)
            qEHVI = qExpectedHypervolumeImprovement(
                model=model,
                ref_point=problem.ref_point.tolist(),  # use known reference point
                partitioning=partitioning,
                sampler=sampler,
            )
            # optimize and get new observation
            new_x, new_obj = optimize_acqf_and_get_observation(
                qEHVI, objective_function, time_list, global_start_time)
        except Exception as e:
            step = 2
            print(
                '===== Exception in optimization loop, restart with 1/%d of training data: %s'
                % (step, str(e)))
            if refit == 1:
                mll, model = initialize_model(train_x[::step],
                                              train_obj[::step])
            else:
                mll, model = initialize_model(
                    train_x[::step],
                    train_obj[::step],
                    model.state_dict(),
                )
            # fit the models
            fit_gpytorch_model(mll)

            # define the qEHVI acquisition modules using a QMC sampler
            sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES)
            # partition non-dominated space into disjoint rectangles
            partitioning = NondominatedPartitioning(
                num_outcomes=problem.num_objs, Y=train_obj[::step])
            qEHVI = qExpectedHypervolumeImprovement(
                model=model,
                ref_point=problem.ref_point.tolist(),  # use known reference point
                partitioning=partitioning,
                sampler=sampler,
            )
            # optimize and get new observation
            new_x, new_obj = optimize_acqf_and_get_observation(
                qEHVI, objective_function, time_list, global_start_time)
            assert len(time_list) == iteration

        # update training points
        train_x = torch.cat([train_x, new_x])
        train_obj = torch.cat([train_obj, new_obj])

        # update progress
        # compute pareto front
        pareto_mask = is_non_dominated(train_obj)
        pareto_y = train_obj[pareto_mask]
        # compute hypervolume
        volume = hv.compute(pareto_y)
        hv_diff = problem.max_hv - volume
        hv_diffs.append(hv_diff)

        # reinitialize the model so it is ready for fitting on the next iteration;
        # passing the current state dict warm-starts and speeds up the fit.
        # Note: the BoTorch authors report improved performance from NOT
        # warm-starting the hyperparameters with those of the previous iteration
        if refit == 1:
            mll, model = initialize_model(train_x, train_obj)
        else:
            mll, model = initialize_model(
                train_x,
                train_obj,
                model.state_dict(),
            )

        t1 = time.time()
        print(
            "Iter %d: x=%s, perf=%s, hv_diff=%f, time=%.2f, global_time=%.2f" %
            (iteration, unnormalize(new_x, bounds=problem_bounds), -new_obj,
             hv_diff, t1 - t0, time_list[-1]),
            flush=True)

    # Save result
    X = unnormalize(train_x, bounds=problem_bounds).cpu().numpy().astype(
        np.float64)  # back to the original input scale
    Y = -1 * train_obj.cpu().numpy().astype(np.float64)
    # compute pareto front
    pareto_mask = is_non_dominated(train_obj)
    pareto_y = train_obj[pareto_mask]
    pf = -1 * pareto_y.cpu().numpy().astype(np.float64)

    # plot for debugging
    if plot_mode == 1:
        plot_pf(problem, problem_str, mth, pf, Y_init)

    return hv_diffs, pf, X, Y, time_list
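
The snippet relies on helper functions and a hypervolume object hv defined elsewhere. Below is a minimal sketch of plausible implementations, patterned on BoTorch's multi-objective tutorial for the same API generation; the function bodies, the Sobol design, and the optimizer settings (num_restarts, raw_samples) are assumptions, and problem.ref_point is assumed to be expressed for the maximization problem (i.e. already negated).

# Hypothetical helpers assumed by Code Example #3 (sketch, older BoTorch ~0.3.x API).
# 'problem' and 'problem_bounds' (a 2 x dim tensor of input bounds) are assumed given.
import time

import torch
from botorch.models import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.optim import optimize_acqf
from botorch.utils.multi_objective.hypervolume import Hypervolume
from botorch.utils.sampling import draw_sobol_samples
from gpytorch.mlls import SumMarginalLogLikelihood

dim = problem_bounds.shape[-1]
standard_bounds = torch.stack([torch.zeros(dim, dtype=torch.double),
                               torch.ones(dim, dtype=torch.double)])
hv = Hypervolume(ref_point=torch.as_tensor(problem.ref_point, dtype=torch.double))


def generate_initial_data(n, objective_function, time_list, global_start_time):
    # Sobol design in the normalized [0, 1]^dim cube, evaluated one point at a time
    train_x = draw_sobol_samples(bounds=standard_bounds, n=1, q=n).squeeze(0)
    train_obj = []
    for x in train_x:
        train_obj.append(objective_function(x))
        time_list.append(time.time() - global_start_time)
    return train_x, torch.tensor(train_obj, dtype=torch.double)


def initialize_model(train_x, train_obj, state_dict=None):
    # one independent SingleTaskGP per objective, wrapped in a ModelListGP
    models = [SingleTaskGP(train_x, train_obj[:, i:i + 1])
              for i in range(train_obj.shape[-1])]
    model = ModelListGP(*models)
    mll = SumMarginalLogLikelihood(model.likelihood, model)
    if state_dict is not None:
        model.load_state_dict(state_dict)
    return mll, model


def optimize_acqf_and_get_observation(acq_func, objective_function,
                                      time_list, global_start_time):
    # optimize the acquisition over the normalized cube and evaluate the candidate
    candidates, _ = optimize_acqf(acq_function=acq_func, bounds=standard_bounds,
                                  q=1, num_restarts=10, raw_samples=512)
    new_x = candidates.detach()
    new_obj = torch.tensor([objective_function(x) for x in new_x],
                           dtype=torch.double)
    time_list.append(time.time() - global_start_time)
    return new_x, new_obj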