Code example #1
0
def test_loop():
    """Run the Bayesian optimization loop and verify the collected point counts."""
    iteration_count = 5

    initial_x = np.random.rand(5, 1)
    initial_y = np.random.rand(5, 1)

    # Wrap a GPy regression model for use with emukit.
    wrapped_model = GPyModelWrapper(GPy.models.GPRegression(initial_x, initial_y))

    search_space = ParameterSpace([ContinuousParameter('x', 0, 1)])
    ei_acquisition = ExpectedImprovement(wrapped_model)

    # Build the loop and collect points for a fixed number of iterations.
    loop = BayesianOptimizationLoop(model=wrapped_model,
                                    space=search_space,
                                    acquisition=ei_acquisition)
    loop.run_loop(UserFunctionWrapper(f),
                  FixedIterationsStoppingCondition(iteration_count))

    # The loop state should hold the 5 initial points plus one per iteration.
    assert loop.loop_state.X.shape[0] == iteration_count + 5

    # Verify the shapes of the reported results.
    optimization_results = loop.get_results()

    assert optimization_results.minimum_location.shape[0] == 1
    assert optimization_results.best_found_value_per_iteration.shape[0] == iteration_count + 5
Code example #2
0
    def __init__(self,
                 model: VanillaBayesianQuadrature,
                 acquisition: Acquisition = None,
                 model_updater: ModelUpdater = None):
        """
        Loop for vanilla Bayesian quadrature.

        :param model: The vanilla Bayesian quadrature method.
        :param acquisition: Acquisition function used to collect new points.
                            Defaults to IntegralVarianceReduction.
        :param model_updater: Defines how and when the quadrature model is
                              updated when new data arrives. Defaults to
                              updating hyper-parameters every iteration.
        """
        self.model = model

        # Fall back to the standard quadrature acquisition and updater.
        acquisition = (IntegralVarianceReduction(self.model)
                       if acquisition is None else acquisition)
        model_updater = (FixedIntervalUpdater(self.model, 1)
                         if model_updater is None else model_updater)

        # Derive the search space from the model's integral bounds.
        parameter_space = ParameterSpace(
            self.model.integral_bounds.convert_to_list_of_continuous_parameters())
        optimizer = AcquisitionOptimizer(parameter_space)
        point_calculator = SequentialPointCalculator(acquisition, optimizer)
        initial_state = create_loop_state(self.model.X, self.model.Y)

        super().__init__(point_calculator, model_updater, initial_state)
Code example #3
0
def test_batch_loop_fails_without_gradients_implemented():
    """A batch BO loop should reject models that lack gradient support."""
    search_space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    mocked_model = mock.create_autospec(IModel)
    acquisition = ExpectedImprovement(mocked_model)
    batch_size = 10

    # Computing a batch of points requires acquisition gradients, which the
    # autospec'd model does not implement, so construction must raise.
    with pytest.raises(ValueError):
        BayesianOptimizationLoop(search_space, mocked_model, acquisition,
                                 batch_size)
Code example #4
0
def test_loop_initial_state():
    """The loop state must start from the data the model was built with."""
    initial_x = np.random.rand(5, 1)
    initial_y = np.random.rand(5, 1)

    wrapped_model = GPyModelWrapper(GPy.models.GPRegression(initial_x, initial_y))
    search_space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    loop = ExperimentalDesignLoop(search_space, wrapped_model)

    # Loop state initialization should mirror the model's training data.
    assert_array_equal(loop.loop_state.X, initial_x)
    assert_array_equal(loop.loop_state.Y, initial_y)
Code example #5
0
def test_loop():
    """Run the experimental design loop and verify the collected point count."""
    n_iterations = 5

    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)

    # Make GPy model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    space = ParameterSpace([ContinuousParameter('x', 0, 1)])
    acquisition = ModelVariance(model)

    # Make loop and collect points
    exp_design = ExperimentalDesignLoop(space, model, acquisition)
    exp_design.run_loop(UserFunctionWrapper(f), FixedIterationsStoppingCondition(n_iterations))

    # Check we got the correct number of points: the initial design plus one new
    # point per iteration. (Was the magic constant 10, which silently breaks if
    # n_iterations or the initial design size changes; the BO test above already
    # computes the expectation this way.)
    assert exp_design.loop_state.X.shape[0] == n_iterations + x_init.shape[0]
Code example #6
0
    def __init__(
        self,
        model: Union[IModel, IEntropySearchModel],
        space: ParameterSpace,
        target_fidelity_index: int = None,
        num_samples: int = 100,
        num_representer_points: int = 50,
        burn_in_steps: int = 50,
    ):
        """
        :param model: Gaussian process model of the objective function that implements IEntropySearchModel
        :param space: Parameter space of the input domain
        :param target_fidelity_index: The index of the parameter which defines the fidelity
        :param num_samples: Integer determining how many samples to draw for each candidate input
        :param num_representer_points: Integer determining how many representer points to sample
        :param burn_in_steps: Integer that defines the number of burn-in steps when sampling the representer points
        """
        # Default the fidelity parameter to the last entry of the space.
        self.target_fidelity_index = (len(space.parameters) - 1
                                      if target_fidelity_index is None
                                      else target_fidelity_index)
        self.fidelity_parameter = space.parameters[self.target_fidelity_index]
        self.high_fidelity = self.fidelity_parameter.max

        # Representer points are sampled over x only (at the highest fidelity),
        # so build a copy of the space without the fidelity parameter.
        x_only_parameters = space.parameters.copy()
        x_only_parameters.remove(self.fidelity_parameter)
        x_only_space = ParameterSpace(x_only_parameters)

        # Sampler used to draw the representer points.
        representer_sampler = AffineInvariantEnsembleSampler(x_only_space)

        proposal_function = self._get_proposal_function(model, space)

        super().__init__(model, space, representer_sampler, num_samples,
                         num_representer_points, proposal_function,
                         burn_in_steps)
Code example #7
0
File: run_exp_bq.py  Project: ikanher/AHGP
def bq_loop(config, ai_model=None):
    """Run repeated Bayesian-quadrature experiments and collect error traces.

    For each of ``config.repeated_runs`` runs an initial design is drawn, a GP
    surrogate is fit, and ``config.bq_iter`` acquisition steps (integral
    variance reduction) are performed. Integral estimates are compared against
    a numerically approximated ground truth in the standardized space.

    :param config: Experiment configuration; attributes used here are ``name``,
                   ``data_dim``, ``init_num_data``, ``interval_std``,
                   ``repeated_runs``, ``seed``, ``bq_iter`` and ``plot``.
    :param ai_model: Optional amortized model forwarded to
                     ``Emukit_BO_BQ_GP_Model``.
    :return: List with one result dict per repeated run, containing
             ``integral_error_list``, ``integral_error_list_scaledback``,
             ``integral_std_list`` and ``time_elapsed``.
    """
    # SECURITY NOTE(review): `eval` executes config.name as code — only run
    # with trusted configuration files.
    user_function, integral_bounds = eval(config.name)()

    data_dim = config.data_dim
    init_num_data = config.init_num_data
    interval_std = config.interval_std

    # Per-dimension interval width, standardization scale and center.
    interval = np.zeros((1, data_dim))
    std = np.zeros((1, data_dim))
    mean = np.zeros((1, data_dim))
    integral_bounds_scaled = integral_bounds.copy()
    for ii in range(data_dim):
        interval[0, ii] = integral_bounds[ii][1] - integral_bounds[ii][0]
        std[0, ii] = interval[0, ii] / interval_std
        mean[0, ii] = (integral_bounds[ii][1] + integral_bounds[ii][0]) / 2
        integral_bounds_scaled[ii] = ((integral_bounds[ii] - mean[0, ii]) /
                                      std[0, ii]).tolist()

    # Bounds of the first dimension in the standardized space (used for the
    # 1-d ground truth and for plot limits). The unscaled lb/ub locals that
    # used to be assigned here (twice) were never read and have been removed.
    lb_scaled = integral_bounds_scaled[0][0]  # lower bound
    ub_scaled = integral_bounds_scaled[0][1]  # upper bound

    results_list = [None] * config.repeated_runs
    npr = np.random.RandomState(config.seed)
    for kk in tqdm(range(config.repeated_runs)):

        # BUG FIX: `result` and `time_count` were previously re-initialized
        # inside the per-iteration loop, so `time_elapsed` recorded only the
        # *last* iteration's optimization time and both names were undefined
        # when bq_iter == 0. Initialize them once per run and accumulate.
        result = {}
        time_count = 0

        integral_mean_list = np.zeros(config.bq_iter + 1)
        integral_std_list = np.zeros(config.bq_iter + 1)
        # Initialize data points in the original space, then standardize.
        X_init = (npr.rand(init_num_data, data_dim) - 0.5) * interval + mean
        Y_init = user_function.f(X_init)
        X_init_norm = (X_init - mean) / std

        Y_init_norm, mean_Y, std_Y = standardize(Y_init)

        X = X_init_norm
        Y = Y_init_norm
        # Nudge near-zero inputs away from 0 to avoid numerical issues.
        X[np.abs(X) < 1.0e-5] = 1.0e-5
        # Objective expressed in the standardized input/output space.
        function_norm = lambda x: (user_function.f(x * std + mean) - mean_Y
                                   ) / std_Y

        if data_dim == 1:
            ground_truth = univariate_approximate_ground_truth_integral(
                function_norm, (lb_scaled, ub_scaled))[0]
        elif data_dim == 2:
            ground_truth = bivariate_approximate_ground_truth_integral(
                function_norm, integral_bounds_scaled)[0]
        else:
            # Fail fast: previously this fell through and later raised a
            # confusing NameError on `ground_truth`.
            raise ValueError(
                'Ground-truth integral is only available for data_dim <= 2, '
                'got data_dim = {}'.format(data_dim))

        # Set up Emukit_BO_BQ_GP_Model.
        emukit_gp_model = Emukit_BO_BQ_GP_Model(X_init_norm, Y_init_norm,
                                                config, ai_model)
        emukit_gp_model.optimize()
        emukit_gp_model.set_kernel()
        emukit_quad_kern = QuadratureKernelCustom(emukit_gp_model,
                                                  integral_bounds_scaled)
        emukit_model = BaseGaussianProcessCustomModel(kern=emukit_quad_kern,
                                                      gp_model=emukit_gp_model)
        emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model,
                                                  X=X,
                                                  Y=Y)

        # Plot the initial model fit against the integrand.
        if config.plot:
            x_plot = np.linspace(integral_bounds_scaled[0][0],
                                 integral_bounds_scaled[0][1], 300)[:, None]
            y_plot = function_norm(x_plot)

            mu_plot, var_plot = emukit_method.predict(x_plot)

            plt.figure(figsize=FIGURE_SIZE)
            plt.plot(X_init_norm,
                     Y_init_norm,
                     "ro",
                     markersize=10,
                     label="Observations")
            plt.plot(x_plot, y_plot, "k", label="The Integrand")
            plt.plot(x_plot, mu_plot, "C0", label="Model")
            plt.fill_between(x_plot[:, 0],
                             mu_plot[:, 0] + np.sqrt(var_plot)[:, 0],
                             mu_plot[:, 0] - np.sqrt(var_plot)[:, 0],
                             color="C0",
                             alpha=0.6)
            plt.fill_between(x_plot[:, 0],
                             mu_plot[:, 0] + 2 * np.sqrt(var_plot)[:, 0],
                             mu_plot[:, 0] - 2 * np.sqrt(var_plot)[:, 0],
                             color="C0",
                             alpha=0.4)
            plt.fill_between(x_plot[:, 0],
                             mu_plot[:, 0] + 3 * np.sqrt(var_plot)[:, 0],
                             mu_plot[:, 0] - 3 * np.sqrt(var_plot)[:, 0],
                             color="C0",
                             alpha=0.2)
            plt.legend(loc=2, prop={'size': LEGEND_SIZE})
            plt.xlabel(r"$x$")
            plt.ylabel(r"$f(x)$")
            plt.grid(True)
            plt.xlim(lb_scaled, ub_scaled)
            plt.show()

        initial_integral_mean, initial_integral_variance = emukit_method.integrate(
        )
        integral_mean_list[0] = initial_integral_mean
        integral_std_list[0] = np.sqrt(initial_integral_variance)
        # Seed the "current" estimate so the post-loop reporting below is
        # well-defined even when config.bq_iter == 0 (previously a NameError).
        integral_mean = initial_integral_mean
        integral_variance = initial_integral_variance

        # Plot the initial integral density versus the ground truth.
        if config.plot:

            x_plot_integral = np.linspace(
                initial_integral_mean - 3 * np.sqrt(initial_integral_variance),
                initial_integral_mean + 3 * np.sqrt(initial_integral_variance),
                200)
            y_plot_integral_initial = 1/np.sqrt(initial_integral_variance * 2 * np.pi) * \
            np.exp( - (x_plot_integral - initial_integral_mean)**2 / (2 * initial_integral_variance) )
            plt.figure(figsize=FIGURE_SIZE)
            plt.plot(x_plot_integral,
                     y_plot_integral_initial,
                     "k",
                     label="initial integral density")
            plt.axvline(initial_integral_mean, color="red", label="initial integral estimate", \
                        linestyle="--")
            plt.axvline(ground_truth, color="blue", label="ground truth integral", \
                        linestyle="--")
            plt.legend(loc=2, prop={'size': LEGEND_SIZE})
            plt.xlabel(r"$F$")
            plt.ylabel(r"$p(F)$")
            plt.grid(True)
            plt.xlim(np.min(x_plot_integral), np.max(x_plot_integral))
            plt.show()

        print('The initial estimated integral is: ',
              round(initial_integral_mean, 4))
        print('with a credible interval: ',
              round(2 * np.sqrt(initial_integral_variance), 4), '.')
        # Message fixed: the value is rounded to 4 digits, not 2.
        print('The ground truth rounded to 4 digits for comparison is: ',
              round(ground_truth, 4), '.')

        # Acquisition loop: pick a new point by integral-variance reduction,
        # update the data, re-optimize the model, and record the new estimate.
        for ii in range(config.bq_iter):
            ivr_acquisition = IntegralVarianceReduction(emukit_method)
            space = ParameterSpace(emukit_method.reasonable_box_bounds.
                                   convert_to_list_of_continuous_parameters())
            num_steps = 200
            num_init_points = 5
            optimizer = LocalSearchAcquisitionOptimizer(
                space, num_steps, num_init_points)
            x_new, _ = optimizer.optimize(ivr_acquisition)
            y_new = function_norm(x_new)
            X = np.append(X, x_new, axis=0)
            Y = np.append(Y, y_new, axis=0)
            X[np.abs(X) < 1.0e-5] = 1.0e-5
            emukit_method.set_data(X, Y)

            # Accumulate hyper-parameter optimization time across iterations.
            start_time = time.time()
            emukit_model.optimize()
            time_count = time_count + time.time() - start_time

            integral_mean, integral_variance = emukit_method.integrate()
            integral_mean_list[ii + 1] = integral_mean
            integral_std_list[ii + 1] = np.sqrt(integral_variance)

        # Plot final integral density and final model fit.
        if config.plot:

            mu_plot_final, var_plot_final = emukit_method.predict(x_plot)

            y_plot_integral = 1/np.sqrt(integral_variance * 2 * np.pi) * \
            np.exp( - (x_plot_integral - integral_mean)**2 / (2 * integral_variance) )

            plt.figure(figsize=FIGURE_SIZE)
            plt.plot(x_plot_integral,
                     y_plot_integral_initial,
                     "gray",
                     label="initial integral density")
            plt.plot(x_plot_integral,
                     y_plot_integral,
                     "k",
                     label="new integral density")
            plt.axvline(initial_integral_mean,
                        color="gray",
                        label="initial integral estimate",
                        linestyle="--")
            plt.axvline(integral_mean,
                        color="red",
                        label="new integral estimate",
                        linestyle="--")
            plt.axvline(ground_truth, color="blue", label="ground truth integral", \
                        linestyle="--")
            plt.legend(loc=2, prop={'size': LEGEND_SIZE})
            plt.xlabel(r"$F$")
            plt.ylabel(r"$p(F)$")
            plt.grid(True)
            plt.xlim(np.min(x_plot_integral), np.max(x_plot_integral))
            plt.show()

            plt.figure(figsize=FIGURE_SIZE)
            plt.plot(emukit_model.X,
                     emukit_model.Y,
                     "ro",
                     markersize=10,
                     label="Observations")
            plt.plot(x_plot, y_plot, "k", label="The Integrand")
            plt.plot(x_plot, mu_plot_final, "C0", label="Model")
            plt.fill_between(
                x_plot[:, 0],
                mu_plot_final[:, 0] + np.sqrt(var_plot_final)[:, 0],
                mu_plot_final[:, 0] - np.sqrt(var_plot_final)[:, 0],
                color="C0",
                alpha=0.6)
            plt.fill_between(
                x_plot[:, 0],
                mu_plot_final[:, 0] + 2 * np.sqrt(var_plot_final)[:, 0],
                mu_plot_final[:, 0] - 2 * np.sqrt(var_plot_final)[:, 0],
                color="C0",
                alpha=0.4)
            plt.fill_between(
                x_plot[:, 0],
                mu_plot_final[:, 0] + 3 * np.sqrt(var_plot_final)[:, 0],
                mu_plot_final[:, 0] - 3 * np.sqrt(var_plot_final)[:, 0],
                color="C0",
                alpha=0.2)
            plt.legend(loc=2, prop={'size': LEGEND_SIZE})
            plt.xlabel(r"$x$")
            plt.ylabel(r"$f(x)$")
            plt.grid(True)
            plt.xlim(lb_scaled, ub_scaled)
            plt.show()

        print('The estimated integral is: ', round(integral_mean, 4))
        print('with a credible interval: ',
              round(2 * np.sqrt(integral_variance), 4), '.')
        # Message fixed: the value is rounded to 4 digits, not 2.
        print('The ground truth rounded to 4 digits for comparison is: ',
              round(ground_truth, 4), '.')

        integral_error_list = np.abs(integral_mean_list - ground_truth)
        result['integral_error_list'] = integral_error_list
        # Undo output standardization and per-dimension input scaling to
        # report errors on the original scale.
        integral_error_list_scaledback = integral_error_list * std_Y.item()
        for jj in range(data_dim):
            integral_error_list_scaledback = integral_error_list_scaledback * std[
                0, jj]
        result[
            'integral_error_list_scaledback'] = integral_error_list_scaledback
        result['integral_std_list'] = integral_std_list
        result['time_elapsed'] = time_count
        results_list[kk] = result
        print(time_count)

        # Plot the error trace over iterations.
        if config.plot:
            plt.figure(figsize=(12, 8))
            plt.fill_between(np.arange(config.bq_iter + 1) + 1,
                             integral_error_list - 0.2 * integral_std_list,
                             integral_error_list + 0.2 * integral_std_list,
                             color='red',
                             alpha=0.15)
            plt.plot(np.arange(config.bq_iter + 1) + 1,
                     integral_error_list,
                     'or-',
                     lw=2,
                     label='Estimated integral')
            plt.legend(loc=2, prop={'size': LEGEND_SIZE})
            plt.xlabel(r"iteration")
            plt.ylabel(r"$f(x)$")
            plt.grid(True)
            plt.show()

        # end of one run

    return results_list