def integral_var_uniform(num_samples: int, model: VanillaBayesianQuadrature):
    bounds = model.integral_bounds.bounds
    samples = _sample_uniform(num_samples, bounds)
    _, gp_cov_at_samples = model.predict_with_full_covariance(samples)
    differences = np.array([x[1] - x[0] for x in bounds])
    volume = np.prod(differences)
    return np.sum(gp_cov_at_samples) * (volume / num_samples)**2

def integral_mean_uniform(num_samples: int, model: VanillaBayesianQuadrature):
    bounds = model.integral_bounds.bounds
    samples = _sample_uniform(num_samples, bounds)
    gp_mean_at_samples, _ = model.predict(samples)
    differences = np.array([x[1] - x[0] for x in bounds])
    volume = np.prod(differences)
    return np.mean(gp_mean_at_samples) * volume
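# Note: the helper `_sample_uniform` used above is not defined in this snippet. A minimal
# sketch of what it might look like, assuming `bounds` is a sequence of (lower, upper)
# pairs, samples come back with shape (num_samples, input_dim), and `np` is numpy:
def _sample_uniform(num_samples: int, bounds):
    lower = np.array([b[0] for b in bounds])
    upper = np.array([b[1] for b in bounds])
    # uniform samples in the axis-aligned box defined by `bounds`
    return lower + (upper - lower) * np.random.rand(num_samples, lower.shape[0])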

def model_lebesgue_normalized(gpy_model):
    measure = LebesgueMeasure.from_bounds(bounds=gpy_model.X.shape[1] *
                                          [(-1, 2)],
                                          normalized=True)
    qrbf = QuadratureRBFLebesgueMeasure(RBFGPy(gpy_model.kern), measure)
    basegp = BaseGaussianProcessGPy(kern=qrbf, gpy_model=gpy_model)
    return VanillaBayesianQuadrature(base_gp=basegp,
                                     X=gpy_model.X,
                                     Y=gpy_model.Y)

def model_gaussian(gpy_model):
    X, Y = gpy_model.X, gpy_model.Y
    measure = GaussianMeasure(mean=np.arange(gpy_model.X.shape[1]),
                              variance=np.linspace(0.2, 1.5, X.shape[1]))
    qrbf = QuadratureRBFGaussianMeasure(RBFGPy(gpy_model.kern),
                                        measure=measure)
    basegp = BaseGaussianProcessGPy(kern=qrbf, gpy_model=gpy_model)
    return VanillaBayesianQuadrature(base_gp=basegp,
                                     X=gpy_model.X,
                                     Y=gpy_model.Y)

def model():
    rng = np.random.RandomState(42)
    x_init = rng.rand(5, 2)
    y_init = rng.rand(5, 1)

    gpy_kernel = GPy.kern.RBF(input_dim=x_init.shape[1])
    gpy_model = GPy.models.GPRegression(X=x_init, Y=y_init, kernel=gpy_kernel)
    qrbf = QuadratureRBFLebesgueMeasure(RBFGPy(gpy_kernel), integral_bounds=x_init.shape[1] * [(-3, 3)])
    basegp = BaseGaussianProcessGPy(kern=qrbf, gpy_model=gpy_model)
    model = VanillaBayesianQuadrature(base_gp=basegp, X=x_init, Y=y_init)
    return model
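# Usage sketch for the fixture above (hedged; assumes the emukit and GPy imports used by
# these snippets): `integrate()` returns the mean and variance of the Gaussian posterior
# over the integral value.
vanilla_bq = model()
integral_mean, integral_variance = vanilla_bq.integrate()
print(integral_mean, integral_variance)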

    def _wrap_emukit(self, gpy_gp: GPy.core.GP):
        """
        Wrap a GPy GP in the Emukit interface to enable subsequent quadrature.

        :param gpy_gp: the fitted GPy Gaussian process model
        :return: a VanillaBayesianQuadrature method built on the wrapped GP
        """
        # gpy_gp.optimize()
        rbf = RBFGPy(gpy_gp.kern)
        qrbf = QuadratureRBF(rbf, integral_bounds=[(-4, 8)] * self.dimensions)
        model = BaseGaussianProcessGPy(kern=qrbf, gpy_model=gpy_gp)
        method = VanillaBayesianQuadrature(base_gp=model)
        return method

def model_with_density():
    rng = np.random.RandomState(42)
    x_init = rng.rand(5, 2)
    y_init = rng.rand(5, 1)

    gpy_kernel = GPy.kern.RBF(input_dim=x_init.shape[1])
    gpy_model = GPy.models.GPRegression(X=x_init, Y=y_init, kernel=gpy_kernel)
    measure = IsotropicGaussianMeasure(mean=np.arange(x_init.shape[1]), variance=2.)
    qrbf = QuadratureRBFIsoGaussMeasure(RBFGPy(gpy_kernel), measure=measure)
    basegp = BaseGaussianProcessGPy(kern=qrbf, gpy_model=gpy_model)
    model = VanillaBayesianQuadrature(base_gp=basegp, X=x_init, Y=y_init)
    return model
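# With a probability measure such as the isotropic Gaussian above, `integrate()` returns
# the mean and variance of the posterior over the expectation of the integrand under that
# measure, rather than over a plain Lebesgue integral. A hedged usage sketch:
bq_model = model_with_density()
expectation_mean, expectation_variance = bq_model.integrate()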
Example 8
def loop():
    init_size = 5
    x_init = np.random.rand(init_size, 2)
    y_init = np.random.rand(init_size, 1)
    bounds = [(-1, 1), (0, 1)]

    gpy_model = GPy.models.GPRegression(X=x_init,
                                        Y=y_init,
                                        kernel=GPy.kern.RBF(
                                            input_dim=x_init.shape[1],
                                            lengthscale=1.0,
                                            variance=1.0))
    emukit_measure = LebesgueMeasure.from_bounds(bounds, normalized=False)
    emukit_qrbf = QuadratureRBFLebesgueMeasure(RBFGPy(gpy_model.kern),
                                               measure=emukit_measure)
    emukit_model = BaseGaussianProcessGPy(kern=emukit_qrbf,
                                          gpy_model=gpy_model)
    emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model,
                                              X=x_init,
                                              Y=y_init)
    emukit_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
    return emukit_loop, init_size, x_init, y_init
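# Usage sketch for the loop above (hedged; `f` is a hypothetical placeholder integrand
# that must take an (n, 2) array and return an (n, 1) array, and `np` is assumed to be
# imported as in the surrounding snippets):
emukit_loop, init_size, x_init, y_init = loop()
f = lambda x: np.sin(x[:, :1]) + np.cos(x[:, 1:])  # placeholder integrand
emukit_loop.run_loop(f, 10)  # run 10 acquisition steps
# the wrapped BQ model can then be queried for the integral estimate
integral_mean, integral_variance = emukit_loop.model.integrate()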
Example 9
def vanilla_bq_model(gpy_model, continuous_space, n_dims):
    integral_bounds = continuous_space.get_bounds()
    model = convert_gpy_model_to_emukit_model(gpy_model.model, integral_bounds)
    return VanillaBayesianQuadrature(model)
Example 10
def vanilla_bq_model(gpy_model, continuous_space, n_dims):
    integral_bounds = continuous_space.get_bounds()
    model = create_emukit_model_from_gpy_model(gpy_model.model,
                                               integral_bounds, None)
    return VanillaBayesianQuadrature(model, model.X, model.Y)
Example 11
def bq_loop(config, ai_model=None):
    user_function, integral_bounds = eval(config.name)()
    lb = integral_bounds[0][0]  # lower bound
    ub = integral_bounds[0][1]  # upper bound

    data_dim = config.data_dim
    init_num_data = config.init_num_data
    interval_std = config.interval_std
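    # Per-dimension scaling: shift each integration interval to be centred at zero and
    # divide by interval / interval_std, giving a roughly standardized domain.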
    interval = np.zeros((1, data_dim))
    std = np.zeros((1, data_dim))
    mean = np.zeros((1, data_dim))
    integral_bounds_scaled = integral_bounds.copy()
    for ii in range(data_dim):
        interval[0, ii] = integral_bounds[ii][1] - integral_bounds[ii][0]
        std[0, ii] = interval[0, ii] / interval_std
        mean[0, ii] = (integral_bounds[ii][1] + integral_bounds[ii][0]) / 2
        integral_bounds_scaled[ii] = ((integral_bounds[ii] - mean[0, ii]) /
                                      std[0, ii]).tolist()

    lb_scaled = integral_bounds_scaled[0][0]  # lower bound
    ub_scaled = integral_bounds_scaled[0][1]  # upper bound

    results_list = [None] * config.repeated_runs
    npr = np.random.RandomState(config.seed)
    for kk in tqdm(range(config.repeated_runs)):

        integral_mean_list = np.zeros(config.bq_iter + 1)
        integral_std_list = np.zeros(config.bq_iter + 1)
        # initialize data points
        X_init = (npr.rand(init_num_data, data_dim) - 0.5) * interval + mean
        Y_init = user_function.f(X_init)
        X_init_norm = (X_init - mean) / std

        Y_init_norm, mean_Y, std_Y = standardize(Y_init)

        X = X_init_norm
        Y = Y_init_norm
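        # nudge entries that are (almost) exactly zero away from zero,
        # presumably to avoid numerical issues downstream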
        X[np.abs(X) < 1.0e-5] = 1.0e-5
        # integrand expressed in the normalized coordinates
        function_norm = lambda x: (user_function.f(x * std + mean) - mean_Y) / std_Y

        if data_dim == 1:
            ground_truth = univariate_approximate_ground_truth_integral(
                function_norm, (lb_scaled, ub_scaled))[0]
        elif data_dim == 2:
            ground_truth = bivariate_approximate_ground_truth_integral(
                function_norm, integral_bounds_scaled)[0]

        # Set up the Emukit_BO_BQ_GP_Model and wrap it for quadrature
        emukit_gp_model = Emukit_BO_BQ_GP_Model(X_init_norm, Y_init_norm,
                                                config, ai_model)
        emukit_gp_model.optimize()
        emukit_gp_model.set_kernel()
        emukit_quad_kern = QuadratureKernelCustom(emukit_gp_model,
                                                  integral_bounds_scaled)
        emukit_model = BaseGaussianProcessCustomModel(kern=emukit_quad_kern,
                                                      gp_model=emukit_gp_model)
        emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model,
                                                  X=X,
                                                  Y=Y)

        # plot the initial GP fit against the normalized integrand (1D case)
        if config.plot:
            x_plot = np.linspace(integral_bounds_scaled[0][0],
                                 integral_bounds_scaled[0][1], 300)[:, None]
            y_plot = function_norm(x_plot)

            mu_plot, var_plot = emukit_method.predict(x_plot)

            plt.figure(figsize=FIGURE_SIZE)
            plt.plot(X_init_norm,
                     Y_init_norm,
                     "ro",
                     markersize=10,
                     label="Observations")
            plt.plot(x_plot, y_plot, "k", label="The Integrand")
            plt.plot(x_plot, mu_plot, "C0", label="Model")
            plt.fill_between(x_plot[:, 0],
                             mu_plot[:, 0] + np.sqrt(var_plot)[:, 0],
                             mu_plot[:, 0] - np.sqrt(var_plot)[:, 0],
                             color="C0",
                             alpha=0.6)
            plt.fill_between(x_plot[:, 0],
                             mu_plot[:, 0] + 2 * np.sqrt(var_plot)[:, 0],
                             mu_plot[:, 0] - 2 * np.sqrt(var_plot)[:, 0],
                             color="C0",
                             alpha=0.4)
            plt.fill_between(x_plot[:, 0],
                             mu_plot[:, 0] + 3 * np.sqrt(var_plot)[:, 0],
                             mu_plot[:, 0] - 3 * np.sqrt(var_plot)[:, 0],
                             color="C0",
                             alpha=0.2)
            plt.legend(loc=2, prop={'size': LEGEND_SIZE})
            plt.xlabel(r"$x$")
            plt.ylabel(r"$f(x)$")
            plt.grid(True)
            plt.xlim(lb_scaled, ub_scaled)
            plt.show()

        initial_integral_mean, initial_integral_variance = emukit_method.integrate()
        integral_mean_list[0] = initial_integral_mean
        integral_std_list[0] = np.sqrt(initial_integral_variance)

        if config.plot:

            x_plot_integral = np.linspace(
                initial_integral_mean - 3 * np.sqrt(initial_integral_variance),
                initial_integral_mean + 3 * np.sqrt(initial_integral_variance),
                200)
            y_plot_integral_initial = (
                1 / np.sqrt(2 * np.pi * initial_integral_variance)
                * np.exp(-(x_plot_integral - initial_integral_mean) ** 2
                         / (2 * initial_integral_variance)))
            plt.figure(figsize=FIGURE_SIZE)
            plt.plot(x_plot_integral,
                     y_plot_integral_initial,
                     "k",
                     label="initial integral density")
            plt.axvline(initial_integral_mean, color="red", label="initial integral estimate", \
                        linestyle="--")
            plt.axvline(ground_truth, color="blue", label="ground truth integral", \
                        linestyle="--")
            plt.legend(loc=2, prop={'size': LEGEND_SIZE})
            plt.xlabel(r"$F$")
            plt.ylabel(r"$p(F)$")
            plt.grid(True)
            plt.xlim(np.min(x_plot_integral), np.max(x_plot_integral))
            plt.show()

        print('The initial estimated integral is: ',
              round(initial_integral_mean, 4))
        print('with a credible interval (two standard deviations) of: ',
              round(2 * np.sqrt(initial_integral_variance), 4), '.')
        print('The ground truth rounded to 4 digits for comparison is: ',
              round(ground_truth, 4), '.')

        # accumulate the results and timing for this run across all BQ iterations
        result = {}
        time_count = 0
        for ii in range(config.bq_iter):
            ivr_acquisition = IntegralVarianceReduction(emukit_method)
            space = ParameterSpace(emukit_method.reasonable_box_bounds.
                                   convert_to_list_of_continuous_parameters())
            num_steps = 200
            num_init_points = 5
            optimizer = LocalSearchAcquisitionOptimizer(
                space, num_steps, num_init_points)
            x_new, _ = optimizer.optimize(ivr_acquisition)
            y_new = function_norm(x_new)
            X = np.append(X, x_new, axis=0)
            Y = np.append(Y, y_new, axis=0)
            X[np.abs(X) < 1.0e-5] = 1.0e-5
            emukit_method.set_data(X, Y)

            start_time = time.time()
            emukit_model.optimize()
            time_count = time_count + time.time() - start_time

            integral_mean, integral_variance = emukit_method.integrate()
            integral_mean_list[ii + 1] = integral_mean
            integral_std_list[ii + 1] = np.sqrt(integral_variance)

        if config.plot:

            mu_plot_final, var_plot_final = emukit_method.predict(x_plot)

            y_plot_integral = (
                1 / np.sqrt(2 * np.pi * integral_variance)
                * np.exp(-(x_plot_integral - integral_mean) ** 2
                         / (2 * integral_variance)))

            plt.figure(figsize=FIGURE_SIZE)
            plt.plot(x_plot_integral,
                     y_plot_integral_initial,
                     "gray",
                     label="initial integral density")
            plt.plot(x_plot_integral,
                     y_plot_integral,
                     "k",
                     label="new integral density")
            plt.axvline(initial_integral_mean,
                        color="gray",
                        label="initial integral estimate",
                        linestyle="--")
            plt.axvline(integral_mean,
                        color="red",
                        label="new integral estimate",
                        linestyle="--")
            plt.axvline(ground_truth, color="blue", label="ground truth integral", \
                        linestyle="--")
            plt.legend(loc=2, prop={'size': LEGEND_SIZE})
            plt.xlabel(r"$F$")
            plt.ylabel(r"$p(F)$")
            plt.grid(True)
            plt.xlim(np.min(x_plot_integral), np.max(x_plot_integral))
            plt.show()

            plt.figure(figsize=FIGURE_SIZE)
            plt.plot(emukit_model.X,
                     emukit_model.Y,
                     "ro",
                     markersize=10,
                     label="Observations")
            plt.plot(x_plot, y_plot, "k", label="The Integrand")
            plt.plot(x_plot, mu_plot_final, "C0", label="Model")
            plt.fill_between(
                x_plot[:, 0],
                mu_plot_final[:, 0] + np.sqrt(var_plot_final)[:, 0],
                mu_plot_final[:, 0] - np.sqrt(var_plot_final)[:, 0],
                color="C0",
                alpha=0.6)
            plt.fill_between(
                x_plot[:, 0],
                mu_plot_final[:, 0] + 2 * np.sqrt(var_plot_final)[:, 0],
                mu_plot_final[:, 0] - 2 * np.sqrt(var_plot_final)[:, 0],
                color="C0",
                alpha=0.4)
            plt.fill_between(
                x_plot[:, 0],
                mu_plot_final[:, 0] + 3 * np.sqrt(var_plot_final)[:, 0],
                mu_plot_final[:, 0] - 3 * np.sqrt(var_plot_final)[:, 0],
                color="C0",
                alpha=0.2)
            plt.legend(loc=2, prop={'size': LEGEND_SIZE})
            plt.xlabel(r"$x$")
            plt.ylabel(r"$f(x)$")
            plt.grid(True)
            plt.xlim(lb_scaled, ub_scaled)
            plt.show()

        print('The estimated integral is: ', round(integral_mean, 4))
        print('with a credible interval (two standard deviations) of: ',
              round(2 * np.sqrt(integral_variance), 4), '.')
        print('The ground truth rounded to 4 digits for comparison is: ',
              round(ground_truth, 4), '.')

        integral_error_list = np.abs(integral_mean_list - ground_truth)
        result['integral_error_list'] = integral_error_list
        integral_error_list_scaledback = integral_error_list * std_Y.item()
        for jj in range(data_dim):
            integral_error_list_scaledback *= std[0, jj]
        result['integral_error_list_scaledback'] = integral_error_list_scaledback
        result['integral_std_list'] = integral_std_list
        result['time_elapsed'] = time_count
        results_list[kk] = result
        print('GP hyperparameter optimization time (s): ', time_count)

        if config.plot:
            plt.figure(figsize=(12, 8))
            plt.fill_between(np.arange(config.bq_iter + 1) + 1,
                             integral_error_list - 0.2 * integral_std_list,
                             integral_error_list + 0.2 * integral_std_list,
                             color='red',
                             alpha=0.15)
            plt.plot(np.arange(config.bq_iter + 1) + 1,
                     integral_error_list,
                     'or-',
                     lw=2,
                     label='Integral estimation error')
            plt.legend(loc=2, prop={'size': LEGEND_SIZE})
            plt.xlabel(r"iteration")
            plt.ylabel(r"$|\hat{F} - F|$")
            plt.grid(True)
            plt.show()

        # end of one run

    return results_list
    METHOD = "Vanilla BQ"

    X = np.array([[-1, 1], [0, 0], [-2, 0.1]])
    Y = np.array([[1], [2], [3]])
    D = X.shape[1]
    integral_bounds = [(-1, 2), (-3, 3)]

    gpy_model = GPy.models.GPRegression(X=X,
                                        Y=Y,
                                        kernel=GPy.kern.RBF(input_dim=D))
    qrbf = QuadratureRBFLebesgueMeasure(RBFGPy(gpy_model.kern),
                                        integral_bounds=integral_bounds)
    model = BaseGaussianProcessGPy(kern=qrbf, gpy_model=gpy_model)

    vanilla_bq = VanillaBayesianQuadrature(base_gp=model, X=X, Y=Y)

    print()
    print("method: {}".format(METHOD))
    print("no dimensions: {}".format(D))
    print()

    # === mean =============================================================
    num_runs = 100
    num_samples = int(1e6)
    num_std = 3

    mZ_SAMPLES = np.zeros(num_runs)

    mZ, _ = vanilla_bq.integrate()
    for i in range(num_runs):