    kernel_input_map = x_radial
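    # Two identical GP models whose Matern 5/2 kernel operates on the radially
    # mapped inputs; dim_change(ndim) presumably reports the dimension after
    # the mapping.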
    model_sphere1 = GPRegression(
        kernel=Matern52(kernel_input_map.dim_change(ndim), kernel_input_map))
    model_sphere2 = GPRegression(
        kernel=Matern52(kernel_input_map.dim_change(ndim), kernel_input_map))

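    # Plain Inference for the rectangular and first sphere model; the second
    # sphere model goes through ShadowInference on the same training data.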
    inference_rect = Inference((x_input, output), model_rect)
    inference_sphere1 = Inference((x_input, output), model_sphere1)
    inference_sphere2 = ShadowInference((x_input, output), model_sphere2)
    inference_rect.model_param_init()
    inference_sphere1.model_param_init()
    inference_sphere2.model_param_init()

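    # Learn hyperparameters with random restarts; the shadow inference reuses
    # the parameters learned for model_sphere1 rather than re-optimizing.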
    params_rect = inference_rect.learning(n_restarts=10)
    params_sphere1 = inference_sphere1.learning(n_restarts=10)
    inference_sphere2.matrix_update(model_sphere1.param_to_vec())

    if ndim == 2:
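        # Evaluate predictive mean/std and the acquisition function on a
        # 50x50 grid over [-1, 1]^2.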
        x1_grid, x2_grid = np.meshgrid(np.linspace(-1, 1, 50),
                                       np.linspace(-1, 1, 50))
        x_pred_points = Variable(
            torch.from_numpy(
                np.vstack([x1_grid.flatten(),
                           x2_grid.flatten()]).astype(np.float32)).t())
        pred_mean_rect, pred_var_rect = inference_rect.predict(x_pred_points)
        pred_std_rect = pred_var_rect**0.5
        acq_rect = acquisition(x_pred_points,
                               inference_rect,
                               params_rect,
                               reference=reference)
Example #2
    x_input.data[1, :] = 1
    output = torch.cos(x_input[:, 0:1] + (x_input[:, 1:2] / math.pi * 0.5) +
                       torch.prod(x_input, 1, keepdim=True))
    reference = torch.min(output).data.squeeze()[0]
    train_data = (x_input, output)

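    # Two identical GP models sharing the same RadializationKernel
    # configuration: one for plain and one for shadow inference.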
    model_normal = GPRegression(kernel=RadializationKernel(3, search_radius))
    model_shadow = GPRegression(kernel=RadializationKernel(3, search_radius))

    inference_normal = Inference((x_input, output), model_normal)
    inference_shadow = ShadowInference((x_input, output), model_shadow)
    inference_normal.init_parameters()
    inference_shadow.init_parameters()

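    # Only the normal model is optimized; its learned parameters are copied
    # into the shadow inference via cholesky_update.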
    params_normal = inference_normal.learning(n_restarts=5)
    inference_shadow.cholesky_update(model_normal.param_to_vec())

    if ndim == 2:
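        # Predict and evaluate the acquisition on a 50x50 grid over [-1, 1]^2.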
        x1_grid, x2_grid = np.meshgrid(np.linspace(-1, 1, 50),
                                       np.linspace(-1, 1, 50))
        x_pred_points = Variable(
            torch.from_numpy(
                np.vstack([x1_grid.flatten(),
                           x2_grid.flatten()]).astype(np.float32)).t())
        pred_mean_normal, pred_var_normal = inference_normal.predict(
            x_pred_points)
        pred_std_normal = pred_var_normal**0.5
        acq_normal = acquisition(x_pred_points,
                                 deepcopy_inference(inference_normal,
                                                    params_normal),
                                 reference=reference)
Example #3
if __name__ == '__main__':
	from HyperSphere.GP.kernels.modules.squared_exponential import SquaredExponentialKernel
	from HyperSphere.GP.models.gp_regression import GPRegression
	from HyperSphere.GP.inference.inference import Inference
	import torch
	from torch.autograd import Variable
	import matplotlib.pyplot as plt

	ndata = 6
	ndim = 1
	model_for_generating = GPRegression(kernel=SquaredExponentialKernel(ndim))
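	# Draw training targets from the generating model's prior:
	# y = mean(x) + L @ eps, with L the lower Cholesky factor of the kernel
	# matrix plus likelihood noise.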
	train_x = Variable(torch.FloatTensor(ndata, ndim).uniform_(-2, 2))
	chol_L = torch.potrf(
		(model_for_generating.kernel(train_x) + torch.diag(model_for_generating.likelihood(train_x))).data, upper=False)
	train_y = model_for_generating.mean(train_x) + Variable(torch.mm(chol_L, torch.randn(ndata, 1)))
	# train_y = torch.sin(2 * math.pi * torch.sum(train_x, 1, keepdim=True)) + Variable(torch.FloatTensor(train_x.size(0), 1).normal_())
	train_data = (train_x, train_y)
	param_original = model_for_generating.param_to_vec()
	reference = torch.min(train_y.data)

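	# Fit a fresh model from the same initial parameters, once by likelihood
	# optimization with restarts and once by sampling with burn-in and thinning.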
	model_for_learning = GPRegression(kernel=SquaredExponentialKernel(ndim))
	inference = Inference(train_data, model_for_learning)
	model_for_learning.vec_to_param(param_original)
	param_samples_learning = inference.learning(n_restarts=10)
	model_for_learning.vec_to_param(param_original)
	param_samples_sampling = inference.sampling(n_sample=5, n_burnin=200, n_thin=10)

	if ndim == 1:
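		# Scatter the training points and mark the reference (current minimum).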
		ax11 = plt.subplot(221)
		ax11.plot(train_x.data.numpy().flatten(), train_y.data.numpy().flatten(), 'k*')
		ax11.axhline(reference, ls='--', alpha=0.5)
		ax12 = plt.subplot(222, sharex=ax11, sharey=ax11)
		ax12.plot(train_x.data.numpy().flatten(), train_y.data.numpy().flatten(), 'k*')
Example #4
if __name__ == '__main__':
    from HyperSphere.GP.kernels.modules.squared_exponential import SquaredExponentialKernel
    from HyperSphere.GP.models.gp_regression import GPRegression
    from HyperSphere.GP.inference.inference import Inference
    import torch
    from torch.autograd import Variable
    import matplotlib.pyplot as plt
    ndata = 20
    ndim = 1
    model_for_generating = GPRegression(kernel=SquaredExponentialKernel(ndim))
    train_x = Variable(torch.FloatTensor(ndata, ndim).uniform_(-2, 2))
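    # Sample training targets from the generating GP's prior via the lower
    # Cholesky factor of the kernel matrix plus likelihood noise.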
    chol_L = torch.potrf(
        (model_for_generating.kernel(train_x) +
         torch.diag(model_for_generating.likelihood(train_x))).data,
        upper=False)
    train_y = model_for_generating.mean(train_x) + Variable(
        torch.mm(chol_L, torch.randn(ndata, 1)))
    train_data = (train_x, train_y)
    param_original = model_for_generating.param_to_vec()
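    # Negative log-likelihood of the data under the generating parameters,
    # kept for comparison.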
    generated_nll = Inference(
        train_data, model_for_generating).negative_log_likelihood().data[0, 0]

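    # As above: optimize hyperparameters with restarts, then sample, starting
    # both runs from the generating parameters.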
    model_for_learning = GPRegression(kernel=SquaredExponentialKernel(ndim))
    inference = Inference(train_data, model_for_learning)
    model_for_learning.vec_to_param(param_original)
    param_samples_learning = inference.learning(n_restarts=10)
    model_for_learning.vec_to_param(param_original)
    param_samples_sampling = inference.sampling()

    if ndim == 1:
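        # Visualize both fits on a dense 1-D grid over [-2.5, 2.5].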
        pred_x = torch.linspace(-2.5, 2.5, 100).view(-1, 1)
        fig, axes = plt.subplots(nrows=1, ncols=3, sharex=True, sharey=True)

        one_dim_plotting(axes[0],