    x_input = Variable(torch.FloatTensor(ndata, ndim).uniform_(-1, 1))
    x_input.data[0, :] = 0
    x_input.data[1, :] = 1
    output = torch.cos(x_input[:, 0:1] + (x_input[:, 1:2] / math.pi * 0.5) +
                       torch.prod(x_input, 1, keepdim=True))
    reference = torch.min(output).data.squeeze()[0]
    train_data = (x_input, output)

    model_rect = GPRegression(kernel=Matern52(ndim, id_transform))
    kernel_input_map = x_radial
    model_sphere1 = GPRegression(
        kernel=Matern52(kernel_input_map.dim_change(ndim), kernel_input_map))
    model_sphere2 = GPRegression(
        kernel=Matern52(kernel_input_map.dim_change(ndim), kernel_input_map))

    inference_rect = Inference((x_input, output), model_rect)
    inference_sphere1 = Inference((x_input, output), model_sphere1)
    inference_sphere2 = ShadowInference((x_input, output), model_sphere2)
    inference_rect.model_param_init()
    inference_sphere1.model_param_init()
    inference_sphere2.model_param_init()

    params_rect = inference_rect.learning(n_restarts=10)
    params_sphere1 = inference_sphere1.learning(n_restarts=10)
    inference_sphere2.matrix_update(model_sphere1.param_to_vec())  # reuse the hyperparameters fitted for model_sphere1

    if ndim == 2:
        x1_grid, x2_grid = np.meshgrid(np.linspace(-1, 1, 50),
                                       np.linspace(-1, 1, 50))
        x_pred_points = Variable(
            torch.from_numpy(
                np.vstack([x1_grid.flatten(),
                           x2_grid.flatten()]).astype(np.float32)).t())
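(x_radial and id_transform come from the surrounding package and are not shown in this listing. As a rough, hypothetical sketch of what such a kernel input map could look like, it sends each point to (radius, unit direction), and dim_change reports the mapped dimension; RadialMap below is an assumption, not the package's implementation.)

import torch

# Hypothetical stand-in for an x_radial-style kernel input map.
class RadialMap(object):
    def __call__(self, x):
        r = torch.sum(x ** 2, 1, keepdim=True) ** 0.5  # radius of each row
        d = x / r.clamp(min=1e-8)                      # unit direction, guarded at the origin
        return torch.cat([r, d], 1)

    def dim_change(self, ndim):
        return ndim + 1  # radius plus the direction coordinates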
Example #2
    ndata = 3
    ndim = 2
    search_radius = ndim**0.5
    x_input = Variable(torch.FloatTensor(ndata, ndim).uniform_(-1, 1))
    x_input.data[0, :] = 0
    x_input.data[1, :] = 1
    output = torch.cos(x_input[:, 0:1] + (x_input[:, 1:2] / math.pi * 0.5) +
                       torch.prod(x_input, 1, keepdim=True))
    reference = torch.min(output).data.squeeze()[0]
    train_data = (x_input, output)

    model_normal = GPRegression(kernel=RadializationKernel(3, search_radius))
    model_shadow = GPRegression(kernel=RadializationKernel(3, search_radius))

    inference_normal = Inference((x_input, output), model_normal)
    inference_shadow = ShadowInference((x_input, output), model_shadow)
    inference_normal.init_parameters()
    inference_shadow.init_parameters()

    params_normal = inference_normal.learning(n_restarts=5)
    inference_shadow.cholesky_update(model_normal.param_to_vec())  # reuse the hyperparameters fitted for model_normal

    if ndim == 2:
        x1_grid, x2_grid = np.meshgrid(np.linspace(-1, 1, 50),
                                       np.linspace(-1, 1, 50))
        x_pred_points = Variable(
            torch.from_numpy(
                np.vstack([x1_grid.flatten(),
                           x2_grid.flatten()]).astype(np.float32)).t())
        pred_mean_normal, pred_var_normal = inference_normal.predict(x_pred_points)
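(The listing cuts off here. Assuming predict() returns a (mean, variance) pair with one row per grid point, the predictions fold back onto the 50x50 grid for plotting, e.g.:)

import matplotlib.pyplot as plt

# Sketch, assuming pred_mean_normal has shape (2500, 1)
mean_grid = pred_mean_normal.data.numpy().reshape(x1_grid.shape)
plt.contourf(x1_grid, x2_grid, mean_grid)
plt.colorbar()
plt.plot(x_input.data.numpy()[:, 0], x_input.data.numpy()[:, 1], 'k*')
plt.show()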
Example #3
	import matplotlib.pyplot as plt

	ndata = 6
	ndim = 1
	model_for_generating = GPRegression(kernel=SquaredExponentialKernel(ndim))
	train_x = Variable(torch.FloatTensor(ndata, ndim).uniform_(-2, 2))
	# lower Cholesky factor of (kernel gram + noise); torch.potrf is the old name for the Cholesky factorization
	chol_L = torch.potrf(
		(model_for_generating.kernel(train_x) + torch.diag(model_for_generating.likelihood(train_x))).data, upper=False)
	# draw train_y from the generating GP: mean + L z with z ~ N(0, I)
	train_y = model_for_generating.mean(train_x) + Variable(torch.mm(chol_L, torch.randn(ndata, 1)))
	# train_y = torch.sin(2 * math.pi * torch.sum(train_x, 1, keepdim=True)) + Variable(torch.FloatTensor(train_x.size(0), 1).normal_())
	train_data = (train_x, train_y)
	param_original = model_for_generating.param_to_vec()
	reference = torch.min(train_y.data)

	model_for_learning = GPRegression(kernel=SquaredExponentialKernel(ndim))
	inference = Inference(train_data, model_for_learning)
	model_for_learning.vec_to_param(param_original)
	param_samples_learning = inference.learning(n_restarts=10)  # point estimates via multi-restart optimization
	model_for_learning.vec_to_param(param_original)
	param_samples_sampling = inference.sampling(n_sample=5, n_burnin=200, n_thin=10)  # MCMC samples of the hyperparameters

	if ndim == 1:
		ax11 = plt.subplot(221)
		ax11.plot(train_x.data.numpy().flatten(), train_y.data.numpy().flatten(), 'k*')
		ax11.axhline(reference, ls='--', alpha=0.5)
		ax12 = plt.subplot(222, sharex=ax11, sharey=ax11)
		ax12.plot(train_x.data.numpy().flatten(), train_y.data.numpy().flatten(), 'k*')
		ax12.axhline(reference, ls='--', alpha=0.5)
		ax21 = plt.subplot(223, sharex=ax11)
		ax22 = plt.subplot(224, sharex=ax11)
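(The sampling trick above generalizes: if K = L L^T, then mean + L z with z ~ N(0, I) is a draw from N(mean, K). A self-contained modern-PyTorch illustration of the same technique, independent of the package:)

import torch

n = 6
x = torch.linspace(-2, 2, n).unsqueeze(1)
K = torch.exp(-0.5 * (x - x.t()) ** 2) + 1e-4 * torch.eye(n)  # squared-exponential gram + jitter
L = torch.linalg.cholesky(K)                                  # lower-triangular factor, K = L L^T
y = torch.mm(L, torch.randn(n, 1))                            # one zero-mean GP sample at x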
Example #4
def BO(n_eval=200, path=None, func=None, ndim=None):
	assert (path is None) != (func is None)
	if path is not None:
		if not os.path.exists(path):
			path = os.path.join(EXPERIMENT_DIR, path)
		model_filename = os.path.join(path, 'model.pt')
		data_config_filename = os.path.join(path, 'data_config.pkl')

		model = torch.load(model_filename)
		# restore the saved experiment state; pickle needs binary mode
		data_config_file = open(data_config_filename, 'rb')
		for key, value in pickle.load(data_config_file).items():
			exec(key + '=value')  # rebind each saved variable in the local scope
		data_config_file.close()

		inference = Inference((x_input, output), model)
	else:
		assert (func.dim == 0) != (ndim is None)
		if ndim is None:
			ndim = func.dim
		dir_list = [elm for elm in os.listdir(EXPERIMENT_DIR) if os.path.isdir(os.path.join(EXPERIMENT_DIR, elm))]
		folder_name = func.__name__ + '_D' + str(ndim) + '_' + exp_str + '_' + datetime.now().strftime('%Y%m%d-%H:%M:%S:%f')
		os.makedirs(os.path.join(EXPERIMENT_DIR, folder_name))
		model_filename = os.path.join(EXPERIMENT_DIR, folder_name, 'model.pt')
		data_config_filename = os.path.join(EXPERIMENT_DIR, folder_name, 'data_config.pkl')

		search_sphere_radius = ndim ** 0.5

		# two seed points: the origin and the all-(-1) corner of the cube
		x_input = Variable(torch.ger(-torch.arange(0, 2), torch.ones(ndim)))
		output = Variable(torch.zeros(x_input.size(0), 1))
		for i in range(x_input.size(0)):
			output[i] = func(x_input[i])

		kernel_input_map = x2radial
		model = GPRegression(kernel=Matern52(ndim=kernel_input_map.dim_change(ndim), input_map=kernel_input_map))

		time_list = [time.time()] * 2
		elapse_list = [0, 0]
		pred_mean_list = [0, 0]
		pred_std_list = [0, 0]
		pred_var_list = [0, 0]
		pred_stdmax_list = [1, 1]
		pred_varmax_list = [1, 1]
		reference_list = [output.data.squeeze()[0]] * 2
		refind_list = [1, 1]
		dist_to_ref_list = [0, 0]

		inference = Inference((x_input, output), model)
		inference.init_parameters()
		inference.sampling(n_sample=1, n_burnin=99, n_thin=1)

	stored_variable_names = locals().keys()
	ignored_variable_names = ['n_eval', 'path', 'data_config_file', 'dir_list', 'folder_name',
	                          'next_ind', 'model_filename', 'data_config_filename', 'i',
	                          'kernel_input_map', 'model', 'inference']
	stored_variable_names = set(stored_variable_names).difference(set(ignored_variable_names))

	bnd = radial_bound(search_sphere_radius)

	for _ in range(3):
		print('Experiment based on data in ' + os.path.split(model_filename)[0])

	# main BO loop: refit the GP hyperparameters, suggest the next point, evaluate it
	for _ in range(n_eval):
		inference = Inference((x_input, output), model)

		reference, ref_ind = torch.min(output, 0)
		reference = reference.data.squeeze()[0]
		gp_hyper_params = inference.sampling(n_sample=10, n_burnin=0, n_thin=1)
		inferences = deepcopy_inference(inference, gp_hyper_params)

		x0_cand = optimization_candidates(x_input, output, -1, 1)
		x0, sample_info = optimization_init_points(x0_cand, inferences, reference=reference)
		next_x_point, pred_mean, pred_std, pred_var, pred_stdmax, pred_varmax = suggest(inferences, x0=x0, bounds=bnd, reference=reference)

		time_list.append(time.time())
		elapse_list.append(time_list[-1] - time_list[-2])
		pred_mean_list.append(pred_mean.squeeze()[0])
		pred_std_list.append(pred_std.squeeze()[0])
		pred_var_list.append(pred_var.squeeze()[0])
		pred_stdmax_list.append(pred_stdmax.squeeze()[0])
		pred_varmax_list.append(pred_varmax.squeeze()[0])
		reference_list.append(reference)
		refind_list.append(ref_ind.data.squeeze()[0] + 1)
		dist_to_ref_list.append(torch.sum((next_x_point - x_input[ref_ind].data) ** 2) ** 0.5)

		x_input = torch.cat([x_input, Variable(next_x_point)], 0)
		output = torch.cat([output, func(x_input[-1])])

		min_ind = torch.min(output, 0)[1]
		min_loc = x_input[min_ind]
		min_val = output[min_ind]
		dist_to_suggest = torch.sum((x_input - x_input[-1]).data ** 2, 1) ** 0.5
		dist_to_min = torch.sum((x_input - min_loc).data ** 2, 1) ** 0.5
		out_of_box = torch.sum((torch.abs(x_input.data) > 1), 1)
		print('')
		for i in range(x_input.size(0)):
			time_str = time.strftime('%H:%M:%S', time.gmtime(time_list[i])) + '(' + time.strftime('%H:%M:%S', time.gmtime(elapse_list[i])) + ')  '
			data_str = ('%3d-th : %+14.4f(R:%8.4f[%4d]/ref:[%3d]%8.4f), '
			            'mean : %+.4E, std : %.4E(%5.4f), var : %.4E(%5.4f), '
			            '2ownMIN : %8.4f, 2curMIN : %8.4f, 2new : %8.4f' %
			            (i + 1, output.data.squeeze()[i], torch.sum(x_input.data[i] ** 2) ** 0.5, out_of_box[i], refind_list[i], reference_list[i],
			             pred_mean_list[i], pred_std_list[i], pred_std_list[i] / pred_stdmax_list[i], pred_var_list[i], pred_var_list[i] / pred_varmax_list[i],
			             dist_to_ref_list[i], dist_to_min[i], dist_to_suggest[i]))
			min_str = '  <========= MIN' if i == min_ind.data.squeeze()[0] else ''
			print(time_str + data_str + min_str)
		print(model.kernel.__class__.__name__)

		torch.save(model, model_filename)
		stored_variable = dict()
		for key in stored_variable_names:
			stored_variable[key] = locals()[key]
		f = open(data_config_filename, 'wb')  # binary mode to match the pickle load above
		pickle.dump(stored_variable, f)
		f.close()

	for _ in range(3):
		print('Experiment based on data in ' + os.path.split(model_filename)[0])

	return os.path.split(model_filename)[0]
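(A hypothetical usage sketch: per the asserts at the top of BO, exactly one of path/func is given, and ndim is supplied only when the callable reports func.dim == 0. branin and levy stand in for benchmark callables from the surrounding codebase, not shown in this listing.)

if __name__ == '__main__':
	# fixed-dimension benchmark: branin.dim == 2, so ndim stays None
	BO(n_eval=100, func=branin)
	# dimension-agnostic benchmark (func.dim == 0) requires an explicit ndim
	# BO(n_eval=200, func=levy, ndim=20)
	# resume a previously saved experiment by its folder
	# BO(n_eval=50, path='<previous experiment folder>')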