def run_methods(train_points, train_targets, test_points, test_targets, model_parameters, m_list, file_name, title, show=False, full=True, vi=True):
    method = 'means'
    optimizer = 'L-BFGS-B'
    max_iter = 50
    options = {'maxiter': max_iter, 'disp': False, 'mydisp': True}
    means_r2 = []
    vi_r2 = []
    for m in m_list:
        print('m:', m)
        # initialise the inducing inputs at k-means cluster centres
        print('Finding means...')
        means = KMeans(n_clusters=m, n_init=1, max_iter=20)
        means.fit(train_points.T)
        inputs = means.cluster_centers_.T
        print('...found')
        # sparse GP, method='means'
        model_covariance_obj = SquaredExponential(np.copy(model_parameters))
        new_gp = GPR(model_covariance_obj, method='means', optimizer=optimizer)
        res = new_gp.fit(train_points, train_targets, num_inputs=m, optimizer_options=options, inputs=inputs)
        predicted_y_test, _, _ = new_gp.predict(test_points)
        means_r2.append(r2_score(test_targets, predicted_y_test))
        if vi:
            # sparse GP, method='vi'
            model_covariance_obj = SquaredExponential(np.copy(model_parameters))
            new_gp = GPR(model_covariance_obj, method='vi', optimizer=optimizer)
            res = new_gp.fit(train_points, train_targets, num_inputs=m, optimizer_options=options, inputs=inputs)
            predicted_y_test, _, _ = new_gp.predict(test_points)
            vi_r2.append(r2_score(test_targets, predicted_y_test))
    if full:
        # full GP baseline (method='brute'), plotted as a horizontal reference line
        model_covariance_obj = SquaredExponential(np.copy(model_parameters))
        new_gp = GPR(model_covariance_obj, method='brute')
        res = new_gp.fit(train_points, train_targets, max_iter=max_iter)
        predicted_y_test, _, _ = new_gp.predict(test_points, train_points, train_targets)
        brute_r2 = r2_score(test_targets, predicted_y_test)
    plt.plot(range(len(m_list)), means_r2, '-kx', label='vi-means')
    if vi:
        plt.plot(range(len(m_list)), vi_r2, '-rx', label='vi')
    if full:
        plt.plot(range(len(m_list)), len(m_list) * [brute_r2], '--g', label='full GP')
    plt.xticks(range(len(m_list)), m_list)
    plt.xlabel('m')
    plt.ylabel('$R^2$-score on test data')
    # plt.ylim(0.5, 1)
    plt.legend(loc=4)
    plt.title(title)
    plt.savefig('../Plots/inducing_inputs/' + file_name + '.pgf')
    if show:
        plt.show()
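# --- Hedged usage sketch (added; not part of the original script) ---
# Shows one plausible way to call the inducing-input experiment above on toy
# data. The synthetic data, parameter values, file name and title below are
# assumptions for illustration; only the run_methods signature comes from the
# listing above.
import numpy as np

rng = np.random.RandomState(0)
dim, n_train, n_test = 2, 500, 100
x_tr = rng.rand(dim, n_train)            # points are stored as columns, as in the listing
x_test = rng.rand(dim, n_test)
y_tr = np.sin(x_tr.sum(axis=0)) + 0.1 * rng.randn(n_train)
y_test = np.sin(x_test.sum(axis=0))

model_parameters = np.array([0.6, 0.3, 0.1])   # same 3-parameter layout as the demo scripts below
m_list = [5, 10, 20, 50]                       # numbers of inducing inputs to compare

run_methods(x_tr, y_tr, x_test, y_test, model_parameters, m_list,
            file_name='toy_inducing_inputs', title='Toy data', show=True)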
def run_methods(train_points, train_targets, test_points, test_targets, model_parameters, optimizer_options, file_name, ind_num, title, show=False):
    method = 'means'
    print('Finding means...')
    means = KMeans(n_clusters=ind_num, n_init=1, max_iter=20)
    means.fit(train_points.T)
    inputs = means.cluster_centers_.T
    print('...found')
    for optimizer, color, opts in zip(['L-BFGS-B', 'Projected Newton'], ['-kx', '-mx'], optimizer_options):
        print('Optimizer', optimizer)
        model_covariance_obj = SquaredExponential(np.copy(model_parameters))
        new_gp = GPR(model_covariance_obj, method=method, optimizer=optimizer)
        res = new_gp.fit(train_points, train_targets, num_inputs=ind_num, optimizer_options=opts, inputs=inputs)
        name = optimizer
        metric = lambda w: new_gp.get_prediction_quality(w, train_points, train_targets, test_points, test_targets)
        x_lst, y_lst = res.plot_performance(metric, 'i', freq=1)
        plt.plot(x_lst, y_lst, color, label=name)
    plt.xlabel('Epoch')
    plt.ylabel('$R^2$-score on test data')
    plt.legend()
    plt.title(title)
    plt.savefig('../Plots/vi_variations/' + file_name + '.pgf')
    if show:
        plt.show()
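# --- Hedged usage sketch (added; not part of the original script) ---
# One plausible way to build the optimizer_options argument for the function
# above: one options dict per optimizer, in the same order as the
# zip(['L-BFGS-B', 'Projected Newton'], ...) loop. The dict keys mirror the
# {'maxiter', 'disp', 'mydisp'} format used in the first listing; any further
# optimizer-specific keys are not shown here.
lbfgsb_options = {'maxiter': 100, 'disp': False, 'mydisp': True}
projected_newton_options = {'maxiter': 100, 'disp': False, 'mydisp': True}
optimizer_options = [lbfgsb_options, projected_newton_options]

# run_methods(x_tr, y_tr, x_test, y_test, model_parameters, optimizer_options,
#             file_name='vi_optimizers', ind_num=20, title='VI optimizers', show=True)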
def run_methods(train_points, train_targets, test_points, test_targets, model_parameters, optimizer_options, file_name, ind_num, title, show=False):
    print('Finding means...')
    means = KMeans(n_clusters=ind_num, n_init=1, max_iter=40)
    means.fit(train_points.T)
    inputs = means.cluster_centers_.T
    print('...found')

    method = 'svi'
    parametrization = 'cholesky'
    optimizer = 'L-BFGS-B'
    color = '-go'
    opts = optimizer_options[0]
    print('svi-L-BFGS-B')
    model_covariance_obj = SquaredExponential(np.copy(model_parameters))
    new_gp = GPR(model_covariance_obj, method=method, parametrization=parametrization)
    res = new_gp.fit(train_points, train_targets, num_inputs=ind_num, optimizer_options=opts, inputs=inputs)
    name = 'svi-L-BFGS-B'
    metric = lambda w: new_gp.get_prediction_quality(w, test_points, test_targets)
    x_lst, y_lst = res.plot_performance(metric, 'i', freq=1)
    plt.plot(x_lst, y_lst, color, label=name)

    print('vi-means')
    method = 'means'
    optimizer = 'L-BFGS-B'
    opt_options = optimizer_options[1]
    model_covariance_obj = SquaredExponential(np.copy(model_parameters))
    new_gp = GPR(model_covariance_obj, method=method, optimizer=optimizer)
    res = new_gp.fit(train_points, train_targets, num_inputs=ind_num, optimizer_options=opt_options, inputs=inputs)
    name = 'vi-means'
    metric = lambda w: new_gp.get_prediction_quality(w, train_points, train_targets, test_points, test_targets)
    x_lst, y_lst = res.plot_performance(metric, 'i', freq=1)
    plt.plot(x_lst, y_lst, '-kx', label=name)
    print(x_lst[-1])

    plt.xlabel('Epoch')
    plt.ylabel('$R^2$-score on test data')
    plt.legend()
    plt.title(title)
    plt.savefig('../Plots/vi_vs_svi/' + file_name + '.pgf')
    if show:
        plt.show()
def run_methods(train_points, train_targets, test_points, test_targets, model_parameters, optimizer_options, file_name, ind_num, title, show=False):
    print('Finding means...')
    means = KMeans(n_clusters=ind_num, n_init=1, max_iter=40)
    means.fit(train_points.T)
    inputs = means.cluster_centers_.T
    print('...found')

    # method = 'svi'
    # parametrization = 'natural'
    # # optimizer = 'L-BFGS-B'
    # color = '-yo'
    # opts = optimizer_options[0]
    # print('svi')
    # model_covariance_obj = SquaredExponential(np.copy(model_parameters))
    # new_gp = GPR(model_covariance_obj, method=method, parametrization=parametrization)
    # res = new_gp.fit(train_points, train_targets, num_inputs=ind_num, optimizer_options=opts, inputs=inputs)
    # name = 'svi-natural'
    # metric = lambda w: new_gp.get_prediction_quality(w, test_points, test_targets)
    # x_lst, y_lst = res.plot_performance(metric, 'i', freq=1)
    # plt.plot(x_lst, y_lst, color, label=name)

    print('vi-means')
    method = 'means'
    opt_options = optimizer_options[1]
    model_covariance_obj = SquaredExponential(np.copy(model_parameters))
    new_gp = GPR(model_covariance_obj, method=method)
    res = new_gp.fit(train_points, train_targets, num_inputs=ind_num, optimizer_options=opt_options, inputs=inputs)
    name = 'vi-means'
    metric = lambda w: new_gp.get_prediction_quality(w, train_points, train_targets, test_points, test_targets)
    x_lst, y_lst = res.plot_performance(metric, 'i', freq=1)
    plt.plot(x_lst, y_lst, '-kx', label=name)
    print(x_lst[-1])

    plt.ylabel('$R^2$-score on test data')
    plt.legend()
    plt.title(title)
    # plt.savefig('../Plots/vi_vs_svi/'+file_name + '.pgf')
    if show:
        plt.show()
def run_methods(train_points, train_targets, test_points, test_targets, model_parameters, optimizer_options, file_name, ind_num, title, show=False):
    method = 'svi'
    parametrization = 'cholesky'
    means = KMeans(n_clusters=ind_num, n_init=3, max_iter=100, random_state=241)
    means.fit(train_points.T)
    inputs = means.cluster_centers_.T

    # for optimizer, color, opts in zip(['SAG', 'FG', 'L-BFGS-B'], ['-ro', '-bo', '-go'],
    #                                   optimizer_options[:-1]):
    #     print('Optimizer', optimizer)
    #     model_covariance_obj = SquaredExponential(np.copy(model_parameters))
    #     new_gp = GPR(model_covariance_obj, method=method, parametrization=parametrization, optimizer=optimizer)
    #     res = new_gp.fit(train_points, train_targets, num_inputs=ind_num, optimizer_options=opts, inputs=inputs)
    #     name = 'svi-' + optimizer
    #     metric = lambda w: new_gp.get_prediction_quality(w, test_points, test_targets)
    #     x_lst, y_lst = res.plot_performance(metric, 'i', freq=5)
    #     plt.plot(x_lst, y_lst, color, label=name)

    parametrization = 'natural'
    print('Natural parametrization')
    opt_options = optimizer_options[-1]
    model_covariance_obj = SquaredExponential(np.copy(model_parameters))
    new_gp = GPR(model_covariance_obj, method=method, parametrization=parametrization)
    res = new_gp.fit(train_points, train_targets, num_inputs=ind_num, optimizer_options=opt_options, inputs=inputs)
    name = 'svi-natural'
    metric = lambda w: new_gp.get_prediction_quality(w, test_points, test_targets)
    x_lst, y_lst = res.plot_performance(metric, 'i', freq=5)
    print(y_lst)
    plt.plot(x_lst, y_lst, '-yo', label=name)

    plt.xlabel('Epoch')
    plt.ylabel('$R^2$-score on test data')
    plt.legend()
    plt.title(title)
    # plt.savefig('../Plots/svi_variations/'+file_name + '.pgf')
    if show:
        plt.show()
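# --- Hedged note (added; not part of the original script) ---
# Judging from the commented-out loop and the code above, optimizer_options for
# this variant is expected to hold one entry per optimizer with the
# natural-gradient options last: optimizer_options[:-1] feeds the
# ['SAG', 'FG', 'L-BFGS-B'] loop and optimizer_options[-1] the natural
# parametrization. A sketch of that layout (the dict contents are assumptions):
# optimizer_options = [sag_options, fg_options, lbfgsb_options, natural_sg_options]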
if dim == 1:
    x_test = np.linspace(0, 1, test_num)
    x_test = x_test.reshape(1, test_num)
else:
    x_test = np.random.rand(dim, test_num)
y_tr, y_test = gp.generate_data(x_tr, x_test, seed=seed)

data_points = []
data_targets = []
fig = plt.figure()
gp_plot_reg_data(x_test, y_test, 'y-')

means_gp = GPR(model_covariance_obj, method='means')
means_gp.fit(x_tr, y_tr, num_inputs=ind_inputs_num, optimizer_options=lbfgsb_options)
print(model_covariance_obj.get_params())
means_inducing_points, means_mean, means_cov = means_gp.inducing_inputs
means_y_test, means_high, means_low = means_gp.predict(x_test)


def onclick(event):
    plt.close('all')
    point_x, point_y = event.xdata, event.ydata
    data_points.append(point_x)
    data_targets.append(point_y)
    x_tr = np.array(data_points).reshape(-1)[None, :]
    y_tr = np.array(data_targets)
    new_gp = GPR(model_covariance_obj, method=method)
    # new_gp.fit(x_tr, y_tr, max_iter=max_iter)
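# --- Hedged note (added; not part of the original script) ---
# The onclick handler above is presumably registered on the figure with the
# standard matplotlib event API before showing the plot, e.g.:
# fig.canvas.mpl_connect('button_press_event', onclick)
# The excerpt ends before that call, so this registration line is an assumption.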
if method == 'brute':
    new_gp = GPR(model_covariance_obj)
    # res = new_gp.fit(x_tr, y_tr, max_iter=max_iter)
    predicted_y_test, high, low, samples = new_gp.predict(x_test, x_tr, y_tr, n_samples=3)
    print(len(samples))
    print(samples[0].shape)
elif method == 'means' or method == 'vi':
    model_covariance_obj = SquaredExponential(model_params)
    new_gp = GPR(model_covariance_obj, method=method, optimizer=optimizer)
    res = new_gp.fit(x_tr, y_tr, num_inputs=ind_inputs_num, optimizer_options=lbfgsb_options)
    # res = new_gp.fit(x_tr, y_tr, num_inputs=ind_inputs_num, optimizer_options=projected_newton_options)
    inducing_points, mean, cov = new_gp.inducing_inputs
    predicted_y_test, high, low = new_gp.predict(x_test, return_confidence_region=True)
elif method == 'svi':
    model_covariance_obj = SquaredExponential(model_params)
    if parametrization == 'natural':
        opts = sg_options
    else:
        if optimizer == 'L-BFGS-B':
            opts = lbfgsb_options
        elif optimizer == 'AdaDelta':
            opts = sag_options
model_params = np.array([0.6, 0.3, 0.1])
model_covariance_obj = SquaredExponential(model_params)

num = 200
test_num = 100
dim = 1
seed = 21
method = 'means'  # possible methods: 'brute', 'vi', 'means', 'svi'
parametrization = 'natural'  # possible parametrizations for svi method: cholesky, natural
ind_inputs_num = 30
max_iter = 100

if method == 'brute':
    new_gp = GPR(model_covariance_obj)
    new_gp.fit(x_tr, y_tr, max_iter=max_iter)
    predicted_y_test, high, low = new_gp.predict(x_test, x_tr, y_tr)
elif method == 'means' or method == 'vi':
    model_covariance_obj = SquaredExponential(model_params)
    new_gp = GPR(model_covariance_obj, method=method)
    start = time.time()
    new_gp.fit(x_tr, y_tr, num_inputs=ind_inputs_num, max_iter=max_iter)
    print(time.time() - start)
    inducing_points, mean, cov = new_gp.inducing_inputs
    predicted_y_test, high, low = new_gp.predict(x_test)
elif method == 'svi':
    model_covariance_obj = SquaredExponential(model_params)
    new_gp = GPR(model_covariance_obj, method=method, parametrization=parametrization)
    new_gp.fit(x_tr, y_tr, num_inputs=ind_inputs_num, max_iter=max_iter)
# (continuation of the `if dim == 1:` test-point construction shown earlier)
else:
    x_test = np.random.rand(dim, test_num)
y_tr, y_test = gp.generate_data(x_tr, x_test, seed=seed)

if method == 'brute':
    new_gp = GPR(model_covariance_obj)
    # res = new_gp.fit(x_tr, y_tr, max_iter=max_iter)
    predicted_y_test, high, low, samples = new_gp.predict(x_test, x_tr, y_tr, n_samples=3)
    print(len(samples))
    print(samples[0].shape)
elif method == 'means' or method == 'vi':
    model_covariance_obj = SquaredExponential(model_params)
    new_gp = GPR(model_covariance_obj, method=method, optimizer=optimizer)
    res = new_gp.fit(x_tr, y_tr, num_inputs=ind_inputs_num, optimizer_options=lbfgsb_options)
    # res = new_gp.fit(x_tr, y_tr, num_inputs=ind_inputs_num, optimizer_options=projected_newton_options)
    inducing_points, mean, cov = new_gp.inducing_inputs
    predicted_y_test, high, low = new_gp.predict(x_test, return_confidence_region=True)
elif method == 'svi':
    model_covariance_obj = SquaredExponential(model_params)
    if parametrization == 'natural':
        opts = sg_options
    else:
        if optimizer == 'L-BFGS-B':
            opts = lbfgsb_options
        elif optimizer == 'AdaDelta':
            opts = sag_options
        else:
            opts = sg_options
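# --- Hedged sketch (added; not part of the original script) ---
# x_tr and the gp object are used by the demo snippets above before they are
# defined in the excerpt. A plausible construction, mirroring how x_test is
# built and using the num/dim/seed settings shown earlier, would be:
#
# if dim == 1:
#     x_tr = np.linspace(0, 1, num).reshape(1, num)
# else:
#     x_tr = np.random.rand(dim, num)
# gp = GPR(SquaredExponential(np.copy(model_params)))   # assumed construction of the generating GP
# y_tr, y_test = gp.generate_data(x_tr, x_test, seed=seed)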