### FULL GP ###
# Posterior predictive of the full (non-sparse) GP, with observation noise.
full_pred_mean, full_pred_covar = model.joint_posterior_predictive(
    x_train_normalised, y_train_normalised, x_test_normalised, noise=True)

# Map the normalised predictions back onto the original output scale.
full_pred_mean = full_pred_mean * train_sd[-1] + train_mean[-1]
full_pred_covar = full_pred_covar * (train_sd[-1]) ** 2

# Snapshot the full GP's learned hyperparameters so later experiments can
# reuse them without being affected by further optimisation of `model`.
full_logsigmaf2 = deepcopy(model.logsigmaf2)
full_logl2 = deepcopy(model.logl2)
full_logsigman2 = deepcopy(model.logsigman2)

### VARIATIONAL GP ###
# Sweep the number of inducing points; repeat each setting several times to
# average over random initialisation/optimisation.
grid = [1, 15] + list(range(30, 451, 30)) + [455]
no_replicates = 10
LL_results = np.zeros([len(grid), no_replicates])

for row, no_inducing in enumerate(grid):
    for col in range(no_replicates):
        # Fresh sparse GP per replicate, hyperparameters left free to train.
        varGP = variational_GP(
            x_train_normalised.data.numpy(),
            np.expand_dims(y_train_normalised.data.numpy(), 1),
            no_inducing=no_inducing,
            freeze_hyperparam=False)
        varGP.optimize_parameters(500, 'Adam', learning_rate=0.1)
        # Fv() — variational lower bound on the log marginal likelihood
        # (per the original variable name `var_LL_lower_bound`).
        LL_results[row, col] = varGP.Fv()

np.savetxt('LL_results_not_frozen.tsv', LL_results, delimiter='\t')
# Snapshot the full GP's optimised hyperparameters; they are handed to the
# sparse model below with freeze_hyperparam=True, so only the variational
# parameters (inducing points etc.) are learned in this sweep.
full_logl2 = deepcopy(model.logl2)
full_logsigman2 = deepcopy(model.logsigman2)

### VARIATIONAL GP ###
# Grid of inducing-point counts, replicated to average over random restarts.
grid = [1, 15] + list(range(30, 451, 30)) + [455]
no_replicates = 10
KL_results = np.zeros([len(grid), no_replicates])
for grid_index, no_inducing in enumerate(grid):
    for replicate_index in range(no_replicates):
        # Sparse GP with hyperparameters frozen at the full GP's values.
        # NOTE(review): `full_logsigmaf2` is defined earlier in the file,
        # alongside the two snapshots taken above.
        varGP = variational_GP(x_train_normalised.data.numpy(), np.expand_dims(
            y_train_normalised.data.numpy(), 1), no_inducing=no_inducing,
            freeze_hyperparam=True, logsigmaf2=full_logsigmaf2,
            logl2=full_logl2, logsigman2=full_logsigman2)
        varGP.optimize_parameters(500, 'Adam', learning_rate=0.01)
        var_pred_mean, var_pred_covar = varGP.joint_posterior_predictive(
            x_test_normalised.data.numpy(), noise=True)
        # Unnormalise mean and covariance back to the original output scale.
        var_pred_mean = var_pred_mean * train_sd[-1] + train_mean[-1]
        #import pdb; pdb.set_trace()
        var_pred_covar = var_pred_covar * (train_sd[-1]**2)
        #var_pred_var = torch.diag(var_pred_covar)
        # Compute KL divergence
        # NOTE(review): the KL computation itself continues beyond this chunk.
        #import pdb; pdb.set_trace()
# Load a 1-D regression task from the already-open pickle file handle `f`.
train_inputs, train_outputs, test_inputs = pickle.load(f)
no_train = train_outputs.size
no_test = test_inputs.shape[0]

# convert to torch tensors with an explicit feature dimension of 1
train_inputs = torch.Tensor(train_inputs)
train_inputs = torch.unsqueeze(train_inputs, 1)  # 1 dimensional data
train_outputs = torch.Tensor(train_outputs)
test_inputs = torch.Tensor(test_inputs)
test_inputs = torch.unsqueeze(test_inputs, 1)  # 1 dimensional data

### VARIATIONAL GP ###
no_inducing = 15
varGP = variational_GP(train_inputs.data.numpy(),
                       np.expand_dims(train_outputs.data.numpy(), 1),
                       no_inducing=no_inducing, kernel='matern')

# record initial inducing point locations (deep copy: optimisation moves Xm)
initial_inducing = deepcopy(torch.squeeze(varGP.Xm).data.numpy())

varGP.optimize_parameters(10000, 'Adam', learning_rate=0.02)

# FIX(review): removed a pre-optimisation joint_posterior_predictive call
# whose results were overwritten here before ever being used (dead
# computation). If a "prior" predictive plot is wanted, store it under a
# different name instead.
var_pred_mean, var_pred_covar = varGP.joint_posterior_predictive(
    test_inputs.data.numpy(), noise=True)  # plot error bars with observation noise

# final inducing points
final_inducing = torch.squeeze(varGP.Xm).data.numpy()
### VARIATIONAL GP ###
# Set random seeds for reproducibility.
# FIX(review): random.seed alone does not make this experiment reproducible —
# the model stack draws from numpy and torch RNGs, so seed those as well.
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)

# Sweep the number of inducing points, replicated to average over restarts.
grid = [1, 15] + list(range(30, 451, 30)) + [455]
no_replicates = 10
SMSE_results = np.zeros([len(grid), no_replicates])
SNLP_results = np.zeros([len(grid), no_replicates])

for grid_index, no_inducing in enumerate(grid):
    for replicate_index in range(no_replicates):
        varGP = variational_GP(
            x_train_normalised.data.numpy(),
            np.expand_dims(y_train_normalised.data.numpy(), 1),
            no_inducing=no_inducing)
        varGP.optimize_parameters(1000, 'Adam', learning_rate=0.01)

        pred_mean, pred_covar = varGP.joint_posterior_predictive(
            x_test_normalised.data.numpy(), noise=True)
        # Unnormalise the mean and the (diagonal) predictive variance.
        pred_mean = torch.squeeze(pred_mean * train_sd[-1] + train_mean[-1])
        pred_var = torch.diag(pred_covar)
        pred_var = pred_var * (train_sd[-1])**2

        SNLP_varGP = SNLP(pred_mean, pred_var, y_test,
                          train_mean[-1], train_sd[-1])
        # pred_mean is already a tensor — the original torch.Tensor(pred_mean)
        # re-wrap made a redundant copy.
        SMSE_varGP = SMSE(pred_mean, y_test)
        SMSE_results[grid_index, replicate_index] = SMSE_varGP
        SNLP_results[grid_index, replicate_index] = SNLP_varGP