def test_gp(plot=False, method='full'): """ Compares model prediction with an exact GP (without optimisation) """ # note that this test fails without latent noise in the case of full Gaussian np.random.seed(111) num_input_samples = 10 num_samples = 10000 gaussian_sigma = .2 X, Y, kernel = DataSource.normal_generate_samples(num_input_samples, gaussian_sigma, 1) kernel = [GPy.kern.RBF(1, variance=1., lengthscale=np.array((1.,)))] if method == 'full': m = SAVIGP_SingleComponent(X, Y, num_input_samples, UnivariateGaussian(np.array(gaussian_sigma)), kernel, num_samples, None, 0.001, True, True) if method == 'diag': m = SAVIGP_Diag(X, Y, num_input_samples, 1, UnivariateGaussian(np.array(gaussian_sigma)), kernel, num_samples, None, 0.001, True, True) # update model using optimal parameters # gp = SAVIGP_Test.gpy_prediction(X, Y, gaussian_sigma, kernel[0]) # gp_mean, gp_var = gp.predict(X, full_cov=True) # m.MoG.m[0,0] = gp_mean[:,0] # m.MoG.update_covariance(0, gp_var - gaussian_sigma * np.eye(10)) try: folder_name = 'test' + '_' + ModelLearn.get_ID() logger = ModelLearn.get_logger(folder_name, logging.DEBUG) Optimizer.optimize_model(m, 10000, logger, ['mog']) except KeyboardInterrupt: pass sa_mean, sa_var = m.predict(X) gp = SAVIGP_Test.gpy_prediction(X, Y, gaussian_sigma, deepcopy(kernel[0])) gp_mean, gp_var = gp.predict(X) mean_error = (np.abs(sa_mean - gp_mean)).sum() / sa_mean.shape[0] var_error = (np.abs(sa_var - gp_var)).sum() / gp_var.T.shape[0] if mean_error < 0.1: print bcolors.OKBLUE, "passed: mean gp prediction ", mean_error else: print bcolors.WARNING, "failed: mean gp prediction ", mean_error print bcolors.ENDC if var_error < 0.1: print bcolors.OKBLUE, "passed: var gp prediction ", var_error else: print bcolors.WARNING, "failed: var gp prediction ", var_error print bcolors.ENDC if plot: plot_fit(m) gp.plot() show(block=True)
# plots for an specific experiment
# PlotOutput.plot_output_all('abalone_graph', Experiments.get_output_path(),
#                            lambda x: x['experiment'] == 'abalone', False)


def run_config(config):
    """Run the experiment method named in ``config`` and log its lifecycle.

    config : dict
        Must contain 'method_to_run', the name of an ``ExperimentSetup``
        method that takes the config dict itself as its only argument.
    """
    try:
        # lazy %-style args avoid building the message unless it is emitted
        logger.info('started config: %s', config)
        getattr(ExperimentSetup, config['method_to_run'])(config)
        logger.info('finished config: %s', config)
    except Exception:
        # log the full traceback but keep going, so one bad config does not
        # abort a whole batch of experiments
        logger.exception(config)


if __name__ == '__main__':
    logger = ModelLearn.get_logger(ModelLearn.get_logger_path(),
                                   'general_' + ModelLearn.get_ID(), logging.DEBUG)

    # uncomment to run experiments in parallel
    # ExperimentRunner.run_parallel(3)

    # runs an individual configuration
    # ExperimentRunner.boston_experiment()
    # ExperimentRunner.wisconsin_breast_experiment()
    # ExperimentRunner.USPS_experiment()
    # ExperimentRunner.mining_experiment()
    # ExperimentRunner.abalone_experiment()
    # ExperimentRunner.mnist_binary_inducing_experiment()
    ExperimentRunner.mnist_binary_experiment()
    # ExperimentRunner.sarcos_all_joins_experiment()
    # ExperimentRunner.sarcos_experiment()
# PlotOutput.plot_output_all('abalone_graph', Experiments.get_output_path(),
#                            lambda x: x['experiment'] == 'abalone', False)


def run_config(config):
    """Run the experiment method named in ``config`` and log its lifecycle.

    config : dict
        Must contain 'method_to_run', the name of an ``ExperimentSetup``
        method that takes the config dict itself as its only argument.
    """
    try:
        # lazy %-style args avoid building the message unless it is emitted
        logger.info('started config: %s', config)
        getattr(ExperimentSetup, config['method_to_run'])(config)
        logger.info('finished config: %s', config)
    except Exception:
        # log the full traceback but keep going, so one bad config does not
        # abort a whole batch of experiments
        logger.exception(config)


if __name__ == '__main__':
    logger = ModelLearn.get_logger(ModelLearn.get_logger_path(),
                                   'general_' + ModelLearn.get_ID(), logging.DEBUG)

    # uncomment to run experiments in parallel
    # ExperimentRunner.run_parallel(3)

    # runs an individual configuration
    ExperimentRunner.boston_experiment()
    # ExperimentRunner.wisconsin_breast_experiment()
    # ExperimentRunner.USPS_experiment()
    # ExperimentRunner.mining_experiment()
    # ExperimentRunner.abalone_experiment()
    # ExperimentRunner.mnist_binary_inducing_experiment()
    # ExperimentRunner.mnist_binary_experiment()
    # ExperimentRunner.sarcos_all_joins_experiment()
    # ExperimentRunner.sarcos_experiment()
# plots for an specific experiment
# PlotOutput.plot_output_all('abalone_graph', Experiments.get_output_path(),
#                            lambda x: x['experiment'] == 'abalone', False)


def run_config(config):
    """Run the experiment method named in ``config`` and log its lifecycle.

    config : dict
        Must contain 'method_to_run', the name of a ``ModelLearn`` method
        that takes the config dict itself as its only argument.
    """
    try:
        # lazy %-style args avoid building the message unless it is emitted
        logger.info('started config: %s', config)
        getattr(ModelLearn, config['method_to_run'])(config)
        logger.info('finished config: %s', config)
    except Exception:
        # log the full traceback but keep going, so one bad config does not
        # abort a whole batch of experiments
        logger.exception(config)


if __name__ == '__main__':
    logger = ModelLearn.get_logger('general_' + ModelLearn.get_ID(), logging.DEBUG)

    # uncomment to run experiments in parallel
    # ExperimentRunner.run_parallel(3)

    # runs an individual configuration
    # ExperimentRunner.boston_experiment()
    # ExperimentRunner.wisconsin_breast_experiment()
    # ExperimentRunner.USPS_experiment()
    # ExperimentRunner.mining_experiment()
    # ExperimentRunner.abalone_experiment()
    # ExperimentRunner.mnist_binary_inducing_experiment()
    ExperimentRunner.mnist_binary_experiment()
    # ExperimentRunner.sarcos_all_joins_experiment()
    # ExperimentRunner.sarcos_experiment()