예제 #1
0
 def test_eval(self):
     """ Test for prediction: fitted Simple GP vs MFGP. Probabilistic. """
     self.report(
         'Prediction for fitted Simple vs MF GPs. Probabilistic test, might fail.'
     )
     # Run every dataset with both kernel types and count how often the two
     # fitted models achieve comparable prediction error.
     num_successes = 0
     num_experiments = 0
     for ds_idx, ds in enumerate(self.datasets):
         for kernel_type in ['se', 'matern']:
             # Fit and evaluate the plain GP on the joint (Z, X) test inputs.
             sgp = fit_simple_gp_with_dataset(ds, kernel_type)
             sgp_means, _ = sgp.eval(ds.ZX_te)
             sgp_err = compute_average_sq_prediction_error(ds.Y_te, sgp_means)
             # Fit and evaluate the multi-fidelity GP with Z and X passed
             # separately.
             mf_model = fit_mfgp_with_dataset(ds, kernel_type)
             mf_means, _ = mf_model.eval_at_fidel(ds.Z_te, ds.X_te)
             mf_err = compute_average_sq_prediction_error(ds.Y_te, mf_means)
             # A trial succeeds when the two errors are within 1e-2.
             is_success = abs(mf_err - sgp_err) < 1e-2
             num_successes += int(is_success)
             num_experiments += 1
             self.report(
                 ('%d. (N,DZ,DX)= %s, kern=%s, simple-err=%0.4f, mfgp-err=%0.4f, '
                  'succ=%d.') % (ds_idx + 1,
                                 str(ds.Z_tr.shape + (ds.X_tr.shape[1], )),
                                 kernel_type, sgp_err, mf_err, is_success),
                 'test_result')
     # Require a clear majority of trials to succeed.
     assert num_successes > 0.6 * num_experiments
예제 #2
0
def main():
    """ Main function: fits a simple GP and an MFGP on the same data, then
    reports fitting statistics, marginal likelihoods and prediction errors. """
    # pylint: disable=too-many-locals
    # pylint: disable=maybe-no-member
    np.random.seed(0)
    reporter = BasicReporter()
    Z_tr, X_tr, Y_tr, ZX_tr, Z_te, X_te, Y_te, ZX_te = get_data()
    # Load hyper-parameter options for each model.
    sgp_options = load_options(gp_instances.all_simple_gp_args, 'GP',
                               reporter=reporter)
    mfgp_options = load_options(mf_gp.all_mf_gp_args, 'MFGP', reporter=reporter)
    mfgp_options.mean_func_type = 'median'
    # Fit the GPs: simple GP on the joint (Z, X) inputs, MFGP on Z and X
    # separately.  Order kept as-is since fitting consumes the seeded RNG.
    simple_fitter = gp_instances.SimpleGPFitter(ZX_tr, Y_tr, sgp_options,
                                                reporter=reporter)
    simple_model, simple_opt_pt = simple_fitter.fit_gp()
    mf_fitter = mf_gp.MFGPFitter(Z_tr, X_tr, Y_tr, mfgp_options,
                                 reporter=reporter)
    mf_model, mf_opt_pt = mf_fitter.fit_gp()
    # Round the optimal hyper-parameters and their bounds for readable output.
    simple_opt_pt = np.array(simple_opt_pt).round(4)
    mf_opt_pt = np.array(mf_opt_pt).round(4)
    simple_bounds = simple_fitter.hp_bounds.round(3)
    mf_bounds = mf_fitter.hp_bounds.round(3)
    # Print out some fitting statistics.
    _print_str_results(reporter, 'Opt-pts', str(simple_opt_pt), str(mf_opt_pt))
    _print_str_results(reporter, 'Opt-bounds', str(simple_bounds),
                       str(mf_bounds))

    # The marginal likelihoods.
    _print_float_results(reporter, 'Log_Marg_Like',
                         simple_model.compute_log_marginal_likelihood(),
                         mf_model.compute_log_marginal_likelihood())
    # Training-set prediction errors.
    s_pred, _ = simple_model.eval(ZX_tr)
    mf_pred, _ = mf_model.eval_at_fidel(Z_tr, X_tr)
    _print_float_results(reporter, 'Train Error',
                         compute_average_sq_prediction_error(Y_tr, s_pred),
                         compute_average_sq_prediction_error(Y_tr, mf_pred))
    # Test-set prediction errors.
    s_pred, _ = simple_model.eval(ZX_te)
    mf_pred, _ = mf_model.eval_at_fidel(Z_te, X_te)
    _print_float_results(reporter, 'Test Error',
                         compute_average_sq_prediction_error(Y_te, s_pred),
                         compute_average_sq_prediction_error(Y_te, mf_pred))
예제 #3
0
 def test_eval(self):
   """ Tests the evaluation. """
   self.report('MFGP.eval_at_fidel: Probabilistic test, might fail sometimes.')
   num_successes = 0
   for ds in self.datasets:
     # Prediction error of an MFGP built on this dataset.
     gp = build_mfgp_with_dataset(ds)
     gp_preds, _ = gp.eval_at_fidel(ds.Z_te, ds.X_te)
     gp_err = compute_average_sq_prediction_error(ds.Y_te, gp_preds)
     # Baseline: predicting the constant training mean everywhere.
     baseline_err = compute_average_sq_prediction_error(ds.Y_te, ds.Y_tr.mean())
     did_succeed = gp_err < baseline_err
     # '%s' % x == str(x), so this reproduces the original report string.
     msg = '(N,DZ,DX)=%s:: MFGP-err= %s,   Const-err= %s,  success=%s' % (
         str(ds.Z_tr.shape + (ds.X_tr.shape[1],)), gp_err, baseline_err,
         did_succeed)
     self.report(msg, 'test_result')
     num_successes += int(did_succeed)
   # The GP should beat the constant baseline on a clear majority of datasets.
   assert num_successes > 0.6 * len(self.datasets)
예제 #4
0
 def test_compute_average_sq_prediction_error(self):
     """ Tests compute_average_sq_prediction_error. """
     self.report('compute_average_sq_prediction_error')
     # Squared errors are (2-0)^2, (1-0)^2, (2-1)^2, so the mean is 6/3 = 2.
     truth = [0, 1, 2]
     guess = [2, 0, 1]
     err = general_utils.compute_average_sq_prediction_error(truth, guess)
     assert np.abs(err - 2.0) < 1e-5
예제 #5
0
 def test_eval(self):
   """ Test for prediction. """
   self.report('Prediction for fitted Simple vs MF GPs. Probabilistic test, might fail.')
   num_successes = 0
   for ds in self.datasets:
     # Error of the simple GP evaluated on the joint test inputs.
     sgp = fit_simple_gp_with_dataset(ds)
     sgp_pred, _ = sgp.eval(ds.ZX_te)
     sgp_err = compute_average_sq_prediction_error(ds.Y_te, sgp_pred)
     # Error of the multi-fidelity GP on the same test points.
     mf_model = fit_mfgp_with_dataset(ds)
     mf_pred, _ = mf_model.eval_at_fidel(ds.Z_te, ds.X_te)
     mf_err = compute_average_sq_prediction_error(ds.Y_te, mf_pred)
     # Success means the two errors are within 1e-2 of each other.
     ok = abs(mf_err - sgp_err) < 1e-2
     self.report(
         '(N,DZ,DX)= %s, simple-err=%0.4f, mfgp-err=%0.4f, succ=%d' % (
             str(ds.Z_tr.shape + (ds.X_tr.shape[1],)), sgp_err, mf_err, ok),
         'test_result')
     num_successes += int(ok)
   assert num_successes > 0.6 * len(self.datasets)
예제 #6
0
 def _test_marginal_likelihood(self, get_gp_func1, get_gp_func2, descr1,
                               descr2, compare_fits=True):
     """ Test for marginal likelihood.

     get_gp_func1/get_gp_func2 build a GP from (dataset, kernel_type); descr1
     and descr2 label them in the reports.  When compare_fits is True, asserts
     that the second GP wins on a majority of trials, both by log marginal
     likelihood and by test error.
     """
     num_lml_successes = 0
     num_err_successes = 0
     num_experiments = 0
     for ds_idx, ds in enumerate(self.datasets):
         for kernel_type in ['se', 'matern']:
             # Build both GPs on the same data and kernel.
             gp1 = get_gp_func1(ds, kernel_type)
             gp2 = get_gp_func2(ds, kernel_type)
             lml1 = gp1.compute_log_marginal_likelihood()
             lml2 = gp2.compute_log_marginal_likelihood()
             # Test-set prediction errors for both models.
             preds1, _ = gp1.eval(ds.ZX_te)
             preds2, _ = gp2.eval(ds.ZX_te)
             err1 = compute_average_sq_prediction_error(ds.Y_te, preds1)
             err2 = compute_average_sq_prediction_error(ds.Y_te, preds2)
             # gp2 is expected to have higher likelihood and lower error.
             lml_success = lml1 <= lml2
             err_success = err1 >= err2
             self.report(
                 '%d. (N,DZ,DX)= %s, kern=%s %s-lml=%0.4f, %s-lml=%0.4f, succ=%d' %
                 (ds_idx + 1, str(ds.Z_tr.shape + (ds.X_tr.shape[1],)),
                  kernel_type, descr1, lml1, descr2, lml2, lml_success),
                 'test_result')
             self.report(
                 '  %s-err = %0.4f, %s-err = %0.4f, err_succ=%d.' %
                 (descr1, err1, descr2, err2, err_success), 'test_result')
             self.report('  %s GP :: %s' % (descr1, str(gp1)), 'test_result')
             self.report('  %s GP:: %s' % (descr2, str(gp2)), 'test_result')
             num_lml_successes += int(lml_success)
             num_err_successes += int(err_success)
             num_experiments += 1
     if compare_fits:
         assert num_lml_successes > 0.6 * num_experiments
         assert num_err_successes > 0.6 * num_experiments