Example #1
    def test_data_manipulation(self):
        tol = 1e-1
        x = np.linspace(0, 1, 101).reshape((-1, 1))
        y = np.exp(-x**2).reshape(-1)
        x_ = np.linspace(1.5, 2, 51).reshape((-1, 1))
        y_ = 1 + np.exp(-x_**2).reshape(-1)

        gp = MaternGP(x, y, noise_prior=(0.1, 0.1))

        tmp = gp.empty_data()
        self.assertEqual(tmp, gp)
        self.assertEqual(tuple(gp.train_x.shape), (0, 1))
        # GPyTorch fails when predicting with an empty dataset, so the following line fails if uncommented
        # gp.predict(x)

        gp.set_data(x, y)
        self.assertEqual(tuple(gp.train_x.shape), (len(x), 1))

        gp.optimize_hyperparameters(epochs=10)
        gp_pred = gp.predict(x_).mean.cpu().numpy()
        self.assertFalse(np.all(np.abs(gp_pred - y_) < tol))

        tmp = gp.append_data(x_, y_)
        # self.assertTrue(gp != tmp)
        # self.assertEqual(tuple(gp.train_x.shape), (len(x), 1))
        self.assertEqual(tuple(tmp.train_x.shape), (len(x) + len(x_), 1))

        tmp.optimize_hyperparameters(epochs=10)
        tmp_pred = tmp.predict(x_).mean.cpu().numpy()
        self.assertTrue(np.all(np.abs(tmp_pred - y_) < tol))
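`MaternGP` itself is not shown in these examples. Judging from the calls above, `empty_data` clears the training set in place and returns the model, while `append_data` returns a model holding the extended dataset. A minimal sketch of such an interface on top of GPyTorch's `ExactGP.set_train_data` (all names and details below are assumptions, not the original implementation):

import gpytorch
import torch

class SketchGP(gpytorch.models.ExactGP):
    # Hypothetical stand-in for MaternGP's data-manipulation API.
    def __init__(self, train_x, train_y, likelihood=None, nu=2.5):
        train_x = torch.as_tensor(train_x, dtype=torch.float)
        train_y = torch.as_tensor(train_y, dtype=torch.float)
        likelihood = likelihood or gpytorch.likelihoods.GaussianLikelihood()
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.MaternKernel(nu=nu))

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x))

    def set_data(self, x, y):
        # Replace the training set in place; strict=False allows a new size
        self.set_train_data(torch.as_tensor(x, dtype=torch.float),
                            torch.as_tensor(y, dtype=torch.float),
                            strict=False)

    def empty_data(self):
        # Clear the dataset in place and return self, which would satisfy
        # the assertEqual(tmp, gp) check above
        self.set_data(torch.empty(0, 1), torch.empty(0))
        return self

    def append_data(self, x, y):
        # Return a new model built on the concatenated dataset
        x = torch.cat([self.train_inputs[0],
                       torch.as_tensor(x, dtype=torch.float)])
        y = torch.cat([self.train_targets,
                       torch.as_tensor(y, dtype=torch.float)])
        return SketchGP(x, y, likelihood=self.likelihood)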
Example #2
    def test_hyper_optimization_0(self):
        warnings.simplefilter('ignore', gpytorch.utils.warnings.GPInputWarning)

        tol = 1e-3
        x = np.linspace(0, 1, 101).reshape((-1, 1))
        y = np.exp(-x**2).reshape(-1)

        gp = MaternGP(x, y, noise_constraint=(0, 1e-4))
        gp.optimize_hyperparameters(epochs=20)
        gp.optimize_hyperparameters(epochs=20, lr=0.01)

        predictions = gp.predict(x)
        y_pred = predictions.mean.cpu().numpy()

        passed = np.all(np.abs(y - y_pred) < tol)

        if passed:
            self.assertTrue(True)
        else:
            x = x.reshape(-1)
            f, ax = plt.subplots(1, 1, figsize=(4, 3))
            lower, upper = predictions.confidence_region()
            ax.plot(x, y, 'k*')
            ax.plot(x, y_pred, 'b')
            ax.fill_between(x,
                            lower.cpu().numpy(),
                            upper.cpu().numpy(),
                            alpha=0.5)
            ax.legend(['Observed Data', 'Mean', 'Confidence'])
            ax.grid(True)
            #plt.show()
            self.assertTrue(False)  # Reaching this branch means the test failed
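`optimize_hyperparameters(epochs, lr)` is likewise not shown. It presumably runs the standard GPyTorch loop that maximizes the exact marginal log-likelihood with a gradient optimizer (Example #4 below swaps in `torch.optim.Adam` via `gp.optimizer`, suggesting the optimizer class is configurable). A sketch of that loop under those assumptions:

import gpytorch
import torch

def optimize_hyperparameters(model, epochs=20, lr=0.1):
    # model is an ExactGP whose likelihood is a submodule, so
    # model.parameters() already covers the noise parameters
    model.train()
    model.likelihood.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(model.likelihood, model)
    for _ in range(epochs):
        optimizer.zero_grad()
        output = model(*model.train_inputs)
        loss = -mll(output, model.train_targets)
        loss.backward()
        optimizer.step()
    model.eval()
    model.likelihood.eval()

Calling it several times with a decreasing lr, as the tests do, acts as a coarse learning-rate schedule.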
Example #3
    def test_multi_dim_input(self):
        tol = 0.1
        lin = np.linspace(0, 1, 101)
        x = lin.reshape((-1, 1))
        x = x + x.T
        y = np.exp(-lin**2)

        gp = MaternGP(x, y, noise_prior=(0.1, 0.1))
        gp.optimize_hyperparameters(epochs=10)

        x_query = np.linspace(0.25, 0.75, 27)
        y_ = np.exp(-x_query**2)
        x_query = x_query.reshape((-1, 1)) + lin.reshape((1, -1))

        pred = gp.predict(x_query).mean.cpu().numpy()
        self.assertEqual(pred.shape, (27, ))
        self.assertTrue(np.all(np.abs(pred - y_) < tol))
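GPyTorch kernels treat each row of the input as one point, so this test trains on 101 points in R^101 and queries 27 points in the same space, which is why the prediction has shape (27,). The shape convention can be checked directly on a kernel, independently of `MaternGP` (`.evaluate()` is the lazy-tensor call in the older GPyTorch versions these tests target; newer releases rename it `.to_dense()`):

import gpytorch
import torch

kernel = gpytorch.kernels.MaternKernel(nu=2.5)
X = torch.randn(101, 101)    # 101 training points in R^101, as above
Q = torch.randn(27, 101)     # 27 query points in the same space
K = kernel(Q, X).evaluate()  # cross-covariance matrix, shape (27, 101)
print(K.shape)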
Example #4
    def test_hyper_optimization_1(self):
        warnings.simplefilter('ignore', gpytorch.utils.warnings.GPInputWarning)

        tol = 5e-3
        lengthscale = 1.5
        outputscale = 2
        x = np.linspace(-1, 1, 501).reshape((-1, 1))
        # Even though the data is generated by a Matern function, the lengthscale and outputscale learned by the
        # Matern-kernel GP need NOT coincide with the values used to generate the data: the learned ones reflect the
        # influence of nearby points, not the global structure of the data.
        y = self._matern_52(x,
                            lengthscale=lengthscale,
                            outputscale=outputscale).reshape(-1)
        x_train = x[::2]
        y_train = y[::2]
        x_test = x[1::2]
        y_test = y[1::2]

        gp = MaternGP(x_train,
                      y_train,
                      nu=5 / 2,
                      noise_constraint=(0, 1e-5),
                      hyperparameters_initialization={
                          'covar_module.base_kernel.lengthscale': 1,
                          'covar_module.outputscale': 1
                      })
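        # The dotted keys above presumably feed gpytorch.Module.initialize,
        # which accepts names like 'covar_module.outputscale' and walks the
        # submodule tree, e.g. gp.initialize(**{'covar_module.outputscale': 1.0})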
        gp.optimizer = torch.optim.Adam
        gp.optimize_hyperparameters(epochs=50, lr=0.01)
        gp.optimize_hyperparameters(epochs=50, lr=0.001)
        gp.optimize_hyperparameters(epochs=30, lr=0.0001)

        predictions = gp.predict(x_test)
        y_pred = predictions.mean.cpu().numpy()

        passed = np.all(np.abs(y_test - y_pred) < tol)

        if passed:
            self.assertTrue(True)
        else:
            print(f'Max(diff): {np.abs(y_test - y_pred).max()}')
            x_train = x_train.reshape(-1)
            x_test = x_test.reshape(-1)
            f, ax = plt.subplots(1, 1, figsize=(8, 8))
            lower, upper = predictions.confidence_region()
            ax.plot(x_train, y_train, 'k*')
            ax.plot(x_test, y_pred, 'g-*')
            ax.plot(x_test, y_test, 'r*')
            ax.fill_between(x_test,
                            lower.cpu().numpy(),
                            upper.cpu().numpy(),
                            alpha=0.5)
            ax.legend([
                'Observed Data', 'Prediction', 'Hidden data', 'Confidence'
            ])
            ax.grid(True)
            #plt.show()
            self.assertTrue(False)
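`self._matern_52` is defined elsewhere in the test class. A plausible reconstruction, and only a guess, is the closed-form Matern-5/2 covariance evaluated against the origin, which yields a smooth deterministic target parameterized by exactly the two values the test passes:

import numpy as np

def _matern_52(x, lengthscale=1.0, outputscale=1.0):
    # Matern-5/2 covariance k(x, 0):
    # outputscale * (1 + sqrt(5)|x|/l + 5x^2/(3l^2)) * exp(-sqrt(5)|x|/l)
    r = np.sqrt(5.0) * np.abs(x) / lengthscale
    return outputscale * (1.0 + r + r**2 / 3.0) * np.exp(-r)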