Code example #1
    def test_fast_computation(self):
        from edge.utils import device, cuda
        if device != cuda:
            self.assertTrue(
                False, "This test should be run with device=cuda. "
                "See edge.utils.device.py to set this.")

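        # Build a GP on a large (9000-point) training set with value-structure
        # discounting; the prediction below should run without raising.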
        x = np.linspace(0, 1, 9000, dtype=np.float32).reshape((-1, 1))
        y = np.exp(-x**2).reshape(-1)
        gp = MaternGP(x,
                      y,
                      noise_constraint=(0, 1e-3),
                      value_structure_discount_factor=0.5)
        x_ = np.linspace(0, 1, 20, dtype=np.float32).reshape((-1, 1))
        y_ = np.exp(-x_**2).reshape(-1)

        #with gpytorch.settings.fast_computations(solves=False):
        try:
            pred = gp.predict(x_).mean.cpu().numpy()
        except Exception as e:
            if DEBUG:
                try:
                    import pudb
                    pudb.post_mortem()
                except ImportError:
                    pass
            self.assertTrue(
                False, f'Prediction failed with the following error: {str(e)}')
        self.assertTrue(True)
Code example #2
    def test_neighbor_erasing_dataset_1(self):
        x = np.linspace(0, 1, 100, dtype=np.float32).reshape((-1, 1))
        y = np.exp(-x**2).reshape(-1)

        r = 0.0125

        gp = MaternGP(x,
                      y,
                      noise_constraint=(0, 1e-3),
                      dataset_type='neighborerasing',
                      dataset_params={'radius': r})
        x_ = np.linspace(0, 1, 20, dtype=np.float32).reshape((-1, 1))
        y_ = np.exp(-x_**2).reshape(-1)

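        # Appending new points should erase previously stored points that lie
        # within radius r of any appended point, while keeping the new points.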
        gp.append_data(x_, y_)
        distances = np.abs(gp.train_x.cpu().numpy().reshape((-1, 1)) -
                           x_.squeeze())

        self.assertTrue(np.all(distances[:-len(x_), :] >= r))
        self.assertTrue((gp.train_x.cpu().numpy()[-len(x_):, :] == x_).all())
        # Set to True to plot
        if False:
            import matplotlib.pyplot as plt
            plt.figure()
            plt.scatter(
                gp.train_x.cpu().numpy()[-len(x_):],
                gp.train_x.cpu().numpy()[-len(x_):],
                color='r',
            )
            plt.scatter(gp.train_x.cpu().numpy()[:-len(x_)],
                        gp.train_x.cpu().numpy()[:-len(x_)],
                        color='b')
Code example #3
    def test_hyper_optimization_1(self):
        warnings.simplefilter('ignore', gpytorch.utils.warnings.GPInputWarning)

        tol = 5e-3
        lengthscale = 1.5
        outputscale = 2
        x = np.linspace(-1, 1, 501).reshape((-1, 1))
        # Even though the data is generated by a Matern function, the lengthscale and outputscale learned by the
        # Matern kernel GP do NOT need to coincide with the ones used to generate the data: the learned ones
        # correspond to the influence of nearby points, not to the global structure of the data.
        y = self._matern_52(x,
                            lengthscale=lengthscale,
                            outputscale=outputscale).reshape(-1)
        x_train = x[::2]
        y_train = y[::2]
        x_test = x[1::2]
        y_test = y[1::2]

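        # Start the hyperparameters away from the generating values, then refine
        # them with successive Adam runs at decreasing learning rates.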
        gp = MaternGP(x_train,
                      y_train,
                      nu=5 / 2,
                      noise_constraint=(0, 1e-5),
                      hyperparameters_initialization={
                          'covar_module.base_kernel.lengthscale': 1,
                          'covar_module.outputscale': 1
                      })
        gp.optimizer = torch.optim.Adam
        gp.optimize_hyperparameters(epochs=50, lr=0.01)
        gp.optimize_hyperparameters(epochs=50, lr=0.001)
        gp.optimize_hyperparameters(epochs=30, lr=0.0001)

        predictions = gp.predict(x_test)
        y_pred = predictions.mean.cpu().numpy()

        passed = np.all(np.abs(y_test - y_pred) < tol)

        if passed:
            self.assertTrue(True)
        else:
            print(f'Max(diff): {np.abs(y_test - y_pred).max()}')
            x_train = x_train.reshape(-1)
            x_test = x_test.reshape(-1)
            f, ax = plt.subplots(1, 1, figsize=(8, 8))
            lower, upper = predictions.confidence_region()
            ax.plot(x_train, y_train, 'k*')
            ax.plot(x_test, y_pred, 'g-*')
            ax.plot(x_test, y_test, 'r*')
            ax.fill_between(x_test,
                            lower.cpu().numpy(),
                            upper.cpu().numpy(),
                            alpha=0.5)
            ax.legend([
                'Observed Data', 'Prediction', 'Hidden data', 'Confidence',
                'Noise'
            ])
            ax.grid(True)
            #plt.show()
            self.assertTrue(False)
Code example #4
    def test_data_manipulation(self):
        tol = 1e-1
        x = np.linspace(0, 1, 101).reshape((-1, 1))
        y = np.exp(-x**2).reshape(-1)
        x_ = np.linspace(1.5, 2, 51).reshape((-1, 1))
        y_ = 1 + np.exp(-x_**2).reshape(-1)

        gp = MaternGP(x, y, noise_prior=(0.1, 0.1))

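        # empty_data() is expected to clear the training set and return the GP
        # itself.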
        tmp = gp.empty_data()
        self.assertEqual(tmp, gp)
        self.assertEqual(tuple(gp.train_x.shape), (0, 1))
        # GPyTorch fails when predicting with an empty dataset, so the following line fails if uncommented
        # gp.predict(x)

        gp.set_data(x, y)
        self.assertEqual(tuple(gp.train_x.shape), (len(x), 1))

        gp.optimize_hyperparameters(epochs=10)
        gp_pred = gp.predict(x_).mean.cpu().numpy()
        self.assertFalse(np.all(np.abs(gp_pred - y_) < tol))

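        # append_data() should return a GP whose training set contains both the
        # original and the appended points.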
        tmp = gp.append_data(x_, y_)
        # self.assertTrue(gp != tmp)
        # self.assertEqual(tuple(gp.train_x.shape), (len(x), 1))
        self.assertEqual(tuple(tmp.train_x.shape), (len(x) + len(x_), 1))

        tmp.optimize_hyperparameters(epochs=10)
        tmp_pred = tmp.predict(x_).mean.cpu().numpy()
        self.assertTrue(np.all(np.abs(tmp_pred - y_) < tol))
Code example #5
    def test_multi_dim_input(self):
        tol = 0.1
        lin = np.linspace(0, 1, 101)
        x = lin.reshape((-1, 1))
        x = x + x.T
        y = np.exp(-lin**2)

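        # Each row of x + x.T is a 101-dimensional input; the GP should accept
        # multi-dimensional inputs and return one prediction per query row.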
        gp = MaternGP(x, y, noise_prior=(0.1, 0.1))
        gp.optimize_hyperparameters(epochs=10)

        x_query = np.linspace(0.25, 0.75, 27)
        y_ = np.exp(-x_query**2)
        x_query = x_query.reshape((-1, 1)) + lin.reshape((1, -1))

        pred = gp.predict(x_query).mean.cpu().numpy()
        self.assertEqual(pred.shape, (27, ))
        self.assertTrue(np.all(np.abs(pred - y_) < tol))
Code example #6
    def test_large_ds_matern(self):
        x = np.linspace(0, 1, 9000, dtype=np.float32).reshape((-1, 1))
        y = np.exp(-x ** 2).reshape(-1)
        gp = MaternGP(
            x, y, noise_constraint=(0, 1e-3)
        )
        x_ = np.linspace(0, 1, 20, dtype=np.float32).reshape((-1, 1))
        y_ = np.exp(-x_ ** 2).reshape(-1)

        try:
            pred = gp.predict(x_).mean.cpu().numpy()
        except Exception as e:
            if DEBUG:
                import pudb
                pudb.post_mortem()
            self.assertTrue(False, f'Prediction failed with the following error: {str(e)}')
        self.assertTrue(True)
Code example #7
File: policy_test.py Project: sheim/edge
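# Helper for the policy tests: builds a Matern GP over scalar inputs, with the
# observation noise entering through the noise prior.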
def get_gp(x_train, y_train, noise):
    return MaternGP(
        x_train.reshape((-1, 1)),
        y_train,
        noise_prior=(noise**2, 1e-3),
        lengthscale_prior=(1, 1e-3),
        outputscale_prior=(1, 1e-3)
    )
Code example #8
    def test_load_save(self):
        x = np.linspace(0, 1, 11)
        y = np.sin(2 * np.pi * x) + np.random.randn(len(x)) * 0.2

        model = MaternGP(x,
                         y,
                         noise_prior=(1, 0.1),
                         noise_constraint=0.5,
                         outputscale_constraint=(2.7, np.pi))
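        # Save the model's state to a temporary file, reload it with the same
        # data, and check that the outputscale survives the round trip.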
        save_file = tempfile.NamedTemporaryFile(suffix='.pth').name
        model.save(save_file)
        self.assertTrue(os.path.isfile(save_file))

        loaded = MaternGP.load(save_file, x, y)

        self.assertEqual(model.covar_module.outputscale,
                         loaded.covar_module.outputscale)
Code example #9
    def test_hyper_optimization_0(self):
        warnings.simplefilter('ignore', gpytorch.utils.warnings.GPInputWarning)

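        # Fit the GP to noise-free samples of exp(-x^2) and require the posterior
        # mean at the training inputs to match the targets to within tol.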
        tol = 1e-3
        x = np.linspace(0, 1, 101).reshape((-1, 1))
        y = np.exp(-x**2).reshape(-1)

        gp = MaternGP(x, y, noise_constraint=(0, 1e-4))
        gp.optimize_hyperparameters(epochs=20)
        gp.optimize_hyperparameters(epochs=20, lr=0.01)

        predictions = gp.predict(x)
        y_pred = predictions.mean.cpu().numpy()

        passed = np.all(np.abs(y - y_pred) < tol)

        if passed:
            self.assertTrue(True)
        else:
            x = x.reshape(-1)
            f, ax = plt.subplots(1, 1, figsize=(4, 3))
            lower, upper = predictions.confidence_region()
            ax.plot(x, y, 'k*')
            ax.plot(x, y_pred, 'b')
            ax.fill_between(x,
                            lower.cpu().numpy(),
                            upper.cpu().numpy(),
                            alpha=0.5)
            ax.legend(['Observed Data', 'Mean', 'Confidence', 'Noise'])
            ax.grid(True)
            #plt.show()
            self.assertTrue(False)  # The test necessarily fails in this branch
Code example #10
    def test_multivariate_normal_prior(self):
        x = np.linspace(0, 1, 100, dtype=np.float32).reshape((-1, 2))
        y = np.exp(-x @ x.T).reshape(-1)

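        # Passing a pair of two-element tuples as lengthscale_prior should yield
        # a MultivariateNormalPrior, with the lengthscale initialized to its mean.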
        gp = MaternGP(x, y, lengthscale_prior=((1, 0.01), (10, 1)))
        kernel = gp.covar_module.base_kernel
        prior = kernel.lengthscale_prior
        self.assertIsInstance(gp.covar_module.base_kernel.lengthscale_prior,
                              gpytorch.priors.MultivariateNormalPrior)
        self.assertTrue((kernel.lengthscale.squeeze() == prior.mean).all())
Code example #11
    def get_gp(self, actions=None, rewards=None, gamma=0.5, n=11):
        if actions is None or rewards is None:
            actions, rewards = self.get_examples(n)
        gp = MaternGP(
            train_x=actions,
            train_y=rewards,
            lengthscale_prior=(0.1, 0.01),
            noise_prior=(self.noise + 1e-6, 0.01),
            noise_constraint=(self.noise / 10, self.noise * 10 + 1e-4),
            value_structure_discount_factor=gamma,
        )
        return gp
Code example #12
    def test_neighbor_erasing_dataset_2(self):
        x = np.linspace(0, 1, 100, dtype=np.float32).reshape((-1, 1))
        y = np.exp(-x**2).reshape(-1)

        r = 0.0125

        gp = MaternGP(x,
                      y,
                      noise_constraint=(0, 1e-3),
                      dataset_type='neighborerasing',
                      dataset_params={'radius': r})
        x_ = np.linspace(0, 1, 20, dtype=np.float32).reshape((-1, 1))
        y_ = np.exp(-x_**2).reshape(-1)
        forgettable = [False] * len(y_)

        gp.append_data(x_, y_, forgettable=forgettable)

        x__ = np.linspace(0, 1, 21, dtype=np.float32).reshape((-1, 1))
        y__ = np.exp(-x__**2).reshape(-1)

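        # Points appended with forgettable=False should never be erased, so both
        # batches are expected to remain in the training set after this append.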
        gp.append_data(x__, y__)

        all_present_x_ = all(
            x_ex.tolist() in gp.train_x.cpu().numpy().tolist() for x_ex in x_)
        all_present_x__ = all(
            x__ex.tolist() in gp.train_x.cpu().numpy().tolist()
            for x__ex in x__)

        self.assertTrue(all_present_x_ and all_present_x__)
Code example #13
    def test_timeforgetting_dataset(self):
        x = np.linspace(0, 1, 100, dtype=np.float32).reshape((-1, 1))
        y = np.exp(-x**2).reshape(-1)

        gp = MaternGP(x,
                      y,
                      noise_constraint=(0, 1e-3),
                      dataset_type='timeforgetting',
                      dataset_params={'keep': 50})
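        # With dataset_type='timeforgetting' and keep=50, only the 50 most recent
        # points are retained; append_data and set_data follow the same rule.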
        self.assertTrue((gp.train_x.cpu().numpy() == x[-50:]).all())
        self.assertTrue((gp.train_y.cpu().numpy() == y[-50:]).all())
        gp.append_data(x[:10], y[:10])
        self.assertTrue((gp.train_x.cpu().numpy() == np.vstack(
            (x[-40:], x[:10]))).all())
        self.assertTrue((gp.train_y.cpu().numpy() == np.hstack(
            (y[-40:], y[:10]))).all())
        gp.set_data(x[:75], y[:75])
        self.assertTrue((gp.train_x.cpu().numpy() == x[25:75]).all())
        self.assertTrue((gp.train_y.cpu().numpy() == y[25:75]).all())
Code example #14
    def test_initialization(self):
        x = np.arange(18).reshape((6, 3))
        y = np.arange(6)
        gp = MaternGP(x, y)
        self.assertTrue(torch.is_tensor(gp.train_x))
        self.assertTrue(torch.is_tensor(gp.train_y))