Example #1
# Imports assumed by this snippet; BaseSurrogate is defined elsewhere in the project.
import numpy as np
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import ConstantKernel, Matern


class GPSurrogate(BaseSurrogate):
    gp = None

    def __init__(self, num_issues, X, y):
        BaseSurrogate.__init__(self, num_issues, X, y)
        # Instantiate a Gaussian process model.
        # TODO: investigate which kernel we should be using.
        # TODO: for now, to keep the comparison fair, this uses the same kernel as the
        # Bayesian optimization, i.e., the kernels provided by skopt with the same parameters.
        cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))
        other_kernel = Matern(length_scale=np.ones(num_issues),
                              length_scale_bounds=[(0.01, 100)] * num_issues,
                              nu=2.5)
        self.gp = GaussianProcessRegressor(
            kernel=cov_amplitude * other_kernel,
            normalize_y=True,
            n_restarts_optimizer=2,
            noise=1e-9,
            # Seed drawn from NumPy's global RandomState via a private handle,
            # mirroring skopt's own default seeding.
            random_state=np.random.mtrand._rand.randint(
                0,
                np.iinfo(np.int32).max))
        # Another option is the default kernel, i.e., an RBF kernel:
        # self.gp = GaussianProcessRegressor(kernel=None, n_restarts_optimizer=9)

    def update_surrogate(self):
        # Refit the GP on all observations collected so far.
        self.gp.fit(self.X, self.y)

    def query(self, x):
        # Reshape a single point to the (1, n_features) shape expected by predict().
        x = np.array(x).reshape(1, -1)
        return self.gp.predict(x)

    def getGP(self):
        return self.gp
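
A minimal usage sketch with hypothetical data; it assumes `BaseSurrogate.__init__` simply stores `num_issues`, `X`, and `y` on the instance, which is not shown above:

import numpy as np

X = np.random.rand(20, 2)           # 20 observed offers over 2 issues
y = np.random.rand(20)              # observed utilities
surrogate = GPSurrogate(num_issues=2, X=X, y=y)
surrogate.update_surrogate()        # fit the GP to the stored data
print(surrogate.query([0.5, 0.5]))  # predicted utility for a new offer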
Example #2
import numpy as np
from numpy.testing import assert_array_almost_equal
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import HammingKernel


def test_gp_regressor():
    rng = np.random.RandomState(0)
    X = np.asarray([
        ["ham", "spam", "ted"],
        ["ham", "ted", "ted"],
        ["ham", "spam", "spam"]])
    y = rng.randn(3)
    hm = HammingKernel(length_scale=[1.0, 1.0, 1.0])

    gpr = GaussianProcessRegressor(hm)
    gpr.fit(X, y)
    assert_array_almost_equal(gpr.predict(X), y)
    assert_array_almost_equal(gpr.predict(X[:2]), y[:2])
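
The assertions pass because a near-noiseless GP interpolates its training points. As a rough illustration of what a Hamming-type kernel computes on categorical vectors (a simplified form for intuition, not skopt's exact `HammingKernel` parameterization):

import numpy as np

def hamming_similarity(a, b, length_scale):
    # Similarity decays with the number of mismatched positions,
    # weighted per dimension; identical vectors score 1.0.
    mismatch = np.asarray(a) != np.asarray(b)
    return float(np.exp(-np.sum(mismatch / np.asarray(length_scale))))

print(hamming_similarity(["ham", "spam", "ted"],
                         ["ham", "ted", "ted"], [1.0, 1.0, 1.0]))  # ~0.37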
Example #3
    def init_model(self):
        """initializes the surrogate model of the gaussian process

        the model gets created with the right parameters, but is not fit with any data yet. the `base_model` will be
        cloned in `update_model` and fit with observation data
        """
        # n_dims == n_hparams
        n_dims = len(self.searchspace.keys())

        if self.interim_results:
            n_dims += 1  # add one dimension for the augmented budget

        cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))

        other_kernel = Matern(
            length_scale=np.ones(n_dims),
            length_scale_bounds=[(0.01, 100)] * n_dims,
            nu=2.5,
        )
        base_model = GaussianProcessRegressor(
            kernel=cov_amplitude * other_kernel,
            normalize_y=True,
            noise="gaussian",
            n_restarts_optimizer=2,
        )
        self.base_model = base_model
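
`update_model` itself is not shown; a minimal sketch of the clone-and-fit step the docstring describes, assuming scikit-learn's `clone` helper (the method body here is an illustration, not the project's code):

from sklearn.base import clone

def update_model(self, X, y):
    # clone() returns an unfitted copy, so base_model keeps its
    # pristine kernel configuration across refits.
    model = clone(self.base_model)
    model.fit(X, y)
    self.model = model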
Example #4
    def _get_gp(self) -> GaussianProcessRegressor:
        return GaussianProcessRegressor(
            kernel=self.kernel,
            normalize_y=False,
            optimizer=None,
            n_restarts_optimizer=-1,  # Do not use scikit-learn's optimization routine
            alpha=0,  # Governed by the kernel
            noise=None,
        )
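
Here `optimizer=None` disables scikit-learn's marginal-likelihood optimization and `alpha=0` moves all noise handling into the kernel. A sketch of the kind of externally managed kernel this pattern assumes (the composition below is illustrative, not taken from the source):

from sklearn.gaussian_process.kernels import ConstantKernel, Matern, WhiteKernel

# Noise enters through the WhiteKernel term, which is why alpha can be 0.
kernel = ConstantKernel(1.0) * Matern(nu=2.5) + WhiteKernel(noise_level=1e-6)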
Example #5
    def _get_gp(self, alpha=0) -> GaussianProcessRegressor:
        return GaussianProcessRegressor(
            kernel=self.kernel,
            normalize_y=False,
            optimizer=None,
            n_restarts_optimizer=-1,  # Do not use scikit-learn's optimization routine
            alpha=alpha,  # originally 0 (governed by the kernel); made configurable to avoid an RBF kernel error
            noise=None,
            random_state=self.rng,
        )
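
A small positive `alpha` adds jitter to the covariance diagonal, which keeps the Cholesky factorization inside the GP from failing when an RBF kernel yields a numerically singular covariance matrix. A minimal standalone demonstration of the jitter trick:

import numpy as np

K = np.array([[1.0, 1.0],
              [1.0, 1.0]])                     # degenerate covariance (duplicate points)
# np.linalg.cholesky(K) raises LinAlgError: the matrix is singular.
L = np.linalg.cholesky(K + 1e-10 * np.eye(2))  # jitter makes it positive definite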
Example #6
def test_gp_regressor():
    rng = np.random.RandomState(0)
    X = np.asarray([["ham", "spam", "ted"], ["ham", "ted", "ted"],
                    ["ham", "spam", "spam"]])
    y = rng.randn(3)
    hm = HammingKernel(length_scale=[1.0, 1.0, 1.0])
    if UseOrdinalEncoder:  # UseOrdinalEncoder is a module-level flag (not shown here)
        enc = OrdinalEncoder()
        enc.fit(X)

    gpr = GaussianProcessRegressor(hm)
    if UseOrdinalEncoder:
        gpr.fit(enc.transform(X), y)
        assert_array_almost_equal(gpr.predict(enc.transform(X)), y)
        assert_array_almost_equal(gpr.predict(enc.transform(X[:2])), y[:2])
    else:
        gpr.fit(X, y)
        assert_array_almost_equal(gpr.predict(X), y)
        assert_array_almost_equal(gpr.predict(X[:2]), y[:2])
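
For reference, `OrdinalEncoder` (from `sklearn.preprocessing`) maps each categorical column to integer codes, so the GP then fits on a purely numeric matrix:

import numpy as np
from sklearn.preprocessing import OrdinalEncoder

X = np.asarray([["ham", "spam", "ted"],
                ["ham", "ted", "ted"],
                ["ham", "spam", "spam"]])
print(OrdinalEncoder().fit_transform(X))
# [[0. 0. 1.]
#  [0. 1. 1.]
#  [0. 0. 0.]]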