Example #1
    def __init__(self):
        """Initializes the algorithm with a parameter configuration. """

        # TODO: enter your code here
        # speed threshold
        epsilon = 0.01
        self.kappa = 1.2 + epsilon  # 1.21 / 1.20

        # Validation Function F
        self.f_noise = 0.5  # tried 0.5 / 0.15 / 0.4; changing this changes the results, and higher noise levels gave better results
        self.f_var = 0.5
        self.f_lenscale = 0.5
        self.f_nu = 2.5

        k_1 = Matern(length_scale=self.f_lenscale,
                     length_scale_bounds="fixed",
                     nu=self.f_nu)
        k_2 = ConstantKernel(constant_value=self.f_var,
                             constant_value_bounds="fixed")
        k_3 = WhiteKernel(noise_level=self.f_noise)
        self.f_kernel = Sum(Product(k_1, k_2), k_3)
        # Default GaussianProcessRegressor (check argument possibilities)
        self.gp_f = GaussianProcessRegressor(kernel=self.f_kernel,
                                             alpha=1e-10,
                                             optimizer='fmin_l_bfgs_b',
                                             n_restarts_optimizer=0,
                                             normalize_y=False,
                                             copy_X_train=True,
                                             random_state=None)

        # Speed Function V
        self.v_noise = 0.0001
        self.v_var = np.sqrt(2)
        self.v_lenscale = 0.5
        self.v_mean = 1.5
        self.v_nu = 2.5

        kv_1 = Matern(length_scale=self.v_lenscale,
                      length_scale_bounds=[1e-5, 1e5],
                      nu=self.v_nu)
        kv_2 = ConstantKernel(constant_value=self.v_var)
        kv_3 = WhiteKernel(noise_level=self.v_noise)
        kv_4 = ConstantKernel(constant_value=self.v_mean)

        self.v_kernel = Sum(Sum(kv_4, Product(kv_1, kv_2)), kv_3)
        # Default GaussianProcessRegressor (check argument possibilities)
        self.gp_v = GaussianProcessRegressor(kernel=self.v_kernel,
                                             alpha=1e-10,
                                             optimizer='fmin_l_bfgs_b',
                                             n_restarts_optimizer=0,
                                             normalize_y=False,
                                             copy_X_train=True,
                                             random_state=None)

        # Create the array to store the data (x, f and v), plus a boolean flag used later
        self.delete_first_raw = False
        self.xfv = np.zeros([1, 3])
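
A quick aside on the kernel algebra used above: Sum(Product(k_1, k_2), k_3) builds the same composite kernel as sklearn's overloaded operators k_1 * k_2 + k_3. A minimal, self-contained sketch (the toy inputs are made up for illustration) verifying the equivalence:

import numpy as np
from sklearn.gaussian_process.kernels import (
    ConstantKernel, Matern, Product, Sum, WhiteKernel)

k_1 = Matern(length_scale=0.5, length_scale_bounds="fixed", nu=2.5)
k_2 = ConstantKernel(constant_value=0.5, constant_value_bounds="fixed")
k_3 = WhiteKernel(noise_level=0.5)

explicit = Sum(Product(k_1, k_2), k_3)  # the form used in __init__ above
operator = k_1 * k_2 + k_3              # equivalent operator syntax

X = np.random.rand(5, 1) * 5            # toy inputs on [0, 5]
assert np.allclose(explicit(X), operator(X))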
Example #2
 def test_kernel_ker2_def_ort(self):
     ker = Sum(
         CK(0.1, (1e-3, 1e3)) * RBF(length_scale=10,
                                    length_scale_bounds=(1e-3, 1e3)),
         CK(0.1, (1e-3, 1e3)) * RBF(length_scale=1,
                                    length_scale_bounds=(1e-3, 1e3)))
     onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32,
                          op_version=get_opset_number_from_onnx())
     model_onnx = onx.to_onnx(
         inputs=[('X', FloatTensorType([None, None]))],
         outputs=[('Y', FloatTensorType([None, None]))],
         target_opset=get_opset_number_from_onnx())
     model_onnx.ir_version = get_ir_version_from_onnx()
     sess = _capture_output(
         lambda: OnnxInference(model_onnx.SerializeToString(),
                               runtime="onnxruntime2"), 'c')[0]
     try:
         res = sess.run({'X': Xtest_.astype(numpy.float32)})
     except RuntimeError as e:
         if "Got invalid dimensions for input" in str(e):
             # probable bug somewhere
             return
         raise e
     m1 = res['Y']
     m2 = ker(Xtest_)
     self.assertEqualArray(m1, m2, decimal=5)
Example #3
    def _cast(self, n, X, y):
        """
        Evaluates and optimizes all legitimate combinations of length `n`.

        :param n: The length of pipelines
        :param X: Training data
        :param y: Observed values
        :return: None
        """
        from .structsearch import SurrogateRandomCV, BoxSample, CompactSample
        from importlib import import_module
        if self.couldBfirst == []:
            from sklearn.pipeline import Pipeline
        else:
            from imblearn.pipeline import Pipeline
        from sklearn.model_selection import RandomizedSearchCV
        if self.surrogates is None:
            from numpy import logspace
            from sklearn.gaussian_process import GaussianProcessRegressor
            from sklearn.kernel_ridge import KernelRidge
            from sklearn.gaussian_process.kernels import Matern, Sum, ExpSineSquared, WhiteKernel
            param_grid_gpr = {"alpha": logspace(-8, 1, 20),
                              "kernel": [Sum(Matern(length_scale=l, nu=p), WhiteKernel(noise_level=q))
                                         for l in logspace(-3, 3, 20)
                                         for p in [0.5, 1.5, 2.5]
                                         for q in logspace(-3, 1.5, 20)]}
            GPR = RandomizedSearchCV(GaussianProcessRegressor(), param_distributions=param_grid_gpr, n_iter=20, cv=2)
            param_grid_krr = {"alpha": logspace(-4, 0, 10),
                              "kernel": [Sum(Matern(), ExpSineSquared(l, p))
                                         for l in logspace(-2, 2, 20)
                                         for p in logspace(0, 2, 20)]}
            KRR = RandomizedSearchCV(KernelRidge(), param_distributions=param_grid_krr, n_iter=30, cv=2)
            self.surrogates = [(KRR, 35, CompactSample, 'L-BFGS-B'), (GPR, 50, BoxSample, 'L-BFGS-B')]
            self.min_random_evals = 10
        Pop = [cnddt for cnddt in self.words.Generate(n)
               if self._validate_sequence(cnddt)]
        for seq in Pop:
            best_mdl, best_scr = self.optimize_pipeline(seq, X, y)
            self.models[seq] = (best_mdl, best_scr)
            if self.verbose > 0:
                print("score:%f" % best_scr)
                print(best_mdl)
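
For context, here is a standalone sketch of the GPR surrogate search built above, run end to end on synthetic data; the toy data and the reduced grid sizes are assumptions made here for speed and are not part of the source:

import numpy as np
from numpy import logspace
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, Sum, WhiteKernel
from sklearn.model_selection import RandomizedSearchCV

rng = np.random.RandomState(0)
X = rng.rand(30, 2)                                # toy inputs
y = np.sin(X).sum(axis=1) + 0.05 * rng.randn(30)   # toy targets

param_grid_gpr = {"alpha": logspace(-8, 1, 5),
                  "kernel": [Sum(Matern(length_scale=l_, nu=p), WhiteKernel(noise_level=q))
                             for l_ in logspace(-3, 3, 5)
                             for p in [0.5, 1.5, 2.5]
                             for q in logspace(-3, 1.5, 5)]}
GPR = RandomizedSearchCV(GaussianProcessRegressor(),
                         param_distributions=param_grid_gpr, n_iter=5, cv=2)
GPR.fit(X, y)
print(GPR.best_params_["kernel"])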
Example #4
    def plot_action_correlations(self):

        # simply chop off the extra rows until both frames are the same size
        data = self.iter_data.all_data
        is_player_0 = data['Player'] == 0
        player_df = data[is_player_0]
        adversary_df = data[~is_player_0]
        cutoff = min(len(player_df), len(adversary_df))
        player_df = player_df.iloc[:cutoff - 1]
        adversary_df = adversary_df.iloc[:cutoff - 1]

        # get data into right format
        player_policy = list(zip(*player_df['PolicyAction']))[self.default_action]
        adv_policy = list(zip(*adversary_df['PolicyAction']))[self.default_action]
        player_mcts = list(zip(*player_df['MCTSAction']))[self.default_action]
        adv_mcts = list(zip(*adversary_df['MCTSAction']))[self.default_action]
        player_policy, adv_policy = np.array(player_policy), np.array(adv_policy)
        player_mcts, adv_mcts = np.array(player_mcts), np.array(adv_mcts)

        # Instantiate a Gaussian Process model; DotProduct is just <x, y> (a rotated linear kernel), which suits this data
        # kernel = Sum(DotProduct(sigma_0=0.1), WhiteKernel(noise_level=0.1))
        kernel = Sum(Product(DotProduct(sigma_0=0.1), RBF(0.1, (1e-2, 1e1))), WhiteKernel(noise_level=0.5))
        gp_policy = GaussianProcessRegressor(kernel=kernel, alpha=0.25 ** 2, n_restarts_optimizer=1)
        gp_mcts = GaussianProcessRegressor(kernel=kernel, alpha=0.25 ** 2, n_restarts_optimizer=1)

        # Fit to data using Maximum Likelihood Estimation of the parameters
        X_policy, X_mcts = np.atleast_2d(adv_policy).T, np.atleast_2d(adv_mcts).T
        gp_policy.fit(X_policy, player_policy)
        gp_mcts.fit(X_mcts, player_mcts)
        x = np.atleast_2d(np.linspace(0, 1, 100)).T
        y_policy, s_policy = gp_policy.predict(x, return_std=True)
        y_mcts, s_mcts = gp_mcts.predict(x, return_std=True)

        # CALCULATE CORRELATION
        corr_player = np.corrcoef(player_policy, adv_policy)[0][1]  # take a non-diagonal element
        corr_mcts = np.corrcoef(player_mcts, adv_mcts)[0][1]  # take a non-diagonal element
        correlation = [corr_player, corr_mcts]

        # SCATTER PLOTS AND FITS
        titles = ['Policy Predictions', 'MCTS Predictions']

        fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(17, 8))
        player_actions, adv_actions = [player_policy, player_mcts], [adv_policy, adv_mcts]
        y_pred, sigma = [y_policy, y_mcts], [s_policy, s_mcts]
        for axis in range(len(axes)):
            axes[axis].scatter(adv_actions[axis], player_actions[axis], marker='x')
            axes[axis].set_title("{}, Correlation Coef: {}".format(titles[axis], correlation[axis]))

            # Plot the function, the prediction and the 95% confidence interval based on the MSE
            axes[axis].plot(x, y_pred[axis], 'b-', label='GP Prediction')
            axes[axis].fill(np.concatenate([x, x[::-1]]),
                            np.concatenate([y_pred[axis] - 1.9600 * sigma[axis],
                                            (y_pred[axis] + 1.9600 * sigma[axis])[::-1]]),
                            alpha=.5, fc='b', ec='None', label='95% confidence interval')
            axes[axis].set_xlabel('p(Adversary Action = Left)')
            axes[axis].set_ylabel('p(Player Action = Left)')
            axes[axis].set_ylim(0, 1)
            axes[axis].set_xlim(0, 1)
            axes[axis].legend()
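
A condensed, self-contained sketch of the GP fit at the heart of this plot; the toy probabilities below stand in for the player/adversary action data:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (
    RBF, DotProduct, Product, Sum, WhiteKernel)

rng = np.random.RandomState(0)
adv = rng.rand(40)                                       # toy adversary probabilities
player = np.clip(0.8 * adv + 0.1 * rng.randn(40), 0, 1)  # toy player probabilities

kernel = Sum(Product(DotProduct(sigma_0=0.1), RBF(0.1, (1e-2, 1e1))),
             WhiteKernel(noise_level=0.5))
gp = GaussianProcessRegressor(kernel=kernel, alpha=0.25 ** 2,
                              n_restarts_optimizer=1)
gp.fit(np.atleast_2d(adv).T, player)

x = np.atleast_2d(np.linspace(0, 1, 100)).T
y_pred, sigma = gp.predict(x, return_std=True)
lower, upper = y_pred - 1.96 * sigma, y_pred + 1.96 * sigma  # 95% band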
Example #5
def test_sum():

    # define rbf and dot product custom kernels, and set hyperparameters
    custom_rbf_kernel = generate_kernel(rbf, rbf_grad)
    custom_dot_prod_kernel = generate_kernel(dot_prod, dot_prod_grad)

    sum_custom_kernel = Sum(custom_dot_prod_kernel, custom_rbf_kernel)
    model = GaussianProcessRegressor(kernel=sum_custom_kernel, random_state=0)
    model.fit(X_train, Y_train)
    preds1 = model.predict(X_test)

    sum_kernel = Sum(DotProduct(), RBF())
    model = GaussianProcessRegressor(kernel=sum_kernel, random_state=0)
    model.fit(X_train, Y_train)
    preds2 = model.predict(X_test)

    assert np.allclose(preds1, preds2)
Example #6
 def test_kernel_ker12_def(self):
     ker = (Sum(CK(0.1, (1e-3, 1e3)), CK(0.1, (1e-3, 1e3)) *
                RBF(length_scale=1, length_scale_bounds=(1e-3, 1e3))))
     onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32)
     model_onnx = onx.to_onnx(
         inputs=[('X', FloatTensorType([None, None]))],
         outputs=[('Y', FloatTensorType([None, None]))])
     sess = OnnxInference(model_onnx.SerializeToString())
     res = sess.run({'X': Xtest_.astype(numpy.float32)})
     m1 = res['Y']
     m2 = ker(Xtest_)
     self.assertEqualArray(m1, m2)
Example #7
 def test_kernel_ker12_def(self):
     ker = (Sum(
         C(0.1, (1e-3, 1e3)),
         C(0.1, (1e-3, 1e3)) *
         RBF(length_scale=1, length_scale_bounds=(1e-3, 1e3))))
     onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32)
     model_onnx = onx.to_onnx(inputs=[('X', FloatTensorType([None, None]))],
                              dtype=np.float32)
     sess = InferenceSession(model_onnx.SerializeToString())
     res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
     m1 = res
     m2 = ker(Xtest_)
     assert_almost_equal(m1, m2, decimal=5)
Example #8
    def test_gpr_rbf_unfitted(self):

        se = (C(1.0, (1e-3, 1e3)) *
              RBF(length_scale=10, length_scale_bounds=(1e-3, 1e3)))
        kernel = (Sum(se, C(0.1, (1e-3, 1e3)) *
                  RBF(length_scale=1, length_scale_bounds=(1e-3, 1e3))))

        gp = GaussianProcessRegressor(alpha=1e-5, kernel=kernel,
                                      n_restarts_optimizer=25,
                                      normalize_y=True)

        # return_cov=False, return_std=False
        model_onnx = to_onnx(
            gp, initial_types=[('X', FloatTensorType([]))],
            target_opset=_TARGET_OPSET_)
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(Xtest_.astype(np.float32), gp, model_onnx,
                            verbose=False,
                            basename="SklearnGaussianProcessRBFUnfitted")

        # return_cov=True, return_std=True
        options = {GaussianProcessRegressor: {"return_std": True,
                                              "return_cov": True}}
        try:
            to_onnx(gp, Xtrain_.astype(np.float32), options=options,
                    target_opset=TARGET_OPSET)
        except RuntimeError as e:
            assert "Not returning standard deviation" in str(e)

        # return_std=True
        options = {GaussianProcessRegressor: {"return_std": True}}
        model_onnx = to_onnx(
            gp, options=options,
            initial_types=[('X', FloatTensorType([None, None]))],
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        self.check_outputs(gp, model_onnx, Xtest_.astype(np.float32),
                           predict_attributes=options[GaussianProcessRegressor])

        # return_cov=True
        options = {GaussianProcessRegressor: {"return_cov": True}}
        # model_onnx = to_onnx(gp, Xtrain_.astype(np.float32), options=options)
        model_onnx = to_onnx(
            gp, options=options,
            initial_types=[('X', FloatTensorType([None, None]))],
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        self.check_outputs(gp, model_onnx, Xtest_.astype(np.float32),
                           predict_attributes=options[GaussianProcessRegressor])
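
The convert_kernel and OnnxInference helpers in these tests come from mlprodict; a rough equivalent of the export-and-compare pattern using only the public skl2onnx and onnxruntime APIs might look as follows (toy data; a sketch, not the original test):

import numpy as np
from onnxruntime import InferenceSession
from skl2onnx import to_onnx
from skl2onnx.common.data_types import FloatTensorType
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C, Sum

kernel = Sum(C(1.0, (1e-3, 1e3)) * RBF(10, (1e-3, 1e3)),
             C(0.1, (1e-3, 1e3)) * RBF(1, (1e-3, 1e3)))
gp = GaussianProcessRegressor(kernel=kernel, alpha=1e-5, normalize_y=True)

X = np.random.rand(10, 2).astype(np.float32)  # toy training data
gp.fit(X, np.random.rand(10).astype(np.float32))

onx = to_onnx(gp, initial_types=[('X', FloatTensorType([None, None]))])
sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
pred_onnx = sess.run(None, {'X': X})[0]
print(np.max(np.abs(pred_onnx.ravel() - gp.predict(X))))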
Example #9
    def test_kernel_ker2_def_ort1(self):
        ker = Sum(
            CK(0.1, (1e-3, 1e3)) * RBF(length_scale=10,
                                       length_scale_bounds=(1e-3, 1e3)),
            CK(0.1, (1e-3, 1e3)) * RBF(length_scale=1,
                                       length_scale_bounds=(1e-3, 1e3))
        )
        onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32)
        model_onnx = onx.to_onnx(
            inputs=[('X', FloatTensorType([None, None]))],
            outputs=[('Y', FloatTensorType([None, None]))])
        sess = OnnxInference(model_onnx.SerializeToString(),
                             runtime="onnxruntime1")

        rows = []

        def myprint(*args, **kwargs):
            rows.append(" ".join(map(str, args)))

        res = sess.run({'X': Xtest_.astype(numpy.float32)},
                       intermediate=True, verbose=1, fLOG=myprint)
        self.assertGreater(len(rows), 2)
        m1 = res['Y']
        self.assertNotEmpty(m1)
        self.assertGreater(len(res), 2)
        # m2 = ker(Xtest_)
        # self.assertEqualArray(m1, m2, decimal=5)

        cpu = OnnxInference(model_onnx.SerializeToString())
        sbs = side_by_side_by_values(
            [cpu, sess], inputs={'X': Xtest_.astype(numpy.float32)})
        self.assertGreater(len(sbs), 2)
        self.assertIsInstance(sbs, list)
        self.assertIsInstance(sbs[0], dict)
        self.assertIn('step', sbs[0])
        self.assertIn('step', sbs[1])
        self.assertIn('metric', sbs[0])
        self.assertIn('metric', sbs[1])
        self.assertIn('cmp', sbs[0])
        self.assertIn('cmp', sbs[1])

        sess3 = OnnxInference(model_onnx.SerializeToString(),
                              runtime="onnxruntime2")
        sbs = side_by_side_by_values(
            [cpu, sess, sess3], inputs={'X': Xtest_.astype(numpy.float32)})
        self.assertNotEmpty(sbs)

        inputs = {'X': Xtest_.astype(numpy.float32)}
        sbs = side_by_side_by_values(
            [(cpu, inputs), (sess, inputs), (sess3, inputs)])
        self.assertNotEmpty(sbs)
Example #10
 def _eval(ppl):
     if self.couldBfirst == []:
         from sklearn.pipeline import Pipeline
     else:
         from imblearn.pipeline import Pipeline
     from sklearn.model_selection import RandomizedSearchCV
     if self.surrogates is None:
         from numpy import logspace
         from sklearn.gaussian_process import GaussianProcessRegressor
         from sklearn.kernel_ridge import KernelRidge
         from sklearn.gaussian_process.kernels import Matern, Sum, ExpSineSquared, WhiteKernel
         param_grid_gpr = {"alpha": logspace(-8, 1, 20),
                           "kernel": [Sum(Matern(length_scale=l_, nu=p), WhiteKernel(noise_level=q))
                                      for l_ in logspace(-3, 3, 20)
                                      for p in [0.5, 1.5, 2.5]
                                      for q in logspace(-3, 1.5, 20)]}
         GPR = RandomizedSearchCV(GaussianProcessRegressor(), param_distributions=param_grid_gpr, n_iter=20,
                                  cv=2)
         param_grid_krr = {"alpha": logspace(-4, 0, 10),
                           "kernel": [Sum(Matern(), ExpSineSquared(l_, p))
                                      for l_ in logspace(-2, 2, 20)
                                      for p in logspace(0, 2, 20)]}
         KRR = RandomizedSearchCV(KernelRidge(), param_distributions=param_grid_krr, n_iter=30, cv=2)
         self.surrogates = [(KRR, 35, CompactSample, 'L-BFGS-B'), (GPR, 50, BoxSample, 'L-BFGS-B')]
         self.min_random_evals = 10
     from collections import OrderedDict
     fitted = OrderedDict([])
     for seq in ppl:
         best_mdl, best_scr = self.optimize_pipeline(seq, X_, y_)
         if seq not in self.models:
             self.models[seq] = (best_mdl, best_scr)
         if self.verbose > 0:
             print("score:%f" % best_scr)
             print(best_mdl)
         fitted[seq] = -best_scr
     return fitted
Example #11
    def test_kernel_ker2_def(self):
        ker = Sum(
            CK(0.1, (1e-3, 1e3)) * RBF(length_scale=10,
                                       length_scale_bounds=(1e-3, 1e3)),
            CK(0.1, (1e-3, 1e3)) * RBF(length_scale=1,
                                       length_scale_bounds=(1e-3, 1e3))
        )
        onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32,
                             op_version=get_opset_number_from_onnx())
        model_onnx = onx.to_onnx(
            inputs=[('X', FloatTensorType([None, None]))],
            outputs=[('Y', FloatTensorType([None, None]))],
            target_opset=get_opset_number_from_onnx())
        sess = OnnxInference(model_onnx.SerializeToString())

        res = sess.run({'X': Xtest_.astype(numpy.float32)})
        m1 = res['Y']
        m2 = ker(Xtest_)
        self.assertEqualArray(m1, m2)

        res = sess.run({'X': Xtest_.astype(numpy.float32)}, intermediate=True)
        self.assertGreater(len(res), 30)
        self.assertIsInstance(res, dict)
Example #12
    def __init__(self):
        """Initializes the algorithm with a parameter configuration. """

        # TODO: enter your code here
        #         #noise perturbation stddev
        #         self.noise_nu_v          = 0.0001
        #         self.noise_nu_f          = 0.15

        #         #f's kernel
        #         self.kernel_variance_f    = 0.5
        #         self.kernel_lenghtscale_f = 0.5
        #         self.kernel_smoothness_f  = 2.5

        #         #v's constant mean
        #         self.mean_v        = 1.5
        #         #v's kernel
        #         self.kernel_variance_v    = math.sqrt(2)
        #         self.kernel_lenghtscale_v = 0.5
        #         self.kernel_smoothness_v  = 2.5

        #         self.K = 1.2 #minimum speed K
        #         #init x, v, f tensors as None as no data yet (--> add_data_point handles)
        self.x = np.random.rand(1) * 5
        self.v = np.random.rand(1)

        #         #objective function's or surrogate function's values
        self.f = np.random.rand(1) + 1.5

        self.xi = 0.01

        #         #self.model_v = None
        #         #self.model_f = None ?? not sure
        #         self.likelihood_f = gpytorch.likelihoods.GaussianLikelihood()
        #         self.likelihood_v = gpytorch.likelihoods.GaussianLikelihood()
        #         #init the kernels of f and v

        #         #init the gaussian process regressors of f and v
        #         #------test1-------
        #         self.kernel_f = gpytorch.kernels.ScaleKernel(base_kernel = gpytorch.kernels.MaternKernel(nu=self.kernel_smoothness_f, length_scale=torch.Tensor([self.kernel_lenghtscale_f])), output_scale = self.kernel_variance_f)

        #         self.kernel_v = gpytorch.kernels.ScaleKernel(base_kernel = gpytorch.kernels.MaternKernel(nu=self.kernel_smoothness_v, length_scale=self.kernel_lenghtscale_v), kernels.ConstantKernel(self.kernel_variance_v))

        #         self.gp_f = ExactGPModel(kernel = self.kernel_f, train_x = self.x, train_y = self.f, likelihood = self.likelihood_f)
        #         self.gp_v = ExactGPModel(mean=self.mean_v, kernel=self.kernel_v, train_x=self.x, train_y=self.v, likelihood=self.likelihood_v)
        #------test2-------
        #self.kernel_f = Matern(nu=self.kernel_smoothness_f, length_scale=self.kernel_lenghtscale_f)* kernels.ConstantKernel(self.kernel_variance_f)

        #self.kernel_v = Matern(nu=self.kernel_smoothness_v, length_scale=self.kernel_lenghtscale_v) * kernels.ConstantKernel(self.kernel_variance_v)
        #self.gp_f = GaussianProcessRegressor(kernel = self.kernel_f, alpha = self.noise_nu_f * self.noise_nu_f)
        #self.gp_v = GaussianProcessRegressor(kernel = self.kernel_v + kernels.ConstantKernel(self.mean_v), alpha = self.noise_nu_v * self.noise_nu_v)

        ###self.x0 = domain[:, 0] + (domain[:, 1] - domain[:, 0]) * \
        ###         np.random.rand(domain.shape[0])
        ###self.gp_f.fit(x0, 0)
        ###self.gp_v.fit(x0, self.mean_v)

        #self.f = matern...
        #self.v = matern + mean...
        #self.gp = gaussian process... :p
        # ok, it works with the ScaleKernel

        # TODO: enter your code here
        #
        epsilon = 1e-7
        self.kappa = 1.2 + epsilon

        # Validation Function F
        self.f_noise = 0.15
        self.f_var = 0.5
        self.f_lenscale = 0.5
        self.f_nu = 2.5

        k_1 = Matern(length_scale=self.f_lenscale,
                     length_scale_bounds="fixed",
                     nu=self.f_nu)
        k_2 = ConstantKernel(constant_value=self.f_var,
                             constant_value_bounds="fixed")
        k_3 = WhiteKernel(noise_level=self.f_noise)
        self.f_kernel = Sum(Product(k_1, k_2), k_3)
        # Default GaussianProcessRegressor (check argument possibilities)
        self.gp_f = GaussianProcessRegressor(kernel=self.f_kernel)

        # Speed Function V
        self.v_noise = 0.0001
        self.v_var = np.sqrt(2)
        self.v_lenscale = 0.5
        self.v_mean = 1.5
        self.v_nu = 2.5

        kv_1 = Matern(length_scale=self.v_lenscale,
                      length_scale_bounds="fixed",
                      nu=self.v_nu)
        kv_2 = ConstantKernel(constant_value=self.v_var,
                              constant_value_bounds="fixed")
        kv_3 = WhiteKernel(noise_level=self.v_noise)
        kv_4 = ConstantKernel(constant_value=self.v_mean,
                              constant_value_bounds="fixed")

        self.v_kernel = Sum(Sum(kv_4, Product(kv_1, kv_2)), kv_3)
        # Default GaussianProcessRegressor (check argument possibilities)
        self.gp_v = GaussianProcessRegressor(kernel=self.v_kernel)
Example #13
    def test_kernel_ker2_def_ort1(self):
        ker = Sum(
            CK(0.1, (1e-3, 1e3)) * RBF(length_scale=10,
                                       length_scale_bounds=(1e-3, 1e3)),
            CK(0.1, (1e-3, 1e3)) * RBF(length_scale=1,
                                       length_scale_bounds=(1e-3, 1e3))
        )
        onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32,
                             op_version=get_opset_number_from_onnx())
        model_onnx = onx.to_onnx(
            inputs=[('X', FloatTensorType([None, None]))],
            outputs=[('Y', FloatTensorType([None, None]))],
            target_opset=get_opset_number_from_onnx())
        model_onnx.ir_version = get_ir_version_from_onnx()
        sess = OnnxInference(model_onnx.SerializeToString(),
                             runtime="onnxruntime1")

        rows = []

        def myprint(*args, **kwargs):
            rows.append(" ".join(map(str, args)))

        res = _capture_output(
            lambda: sess.run({'X': Xtest_.astype(numpy.float32)},
                             intermediate=True, verbose=1, fLOG=myprint),
            'c')[0]
        self.assertGreater(len(rows), 2)
        m1 = res['Y']
        self.assertNotEmpty(m1)
        self.assertGreater(len(res), 2)
        # m2 = ker(Xtest_)
        # self.assertEqualArray(m1, m2, decimal=5)

        cpu = OnnxInference(model_onnx.SerializeToString())
        sbs = side_by_side_by_values(
            [cpu, sess], inputs={'X': Xtest_.astype(numpy.float32)})
        self.assertGreater(len(sbs), 2)
        self.assertIsInstance(sbs, list)
        self.assertIsInstance(sbs[0], dict)
        self.assertIn('step', sbs[0])
        self.assertIn('step', sbs[1])
        self.assertIn('metric', sbs[0])
        self.assertIn('metric', sbs[1])
        self.assertIn('cmp', sbs[0])
        self.assertIn('cmp', sbs[1])

        sess3 = _capture_output(
            lambda: OnnxInference(model_onnx.SerializeToString(),
                                  runtime="onnxruntime2"), 'c')[0]
        try:
            sbs = side_by_side_by_values(
                [cpu, sess, sess3], inputs={'X': Xtest_.astype(numpy.float32)})
        except RuntimeError as e:
            if "Got invalid dimensions for input" in str(e):
                # probable bug somewhere
                return
            raise e
        self.assertNotEmpty(sbs)

        inputs = {'X': Xtest_.astype(numpy.float32)}
        sbs = side_by_side_by_values(
            [(cpu, inputs), (sess, inputs), (sess3, inputs)])
        self.assertNotEmpty(sbs)
Example #14
# Find an optimal classification pipeline

import pandas as pd
import numpy as np
from sklearn.model_selection import RandomizedSearchCV
from sklearn.kernel_ridge import KernelRidge
from sklearn.gaussian_process.kernels import Matern, Sum, ExpSineSquared
from SKSurrogate import *

param_grid_krr = {
    "alpha": np.logspace(-4, 0, 5),
    "kernel": [
        Sum(Matern(), ExpSineSquared(l, p))
        for l in np.logspace(-2, 2, 10)
        for p in np.logspace(0, 2, 10)
    ],
}
regressor = RandomizedSearchCV(
    KernelRidge(), param_distributions=param_grid_krr, n_iter=5, cv=2
)

config = {
    # Classifiers
    "sklearn.naive_bayes.GaussianNB": {"var_smoothing": Real(1.0e-9, 2.0e-1)},
    "sklearn.linear_model.LogisticRegression": {
        "penalty": Categorical(["l1", "l2"]),
        "C": Real(1.0e-6, 10.0),
        "class_weight": HDReal((1.0e-5, 1.0e-5), (20.0, 20.0))
        # 'dual': Categorical([True, False])
    },
    "sklearn.svm.SVC": {
Example #15
XcTrain, normalizer_c = normalize(XcTrain)
XcTest = normalizer_c.transform(XcTest)
yTrain, normalizer_y = normalize(yTrain)

print('----------------------------------------------------------------------')

print('training a Gaussian Process on selected features... \n')
print('using %s samples for training' % n_samples)

XTrain = np.concatenate((XdTrain, XcTrain), axis=1)
k1 = 1.0**2 * RBF(length_scale=np.ones(XTrain.shape[1]),
                  length_scale_bounds=(1e-7, 1e7))
k2 = ConstantKernel(0.1, (1e-7, 1e7))
k3 = 1.0**2 * RationalQuadratic(
    length_scale=1.0, length_scale_bounds=(1e-7, 1e7), alpha=0.1)
kernel = Product(Sum(k1, k2), k3)

# generate data and fit GP
model = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
start = time.time()
model.fit(XTrain, yTrain.reshape(-1, 1))
end = time.time()
print('elapsed time: %ss \n' % (end - start))

print('final kernel: %s' % (model.kernel_))

print('----------------------------------------------------------------------')

print('calculating posterior on test data... \n')
start = time.time()
XTest = np.concatenate((XdTest, XcTest), axis=1)
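
Side note on the composition above: Product(Sum(k1, k2), k3) denotes the same kernel as the operator form (k1 + k2) * k3. A tiny self-contained check on random toy inputs:

import numpy as np
from sklearn.gaussian_process.kernels import (
    RBF, ConstantKernel, Product, RationalQuadratic, Sum)

k1 = 1.0**2 * RBF(length_scale=np.ones(3), length_scale_bounds=(1e-7, 1e7))
k2 = ConstantKernel(0.1, (1e-7, 1e7))
k3 = 1.0**2 * RationalQuadratic(
    length_scale=1.0, length_scale_bounds=(1e-7, 1e7), alpha=0.1)

X = np.random.rand(6, 3)  # toy inputs with 3 features
assert np.allclose(Product(Sum(k1, k2), k3)(X), ((k1 + k2) * k3)(X))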
Example #16
    def set_params(self,
                   n_small_stop=None,
                   n_large_stop=None,
                   kernel=None,
                   length=1.0,
                   length_bounds=(1e-5, 1e5),
                   sigma_0=1.0,
                   periodicity=1.0,
                   gamma=1.0,
                   alpha_rquad=1.0,
                   nu=1.0,
                   **kwargs):

        # Define the interpolation region in N
        self.n_small_stop = n_small_stop
        self.n_large_stop = n_large_stop
        self.kernel_name = kernel

        # Kernels taken from tuned_parameters list
        if kernel == "dotprod":
            new_kernel = C(length, (1e-3, 1e3)) * DotProduct(
                sigma_0=sigma_0, sigma_0_bounds=(1e-5, 1e5))
        elif kernel == "expsin":
            new_kernel = C(1.0, (1e-3, 1e3)) * ExpSineSquared(
                length_scale=length,
                periodicity=periodicity,
                length_scale_bounds=length_bounds)
        elif kernel == "exp":
            new_kernel = C(1.0, (1e-3, 1e3)) * Exponentiation(
                RBF(length_scale=length, length_scale_bounds=length_bounds), 2)
        elif kernel == "matern":
            new_kernel = C(1.0, (1e-3, 1e3)) * Matern(
                length_scale=length, length_scale_bounds=length_bounds, nu=nu)
        elif kernel == "pairwise":
            new_kernel = C(1.0, (1e-3, 1e3)) * PairwiseKernel(
                gamma=gamma, gamma_bounds=(1e-5, 1e5))
        elif kernel == "rbf":
            new_kernel = C(1.0, (1e-3, 1e3)) * RBF(
                length_scale=length, length_scale_bounds=length_bounds)
        elif kernel == "rquad":
            new_kernel = C(1.0, (1e-3, 1e3)) * RationalQuadratic(
                length_scale=length,
                alpha=alpha_rquad,
                length_scale_bounds=length_bounds,
                alpha_bounds=(1e-5, 1e5))
        # Combinations of basic kernels
        elif kernel == "prod":
            new_kernel = C(1.0, (1e-3, 1e3)) * Product(
                RBF(length, length_bounds), Matern(
                    length, length_bounds, nu=nu))
        elif kernel == "sum":
            new_kernel = C(1.0, (1e-3, 1e3)) * Sum(
                RBF(length, length_bounds), Matern(
                    length, length_bounds, nu=nu))
        elif kernel == "prod2":
            new_kernel = C(1.0, (1e-3, 1e3)) * Product(
                RationalQuadratic(length_scale=length,
                                  alpha=alpha_rquad,
                                  length_scale_bounds=length_bounds,
                                  alpha_bounds=(1e-5, 1e5)),
                Matern(length, length_bounds, nu=nu))
        elif kernel == "sum2":
            new_kernel = C(1.0, (1e-3, 1e3)) * Sum(
                RationalQuadratic(length_scale=length,
                                  alpha=alpha_rquad,
                                  length_scale_bounds=length_bounds,
                                  alpha_bounds=(1e-5, 1e5)),
                Matern(length, length_bounds, nu=nu))
        else:
            raise NotImplementedError(f"Kernel not implemented {kernel}")

        # Call the super() class (GaussianProcessRegressor) and pass it
        # the parameters taken from the tuned_parameters list manually
        result = super().set_params(kernel=new_kernel, **kwargs)

        return result
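
A standalone sketch of the same name-to-kernel dispatch, reduced to a plain function for illustration; only the "sum" branch is reproduced and the helper name make_kernel is hypothetical:

from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C, Matern, Sum

def make_kernel(kernel, length=1.0, length_bounds=(1e-5, 1e5), nu=1.0):
    # mirrors the "sum" branch of set_params above
    if kernel == "sum":
        return C(1.0, (1e-3, 1e3)) * Sum(
            RBF(length, length_bounds), Matern(length, length_bounds, nu=nu))
    raise NotImplementedError(f"Kernel not implemented {kernel}")

print(make_kernel("sum", length=2.0, nu=1.5))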
Example #17
# Set the parameters to hyperoptimize
kernel1 = DotProduct(sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5))
kernel2 = ExpSineSquared(length_scale=1.0,
                         periodicity=1.0,
                         length_scale_bounds=(1e-5, 1e5))
kernel3 = Exponentiation(
    RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e2)), 2)
kernel4 = Matern(length_scale=1.0, length_scale_bounds=(1e-5, 1e5), nu=1.5)
kernel5 = PairwiseKernel(gamma=1.0, gamma_bounds=(1e-5, 1e5))
kernel6 = Product(RBF(1.0, (1e-5, 1e5)), Matern(1.0, (1e-5, 1e5), nu=1.5))
kernel7 = RBF(length_scale=1.0, length_scale_bounds=(1e-5, 1e5))
kernel8 = RationalQuadratic(length_scale=1.0,
                            alpha=1.0,
                            length_scale_bounds=(1e-5, 1e5),
                            alpha_bounds=(1e-5, 1e5))
kernel9 = Sum(RBF(1.0, (1e-2, 1e2)), Matern(10, (1e-2, 1e2), nu=1.5))
# List of hyperparameters given to the GridSearchCV()
tuned_parameters = [{
    "kernel": [
        kernel1, kernel2, kernel3, kernel4, kernel5, kernel6, kernel7, kernel8,
        kernel9
    ]
}]
kernel_names = ["DP", "ES", "Exp", "Mat", "PW", "Prod", "RBF", "RQ", "Sum"]

# Instantiate the GridSearchCV()
gp_hyper = GridSearchCV(GaussianProcessRegressor(), tuned_parameters)
gp_hyper.fit(np.log10(pt), sigma)

# Compute means and standard deviations
print("\nChecking performance")
Example #18
training_data = [nou, bits, pktNum, sigS, dataRate, phyB, phyG, phyN]
test_data = [nou_tst, bits_tst, pkt_tst, sigS_tst, dR_tst, b_tst, g_tst, n_tst]
labels = ["Number of users", "Bits", "Number of Packets", "Signal Strength",\
          "Data Rate(MB)", "802.11b bits", "802.11g bits", "802.11n bits"]
#
#Number of test samples
p = 0
r = 100
#Window size
D = 15

kernel1 = LK(sigma_0=1, sigma_0_bounds=(1e-1, 1e1))
kernel2 = CK(constant_value=1)
kernel3 = WK(0.1)
kernel = Sum(kernel1, kernel2)
kernel = Sum(kernel, kernel3)
#1e-1 for linear + constant, 1e-3 for RBF
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10,\
                              normalize_y=False, alpha=1e-1)
#print gp.get_params()['kernel']

for z in range(len(labels)):  #len(labels)
    total_samp, Xtr, Ytr, Xtst, Ycomp, Ytst = GP_prep(training_data[z],
                                                      test_data[z], 60, p, r,
                                                      D)

    print(Xtr.shape, Ytr.shape, Xtst.shape, Ytst.shape)
    #Testing if it overfits
    Xtr_1 = [Xtr[i] for i in range(D, r)]