Example #1
def gp_sklearn_interpolator(x, y, res=1000):

    kernel = DotProduct(10.0, (1e-2, 1e2)) * RationalQuadratic(0.1)
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
    gp.fit(x.reshape(-1, 1), (y - x).reshape(-1, 1))

    x_pred = np.linspace(0, 1, res)  # was hard-coded to 1000, ignoring res
    y_pred, sigma = gp.predict(x_pred[:, np.newaxis], return_std=True)
    y_pred = y_pred.ravel() + x_pred

    return x_pred, y_pred
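A minimal way to exercise the interpolator above, with the imports the snippet omits and made-up sample data:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, RationalQuadratic

x = np.sort(np.random.rand(20))
y = x + 0.05 * np.sin(10 * x)   # roughly monotone toy data
x_pred, y_pred = gp_sklearn_interpolator(x, y, res=500)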
Example #2
def test_gpr_correct_error_message():
    X = np.arange(12).reshape(6, -1)
    y = np.ones(6)
    kernel = DotProduct()
    gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
    message = ("The kernel, %s, is not returning a "
               "positive definite matrix. Try gradually increasing "
               "the 'alpha' parameter of your "
               "GaussianProcessRegressor estimator." % kernel)
    with pytest.raises(np.linalg.LinAlgError, match=re.escape(message)):
        gpr.fit(X, y)
Example #3
    def update_cov_fcns(self,
                        rbf_ub=2000.0,
                        matern_ub=4000.0,
                        dotProduct_ub=1000.0):
        self.cov = sum([
            0.5 * RBF(length_scale=100.0, length_scale_bounds=(1e-1, rbf_ub)),
            0.5 * Matern(length_scale=100.0,
                         length_scale_bounds=(1e-1, matern_ub)),
            1.0 * DotProduct(sigma_0=100.0,
                             sigma_0_bounds=(1e-2, dotProduct_ub)),
            1.0 * WhiteKernel()
        ])
Example #4
    def test_partial_float64(self):
        data = load_boston()
        X, y = data.data, data.target
        X_train, X_test, y_train, _ = train_test_split(X, y)
        gau = GaussianProcessRegressor(alpha=10, kernel=DotProduct())
        gau.fit(X_train, y_train)
        onnxgau48 = to_onnx(gau, X_train.astype(numpy.float32),
                            dtype=numpy.float32,
                            options={GaussianProcessRegressor: {'float64': True}})
        oinf48 = OnnxInference(onnxgau48, runtime="python")
        out = oinf48.run({'X': X_test.astype(numpy.float32)})
        y = out['GPmean']
        self.assertEqual(y.dtype, numpy.float32)
Example #5
def create_classifiers() -> List:
    kernel = DotProduct() + WhiteKernel()
    return [KNeighborsClassifier(3),
            SVC(kernel='poly', gamma='scale', probability=True),
            SVC(gamma=2, C=1, probability=True),
            GaussianProcessClassifier(kernel=Matern(nu=2.5)),
            GaussianProcessClassifier(kernel=kernel),
            RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1, min_samples_leaf=5),
            RandomForestClassifier(max_depth=10, n_estimators=10, max_features=1),
            GaussianNB(),
            LinearDiscriminantAnalysis(solver='eigen', shrinkage='auto', tol=0.0001),
            QuadraticDiscriminantAnalysis()]
Example #6
def plot_prediction_color(filename, material):
    df = pd.read_json(filename)
    df_filtered = df.loc[df['breeder_material_name'] == material]

    # Iteratively improve the dataset: drop the worst TBR value and add a
    # better enrichment configuration in its place.
    for k in range(1, 100):
        X = list(df_filtered['enrichment_value'])
        y = list(df_filtered['value'])

        kernel = DotProduct() + WhiteKernel()
        gpr = GaussianProcessRegressor(kernel=kernel, random_state=0).fit(X, y)
        gpr.score(X, y)
        row_max_tbr = df_filtered.loc[df_filtered['value'].idxmax()]
        row_min_tbr = df_filtered.loc[df_filtered['value'].idxmin()]

        bounds = [(0, 1), (0, 1)]
        GP = GpOptimiser(X, y, bounds=bounds)

        new_enrichment_value = list(GP.search_for_maximum())

        X.remove(row_min_tbr['enrichment_value'])
        y.remove(row_min_tbr['value'])

        print('new enrichment fraction', new_enrichment_value)
        append_to_json = find_tbr_dict(new_enrichment_value, material, True,
                                       500000)
        #adjust the number of batches with the experiment
        X.append(new_enrichment_value)
        y.append(append_to_json['value'])

        result_path = ('results_new_neutron_source/added_' + str(k) +
                       '_result_2_layers_halton_first_wall_neural_network.json')
        with open(result_path, 'w') as file_object:
            json.dump([append_to_json], file_object, indent=2)

        print('file created')
        df_append = pd.read_json(result_path)
        # DataFrame.append is deprecated; use pd.concat instead
        df_filtered = pd.concat([df_filtered, df_append],
                                ignore_index=True,
                                sort=True)

        idx = df_filtered.index[df_filtered['value'] == row_min_tbr['value']]
        df_filtered = df_filtered.drop(idx[0])

    TBR = y
    print(
        'The max TBR for ' + str(len(X[0])) + ' layers and ' + str(material) +
        ' is', max(TBR))
Example #7
    def error_rate(self):
        if self.kernel == "RBF":
            gpc = GaussianProcessClassifier(kernel=1.0 * RBF([1.0])).fit(self.x_train, self.y_train)
        elif self.kernel == "DP":
            gpc = GaussianProcessClassifier(kernel=DotProduct(sigma_0=1.0)).fit(self.x_train, self.y_train)
        else:
            raise ValueError("Unknown kernel: {}".format(self.kernel))
        yp_train = gpc.predict(self.x_train)
        train_error_rate = np.mean(np.not_equal(yp_train, self.y_train))
        yp_test = gpc.predict(self.x_test)
        test_error_rate = np.mean(np.not_equal(yp_test, self.y_test))

        return train_error_rate, test_error_rate
Example #8
def gaussian_process_regressor(data):
    # Leave-one-out prediction: for each row, fit on every other row
    # (columns 1:-1 are features, the last column is the target).
    predicted_speedups = np.zeros((data.shape[0]))
    for row_idx in range(0, data.shape[0]):
        training_x = np.concatenate(
            (data[0:row_idx, 1:-1], data[row_idx + 1:, 1:-1]), axis=0)
        training_y = np.concatenate(
            (data[0:row_idx, -1], data[row_idx + 1:, -1]), axis=0)
        kernel = DotProduct() + WhiteKernel()
        reg = GaussianProcessRegressor(kernel=kernel, random_state=0).fit(
            training_x, training_y)
        test_x = data[row_idx:row_idx + 1, 1:-1]
        predicted_speedups[row_idx] = reg.predict(test_x)
    return predicted_speedups
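A made-up call pattern for the leave-one-out helper above (sklearn and numpy imports assumed; the first column is an identifier, per the slicing):

import numpy as np

data = np.column_stack([
    np.arange(10),           # id column, ignored by the model
    np.random.rand(10, 3),   # three features
    np.random.rand(10),      # speedup target
])
preds = gaussian_process_regressor(data)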
Example #9
    def test_export_sklearn_kernel_dot_product(self):
        def kernel_call_ynone(X, sigma_0=2.):
            t_sigma_0 = py_make_float_array(py_pow(sigma_0, 2))
            K = X @ numpy.transpose(X, axes=[1, 0]) + t_sigma_0
            return K

        x = numpy.array([[1, 2], [3, 4], [5, 6]], dtype=float)
        kernel = DotProduct(sigma_0=2.)
        exp = kernel(x, None)
        got = kernel_call_ynone(x, sigma_0=2.)
        self.assertEqualArray(exp, got)

        context = {
            'numpy.inner': numpy.inner,
            'numpy.transpose': numpy.transpose,
            'py_pow': py_pow,
            'py_make_float_array': py_make_float_array
        }

        from skl2onnx.algebra.onnx_ops import (  # pylint: disable=E0611,E0401
            OnnxTranspose, OnnxMatMul, OnnxAdd, OnnxPow)
        ctx = {
            'OnnxPow': OnnxPow,
            'OnnxAdd': OnnxAdd,
            'OnnxIdentity': OnnxIdentity,
            'OnnxTranspose': OnnxTranspose,
            'OnnxMatMul': OnnxMatMul,
            'py_make_float_array': py_make_float_array,
            'py_pow': py_pow
        }

        fct = translate_fct2onnx(kernel_call_ynone,
                                 context=context,
                                 cpl=True,
                                 context_cpl=ctx,
                                 output_names=['Z'])

        r = fct('X', op_version=get_opset_number_from_onnx())
        self.assertIsInstance(r, OnnxIdentity)
        inputs = {'X': x.astype(numpy.float32)}
        onnx_g = r.to_onnx(inputs)
        oinf = OnnxInference(onnx_g)
        res = oinf.run(inputs)
        self.assertEqualArray(exp, res['Z'])

        exp = kernel(x.T, None)
        got = kernel_call_ynone(x.T)
        self.assertEqualArray(exp, got)
        inputs = {'X': x.T.astype(numpy.float32)}
        res = oinf.run(inputs)
        self.assertEqualArray(exp, res['Z'])
Example #10
    def get_phi(self):
        kernel = 1.0*RBF(length_scale=1.0) + WhiteKernel(noise_level=1.0)
        kernel += 1.0*DotProduct(sigma_0=1.0) + 1.0*Matern(length_scale=1.0)
        estimator = GaussianProcessRegressor(kernel=kernel,
                                             n_restarts_optimizer=8, alpha=0)
        phi = []
        score_lib = {} 
        for n, i in enumerate(self.features):
            print('Working on {}'.format(i))
            score_comb = []
            f = [j for j in self.features if j != i]
            for j in range(0, len(f)+1):
                score = []
                for k in combinations(f, j):
                    if j != 0:
                        key = '_'.join(sorted(list(k)))
                        if key in score_lib:
                            score_exclude = score_lib[key]
                            print('Getting data from lib.')
                        else:
                            X = self.df[list(k)]
                            y = self.df[self.target]
                            estimator.fit(X, y)
                            # r2_score expects (y_true, y_pred)
                            score_exclude = r2_score(y, estimator.predict(X))
                            score_lib[key] = score_exclude
                            msg = 'Adding {} with score {} to the score lib'
                            print(msg.format(key, score_exclude))
                    else:
                        score_exclude = 0
                    key = '_'.join(sorted(list(k)+[i]))
                    if key in score_lib:
                        score_include = score_lib[key]
                        print('Getting data from lib.')
                    else:
                        X = self.df[list(k) + [i]]
                        y = self.df[self.target]
                        estimator.fit(X, y)
                        # r2_score expects (y_true, y_pred)
                        score_include = r2_score(y, estimator.predict(X))
                        score_lib[key] = score_include
                        msg = 'Adding {} with score {} to the score lib'
                        print(msg.format(key, score_include))
                    score.append(score_include - score_exclude)
                score_comb.append(sum(score)/len(score))
            phi.append(sum(score_comb)/len(score_comb))

        phi_percentage = [i/sum(phi)*100 for i in phi]

        return {'features': self.features,
                'phi': phi, 
                'phi_percentage': phi_percentage,
                'score_lib': score_lib}
Example #11
    def __init__(self,
                 ml_algs=('LR', 'GPR', 'MLP', 'DL', 'SVR', 'RFR', 'DTR',
                          'GBR')):
        super().__init__()
        self.regressors = []

        for alg in ml_algs:
            # if alg == 'DL':
            #     self.regressors.append(DeepLearningRegressor(type='custom'))
            if alg == 'BRR':
                self.regressors.append(linear_model.BayesianRidge())
            elif alg == 'RFR':
                self.regressors.append(RandomForestRegressor(n_estimators=100))
            elif alg == 'DTR':
                self.regressors.append(DecisionTreeRegressor())
            elif alg == 'GBR':
                self.regressors.append(GradientBoostingRegressor())
            elif alg == 'LR':
                self.regressors.append(LinearRegression())
            elif alg == 'GPR':
                self.regressors.append(
                    GaussianProcessRegressor(kernel=DotProduct() +
                                             WhiteKernel(),
                                             random_state=0))
            elif alg == 'SVR':
                self.regressors.append(
                    SVR(kernel='rbf', C=100, gamma=0.1, epsilon=.1))
            elif alg == 'MLP':
                self.regressors.append(
                    MLPRegressor(hidden_layer_sizes=(100, ),
                                 activation='relu',
                                 solver='adam',
                                 alpha=0.001,
                                 batch_size='auto',
                                 learning_rate='constant',
                                 learning_rate_init=0.01,
                                 power_t=0.5,
                                 max_iter=1000,
                                 shuffle=True,
                                 random_state=0,
                                 tol=0.0001,
                                 verbose=False,
                                 warm_start=False,
                                 momentum=0.9,
                                 nesterovs_momentum=True,
                                 early_stopping=False,
                                 validation_fraction=0.1,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08))
Example #12
    def fit(self, x, y):
        y = y["y_ph"]

        # define the GP kernel
        kernel = DotProduct(sigma_0=self.sigma_0)
        # kernel = RBF(length_scale=1e2)

        # define the model
        self.model = GaussianProcessRegressor(kernel=kernel,
                                              alpha=self.alpha,
                                              random_state=misc.seed)

        # fit the model
        self.model.fit(x, y)
Example #13
def regressor(X_train, Y_train):
    kernel = 1.0 * RBF(length_scale=0.01, length_scale_bounds=(1e-1, 1e2)) + (
        DotProduct()**3) * WhiteKernel(noise_level=2.e-8,
                                       noise_level_bounds=(1e-10, 1e-1))
    gp = GaussianProcessRegressor(kernel=kernel,
                                  alpha=0.,
                                  n_restarts_optimizer=15).fit(
                                      X_train, Y_train)
    print "kernel init: ", kernel
    print "kernel init params: ", kernel.theta
    print "kenel optimum: ", gp.kernel_
    print "opt kernel params: ", gp.kernel_.theta
    print "LML (opt): ", gp.log_marginal_likelihood()
    return gp
Example #14
    def __init__(self, params=None, limit=None, model=None):
        """ Init """

        if model is None:
            kernel = PairwiseKernel(
                metric='laplacian') * DotProduct() + WhiteKernel(
                    noise_level=5.0)
            self.model = GaussianProcessClassifier(kernel=kernel, n_jobs=-1)
            self.fitted = False
        else:
            self.fitted = True
            self.model = model

        if limit is not None:
            self.limit = limit
Example #15
def gpr_train2(train_bids, test_bid):
    test_data, train_X, train_Y, test_X, test_Y = get_data2(
        train_bids, test_bid)
    kernel = RBF() + Matern() + RationalQuadratic() + DotProduct()
    reg = GaussianProcessRegressor(kernel=kernel,
                                   n_restarts_optimizer=10,
                                   alpha=0.1)
    reg.fit(train_X, train_Y)
    output, err = reg.predict(test_X, return_std=True)
    rmse = np.sqrt(metrics.mean_squared_error(test_Y, output))
    print(test_bid + ": " + str(rmse))
    X = np.arange(test_data.shape[0])
    # 95% band: prediction +/- 1.96 standard deviations
    up, down = output + 1.96 * err, output - 1.96 * err
    return X, test_Y, output, rmse, up, down
Example #16
def str2ker(spec):
    # Build a composite kernel from a string such as "s+l" or "s*r+p":
    # letters map to base kernels, '+' and '*' alternate with them, and a
    # WhiteKernel noise term is always added at the end.
    k1 = C(1.0) * RBF(length_scale=1)
    k2 = C(1.0) * RationalQuadratic(length_scale=1)
    k3 = C(1.0) * ExpSineSquared(length_scale=1, periodicity=1)
    k4 = DotProduct(sigma_0=1)
    k5 = WhiteKernel(1.0)
    kernel_map = {"s": k1, "r": k2, "p": k3, "l": k4}

    # basic kernel
    if len(spec) == 1:
        ker = kernel_map[spec]
    else:
        # composite kernel
        ker = None
        factor = kernel_map[spec[0]]
        op = spec[1]
        for i in range(2, len(spec), 2):

            # if the operator is '*', keep extending the current product
            # factor
            if op == '*':
                factor = factor * kernel_map[spec[i]]

                # the end?
                if i == len(spec) - 1:
                    if ker is None:
                        ker = factor
                    else:
                        ker = ker + factor
                else:
                    op = spec[i + 1]

            # if the operator is '+', fold the current factor into ker and
            # start a new factor
            else:
                if ker is None:
                    ker = factor
                else:
                    ker = ker + factor

                factor = kernel_map[spec[i]]

                # the end?
                if i == len(spec) - 1:
                    ker = ker + factor
                else:
                    op = spec[i + 1]
    ker = ker + k5
    return ker
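A quick check of the grammar above (assuming the sklearn kernel classes C, RBF, RationalQuadratic, ExpSineSquared, DotProduct and WhiteKernel are imported as in the other examples): the spec "s+l" yields an RBF term plus a linear term plus noise.

ker = str2ker("s+l")
print(ker)
# 1**2 * RBF(length_scale=1) + DotProduct(sigma_0=1) + WhiteKernel(noise_level=1)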
Example #17
    def __init__(self, params=None, model=None, limit=.5, noise_level=5):
        """ Init """
        logging.info('Using scikit GPClassifier')

        if model is None:
            kernel = PairwiseKernel(
                metric='laplacian') * DotProduct() + WhiteKernel(
                    noise_level=noise_level)
            self.model = GaussianProcessClassifier(kernel=kernel, n_jobs=-1)
            self.fitted = False
        else:
            self.fitted = True
            self.model = model

        if limit is not None:
            self.limit = limit
Example #18
def fig_dotprod_kernel(only_trace: bool = True):
    from sklearn.gaussian_process.kernels import DotProduct
    import plotly.graph_objects as go
    kernel = DotProduct()
    x = np.linspace(-1, 1, 100)
    x_exp = np.expand_dims(x, axis=1)
    surf_data = kernel(x_exp, x_exp)
    trace = go.Surface(x=x, y=x, z=surf_data, showscale=False)
    if only_trace:
        return trace
    else:
        # the original referenced an undefined `fig`; build it here
        fig = go.Figure(data=[trace])
        fig.update_layout(scene=dict(xaxis_title='xi',
                                     yaxis_title='xj',
                                     zaxis_title='Linear Kernel Value'),
                          margin=dict(r=10, b=10, l=10, t=10))
        return fig
Example #19
def main():
    dg = DatasetGenerater(X_MIN, X_MAX, N_DATA, NOISE_AMP, BASE_FUNC, is_noisy=True)
    X, Y = dg.generate_dateset()
    train_data_index = np.random.randint(0, N_DATA, int(N_DATA * 0.1))
    X_train, Y_train = X[train_data_index], Y[train_data_index]
    X, X_train = X.reshape((-1, 1)), X_train.reshape((-1, 1))
    kernels = {
        "Gaussian Kernel": 0.594 ** 2 * RBF(length_scale=0.279),
        "Product": DotProduct(),
    }
    for name, kernel in kernels.items():
        model_gpr = GaussianProcessRegressor(kernel=kernel)
        reg = model_gpr.fit(X_train, Y_train)
        Y_pred, Y_std = reg.predict(X, return_std=True)
        Visualizer.visualize_gaussian_process(X, Y, Y_pred, Y_std, title=name)
Example #20
    def fit(self):
        # get training data
        x, y, t = self._str2dataset("train")

        # define the GP kernel
        kernel = DotProduct(sigma_0=self.params["sigma_0"],
                            sigma_0_bounds=(self.params["sigma_0"],
                                            self.params["sigma_0"]))

        # define the model
        self.model = GaussianProcessRegressor(kernel=kernel,
                                              alpha=self.params["alpha"],
                                              random_state=cs.seed
                                              )
        # fit the model
        self.model.fit(x, y)
Example #21
    def initGPs(self):
        for c in range(0, self.ncampaigns):
            #C(1.0, (1e-3, 1e3))
            #l= np.array([200,200])
            #kernel = C(1, (1e-3, 1e1))*RBF(l, ((100, 300),(100,300)))
            l = np.array([1.0, 1.0])
            kernel = C(1.0, (1e-3, 1e3)) * RBF(l, ((1e-3, 1e3), (1e-3, 1e3)))
            kernel = kernel * DotProduct(1.0)
            #l=1.0
            #kernel = C(1.0, (1e-3, 1e3)) * RBF(l, (1e-3, 1e3))
            alpha = 200
            self.gps.append(
                GaussianProcessRegressor(kernel=kernel,
                                         alpha=alpha,
                                         n_restarts_optimizer=10,
                                         normalize_y=True))
Example #22
File: gps.py Project: vikatelis/gym
    def __init__(self):
        # define dimensionality
        self.ndim = 2
        # kernels
        # 1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
        #                length_scale_bounds=(0.1, 10.0),
        #                periodicity_bounds=(1.0, 10.0)),
        self.kernels = [
            1.0 * RBF(length_scale=8.0, length_scale_bounds=(1e-1, 10.0)),
            1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
            ConstantKernel(0.1, (0.01, 10.0)) *
            (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.1, 10.0)) ** 2),
            1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
                         nu=1.5)
        ]
        # range action
        self.min_action = -10 * np.ones(self.ndim)
        self.max_action = 10 * np.ones(self.ndim)
        # range observation - dimensionality
        self.low_state = np.append([-30.0, -30.0], self.min_action)
        self.high_state = np.append([30.0, 30.0], self.max_action)
        # range state - dimensionality
        self.low_state_p = -6 * np.ones(self.ndim)
        self.high_state_p = 6 * np.ones(self.ndim)
        # meshgrid of states
        #self.x_ = np.arange(self.low_state_p[0]-1,self.high_state_p[0]+1,1)
        self.x_ = np.arange(-8, 8, 1)
        self.grid = np.array(np.meshgrid(self.x_, self.x_)).T.reshape(-1, 2)
        print(self.grid.shape)
        #self.grid = self.grid.reshape(-1,1)
        #print(self.grid.shape)
        # boxes
        self.action_space = spaces.Box(low=self.min_action,
                                       high=self.max_action)
        self.observation_space = spaces.Box(low=self.low_state,
                                            high=self.high_state)
        self.hyper_space = spaces.Box(low=self.low_state_p,
                                      high=self.high_state_p)
        # init
        self.obs = self.observation_space.sample()
        self.state = self.hyper_space.sample()
        self.prev_unscaled = 0
        self.gp = 0

        # init thread
        self.seed()
        self.reset()
Example #23
def train_gpr_model(TRAIN):
    """ Train a Gaussian Process Regression model.

    Params:
    -------
    TRAIN: pandas DataFrame
        training set; the last column is the label

    Returns:
    -------
    gpr: fitted Gaussian Process Regression model
    """
    train_dat = TRAIN.iloc[:, :-1]
    train_gs = TRAIN.iloc[:, -1]
    kernel = DotProduct() + WhiteKernel()
    gpr = GaussianProcessRegressor(kernel=kernel,
                                   random_state=0).fit(train_dat, train_gs)
    return gpr
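A made-up frame to exercise the helper above (the kernel classes and GaussianProcessRegressor are assumed imported, as in the other examples):

import numpy as np
import pandas as pd

TRAIN = pd.DataFrame(np.random.rand(20, 4),
                     columns=['f1', 'f2', 'f3', 'label'])
gpr = train_gpr_model(TRAIN)
print(gpr.predict(TRAIN.iloc[:3, :-1]))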
Example #24
    def __init__(self,
                 args1={
                     'kernel': 'rbf',
                     'probability': True
                 },
                 args2={
                     'kernel': DotProduct(),
                     'probability': True
                 },
                 verbose=False):
        """
        ...
        """
        self.model1 = SVC(**args1)
        self.model2 = SVC(**args2)

        self.verbose = verbose
Example #25
def test_sum():

    # define rbf and dot product custom kernels, and set hyperparameters
    custom_rbf_kernel = generate_kernel(rbf, rbf_grad)
    custom_dot_prod_kernel = generate_kernel(dot_prod, dot_prod_grad)

    sum_custom_kernel = Sum(custom_dot_prod_kernel, custom_rbf_kernel)
    model = GaussianProcessRegressor(kernel=sum_custom_kernel, random_state=0)
    model.fit(X_train, Y_train)
    preds1 = model.predict(X_test)

    sum_kernel = Sum(DotProduct(), RBF())
    model = GaussianProcessRegressor(kernel=sum_kernel, random_state=0)
    model.fit(X_train, Y_train)
    preds2 = model.predict(X_test)

    assert np.all(np.isclose(preds1, preds2))
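The helpers the test relies on (rbf, rbf_grad, dot_prod, dot_prod_grad, generate_kernel) are defined elsewhere; the exact signatures generate_kernel expects are an assumption. As a hypothetical sketch, the dot-product pieces could match sklearn's DotProduct with its default sigma_0=1.0:

import numpy as np

def dot_prod(X, Y, sigma_0=1.0):
    # K(x, y) = sigma_0**2 + x . y, i.e. sklearn's DotProduct
    return sigma_0 ** 2 + X @ Y.T

def dot_prod_grad(X, sigma_0=1.0):
    # gradient of K(X, X) w.r.t. log(sigma_0): 2 * sigma_0**2 everywhere
    n = X.shape[0]
    return np.full((n, n, 1), 2.0 * sigma_0 ** 2)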
Example #26
def model_GaussianProcess(option):
    option.report_model = 'GaussianProcess'
    option.lamNum = 0
    sign = -1
    kernel = None
    dim = np.asarray(option.X).shape[1]
    if option.mode == '1':
        while (sign != 0):
            choice = float(
                input(
                    'Choose kernel: 1. RBF 2. ExpSineSquared '
                    '3. RationalQuadratic 4. WhiteKernel 5. DotProduct '
                    '6. ConstantKernel 7. Matern: '
                ))
            if (choice == 1):
                nextkernel = RBF(5 * np.ones(dim),
                                 length_scale_bounds=(1e-3, 1e3))
            elif (choice == 2):
                nextkernel = ExpSineSquared(length_scale=1.3, periodicity=1)
            elif (choice == 3):
                nextkernel = RationalQuadratic(alpha=0.7, length_scale=1.2)
            elif (choice == 4):
                nextkernel = WhiteKernel(noise_level=1)
            elif (choice == 5):
                nextkernel = DotProduct(sigma_0=1)
            elif (choice == 6):
                nextkernel = ConstantKernel()
            elif (choice == 7):
                nextkernel = Matern(length_scale=1.0, nu=1.5)
            else:
                print("Bad input.")
                continue
            if (sign == 1):
                kernel = kernel + nextkernel
            elif (sign == 2):
                kernel = kernel * nextkernel
            elif (sign == 3):
                kernel = kernel**nextkernel
            elif (sign == -1):
                kernel = nextkernel
            else:
                print('Bad input')
            sign = float(input('Choose sign: 1.add 2.multiply 3.exp 0.done:'))
    else:
        kernel = 1.0 * RBF(1.0)
    print('Your kernel:')
    print(kernel)
    option.kernel = kernel
Example #27
    def test_kernel_ker2_dotproduct(self):
        ker = DotProduct(sigma_0=2.)
        onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32)
        model_onnx = onx.to_onnx(inputs=[('X', FloatTensorType())],
                                 outputs=[('Y', FloatTensorType())],
                                 dtype=np.float32)
        sess = InferenceSession(model_onnx.SerializeToString())

        x = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
        res = sess.run(None, {'X': x})
        m1 = res[0]
        m2 = ker(x)
        assert_almost_equal(m1, m2, decimal=5)

        res = sess.run(None, {'X': Xtest_.astype(np.float32)})
        m1 = res[0]
        m2 = ker(Xtest_)
        assert_almost_equal(m1, m2, decimal=2)
Example #28
def get_nonlinear_model(params):
    """output a nonlinear GPR model
    params: dict, containing details on PCA if required

    returns:
    model: sklearn estimator
    """
    kernel = (1 * DotProduct(sigma_0=1e-5, sigma_0_bounds='fixed')
              + RBF(length_scale=10.0, length_scale_bounds=(1.0, 1000.0))
              + WhiteKernel(10.0, noise_level_bounds=(1.0, 1000.0)))
    ss = StandardScaler()
    gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, normalize_y=True, n_restarts_optimizer=50)

    if params['pca']:
        pca = PCA(n_components=params['pca_comps'], whiten=True)
        nonlinear_model = Pipeline(steps=[('scale', ss), ('pca', pca),
                                          ('model', gpr)])
    else:
        nonlinear_model = Pipeline(steps=[('scale', ss), ('model', gpr)])

    return clone(nonlinear_model)
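Example call, with the parameter keys taken from the function body:

params = {'pca': True, 'pca_comps': 10}
model = get_nonlinear_model(params)   # unfitted Pipeline: scale -> pca -> GPR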
Example #29
def train_concat_model(target_wl, observed_data, primer_data, closest_wl):
    def remove_duplicate_knobs(xd, yd):
        unique_knobs = [tuple(xd[i, :]) for i in range(xd.shape[0])]
        idxs_to_remove = []
        for i in range(xd.shape[0]):
            following_knobs = [
                tuple(xd[j, :]) for j in range(i + 1, xd.shape[0])
            ]
            if tuple(xd[i]) in following_knobs:
                idxs_to_remove.append(i)
        xd = np.delete(xd, idxs_to_remove, 0)
        yd = np.delete(yd, idxs_to_remove, 0)
        return xd, yd

    closest_data = observed_data.prune_columns(
        ['workload id'] + observed_data.get_tuning_knob_headers() +
        ['latency']).get_specific_workload(closest_wl).get_dataframe()
    target_data = primer_data.prune_columns(
        ['workload id'] + primer_data.get_tuning_knob_headers() +
        ['latency']).get_specific_workload(target_wl).get_dataframe()
    concat_data = pd.concat([closest_data, target_data],
                            ignore_index=True).values
    # concat_data = target_data.values
    X, y = remove_duplicate_knobs(concat_data[:, 1:-1], concat_data[:, -1])

    alpha = np.array([7e8 for i in range(X.shape[0] - 5)] +
                     [1e1 for i in range(5)])
    kernel = ConstantKernel(0.01, (0.01, 0.5)) * (
        DotProduct(sigma_0=2.0, sigma_0_bounds=(0.01, 30.0)) ** 2)
    model = GaussianProcessRegressor(kernel=kernel,
                                     normalize_y=True,
                                     alpha=alpha,
                                     n_restarts_optimizer=15)

    ss_x = StandardScaler()
    ss_y = StandardScaler()

    if use_scaling:
        binary_knobs = X[:, 6]
        X = ss_x.fit_transform(X)
        X[:, 6] = binary_knobs
        # y = ss_y.fit_transform(np.expand_dims(y,1))
    model.fit(X, y)
    return model, ss_x, ss_y
Example #30
    def _kernels(self, i):
        kernels = [
            1.0 * RBF(length_scale=self.length_scale,
                      length_scale_bounds=self.length_scale_bounds),
            1.0 * RationalQuadratic(length_scale=self.length_scale,
                                    alpha=self.alpha),
            1.0 * ExpSineSquared(length_scale=self.length_scale,
                                 periodicity=self.periodicity,
                                 length_scale_bounds=self.length_scale_bounds,
                                 periodicity_bounds=self.periodicity_bounds),
            ConstantKernel(constant_value=self.constant_value,
                           constant_value_bounds=self.constant_value_bounds) *
            (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.1, 10.0))**2),
            1.0 * Matern(length_scale=self.length_scale,
                         length_scale_bounds=self.length_scale_bounds,
                         nu=self.nu)
        ]

        return kernels[i]