Example #1
    def train(self, X_train, y_train):
        if self.flavour == 'plain':
            self.smt_model = KRG(poly=self.poly,
                                 corr=self.corr,
                                 theta0=self.theta0)
        elif self.flavour == 'pls':
            self.smt_model = KPLS(poly=self.poly,
                                  corr=self.corr,
                                  theta0=self.theta0,
                                  n_comp=self.n_comp)
        elif self.flavour == 'plsk':
            self.smt_model = KPLSK(poly=self.poly,
                                   corr=self.corr,
                                   theta0=self.theta0,
                                   n_comp=self.n_comp)
        elif self.flavour == 'gepls':
            self.smt_model = GEKPLS(poly=self.poly,
                                    corr=self.corr,
                                    theta0=self.theta0,
                                    n_comp=self.n_comp,
                                    xlimits=self.xlimits,
                                    delta_x=self.delta_x,
                                    extra_points=self.extra_points)

        super(KrigingModel, self).train(X_train, y_train)
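For reference, a minimal standalone sketch of the SMT calls this dispatch wraps. The KrigingModel constructor is not shown in the snippet, so the option values below (poly, corr, theta0, n_comp) and the training data are illustrative assumptions only:

import numpy as np
from smt.surrogate_models import KRG, KPLS, KPLSK

# illustrative training data (not part of the original snippet)
X_train = np.random.rand(50, 3)
y_train = np.sum(X_train ** 2, axis=1)

# flavour 'plain' corresponds to ordinary Kriging
sm = KRG(poly='constant', corr='squar_exp', theta0=[1e-2] * 3)
# flavour 'pls' / 'plsk' would instead use PLS dimension reduction:
# sm = KPLS(poly='constant', corr='squar_exp', theta0=[1e-2], n_comp=1)
# sm = KPLSK(poly='constant', corr='squar_exp', theta0=[1e-2], n_comp=1)

sm.set_training_values(X_train, y_train)
sm.train()
print(sm.predict_values(np.array([[0.5, 0.5, 0.5]])))

The 'gepls' flavour (GEKPLS) additionally requires design-space limits and training derivatives, which is why it takes the extra xlimits, delta_x and extra_points options in the dispatch above.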
Example #2
    def setUp(self):
        ndim = 3
        nt = 100
        ne = 100
        ncomp = 1

        problems = OrderedDict()
        problems['exp'] = TensorProduct(ndim=ndim, func='exp')
        problems['tanh'] = TensorProduct(ndim=ndim, func='tanh')
        problems['cos'] = TensorProduct(ndim=ndim, func='cos')

        sms = OrderedDict()
        sms['LS'] = LS()
        sms['QP'] = QP()
        sms['KRG'] = KRG(theta0=[1e-2] * ndim)
        sms['KPLS'] = KPLS(theta0=[1e-2] * ncomp, n_comp=ncomp)
        sms['KPLSK'] = KPLSK(theta0=[1] * ncomp, n_comp=ncomp)
        sms['GEKPLS'] = GEKPLS(theta0=[1e-2] * ncomp,
                               n_comp=ncomp,
                               delta_x=1e-1)
        if compiled_available:
            sms['IDW'] = IDW()
            sms['RBF'] = RBF()
            sms['RMTC'] = RMTC()
            sms['RMTB'] = RMTB()

        t_errors = {}
        t_errors['LS'] = 1.0
        t_errors['QP'] = 1.0
        t_errors['KRG'] = 1e-5
        t_errors['KPLS'] = 1e-5
        t_errors['KPLSK'] = 1e-5
        t_errors['GEKPLS'] = 1e-5
        if compiled_available:
            t_errors['IDW'] = 1e-15
            t_errors['RBF'] = 1e-2
            t_errors['RMTC'] = 1e-1
            t_errors['RMTB'] = 1e-1

        e_errors = {}
        e_errors['LS'] = 1.5
        e_errors['QP'] = 1.5
        e_errors['KRG'] = 1e-2
        e_errors['KPLS'] = 1e-2
        e_errors['KPLSK'] = 1e-2
        e_errors['GEKPLS'] = 1e-2
        if compiled_available:
            e_errors['IDW'] = 1e0
            e_errors['RBF'] = 1e0
            e_errors['RMTC'] = 2e-1
            e_errors['RMTB'] = 2e-1

        self.nt = nt
        self.ne = ne
        self.ndim = ndim
        self.problems = problems
        self.sms = sms
        self.t_errors = t_errors
        self.e_errors = e_errors
Example #3
def kPLSK(xt, yt, xtest, ytest):
    ########### The KPLSK model
    # 'n_comp' must be an integer in [1, ndim) and 'theta0' a list of length n_comp.

    t = KPLSK(n_comp=2, theta0=[1e-2, 1e-2], print_prediction=False)
    t.set_training_values(xt, yt)
    t.train()

    print('KPLSK,  err: ' + str(compute_rms_error(t, xtest, ytest)))
    title = 'KPLSK model: validation of the prediction model'
    return t, title, xtest, ytest
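A hedged usage sketch for the helper above. The 3-D training/validation arrays are made up for illustration, and compute_rms_error is assumed to be importable from SMT's utilities (as in the SMT example scripts this snippet resembles):

import numpy as np
import matplotlib.pyplot as plt

# hypothetical 3-D data; any smooth test function would do
xt = np.random.rand(80, 3)
yt = np.sin(xt[:, 0]) + np.cos(xt[:, 1]) + xt[:, 2] ** 2
xtest = np.random.rand(20, 3)
ytest = np.sin(xtest[:, 0]) + np.cos(xtest[:, 1]) + xtest[:, 2] ** 2

t, title, xtest, ytest = kPLSK(xt, yt, xtest, ytest)

# predicted vs. true values on the validation points (diagonal = perfect fit)
plt.plot(ytest, ytest, '-.')
plt.plot(ytest, t.predict_values(xtest), '.')
plt.title(title)
plt.show()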
Example #4
    def test_kplsk(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KPLSK

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])

        sm = KPLSK(theta0=[1e-2])
        sm.set_training_values(xt, yt)
        sm.train()

        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        # estimated variance
        s2 = sm.predict_variances(x)
        # derivative with respect to the first variable
        dydx = sm.predict_derivatives(xt, 0)

        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()

        # add a plot with variance
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.fill_between(
            np.ravel(x),
            np.ravel(y - 3 * np.sqrt(s2)),
            np.ravel(y + 3 * np.sqrt(s2)),
            color="lightgrey",
        )
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction", "Confidence Interval 99%"])
        plt.show()
Example #5
    def test_kplsk(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KPLSK

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0])

        sm = KPLSK(theta0=[1e-2])
        sm.set_training_values(xt, yt)
        sm.train()

        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        # derivative with respect to the first variable (unused here)
        yy = sm.predict_derivatives(xt, 0)
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()
Example #6
    def test_kplsk(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KPLSK

        xt = np.array([0., 1., 2., 3., 4.])
        yt = np.array([0., 1., 1.5, 0.5, 1.0])

        sm = KPLSK(theta0=[1e-2])
        sm.set_training_values(xt, yt)
        sm.train()

        num = 100
        x = np.linspace(0., 4., num)
        y = sm.predict_values(x)
        # derivative with respect to the first variable (unused here)
        yy = sm.predict_derivatives(xt, 0)
        plt.plot(xt, yt, 'o')
        plt.plot(x, y)
        plt.xlabel('x')
        plt.ylabel('y')
        plt.legend(['Training data', 'Prediction'])
        plt.show()
Example #7
    def train(self, train_method, **kwargs):
        """Trains the surrogate model with given training data.

        Parameters
        ----------
        train_method : str
            Training method: one of ``IDW``, ``KPLS``, ``KPLSK``, ``KRG``, ``LS``, ``QP``, ``RBF``, ``RMTB``, ``RMTC``
        kwargs : dict
            Additional keyword arguments supported by SMT objects

        """

        if train_method == 'IDW':
            self.trained = IDW(**kwargs)
        elif train_method == 'KPLS':
            self.trained = KPLS(**kwargs)
        elif train_method == 'KPLSK':
            self.trained = KPLSK(**kwargs)
        elif train_method == 'KRG':
            self.trained = KRG(**kwargs)
        elif train_method == 'LS':
            self.trained = LS(**kwargs)
        elif train_method == 'QP':
            self.trained = QP(**kwargs)
        elif train_method == 'RBF':
            self.trained = RBF(**kwargs)
        elif train_method == 'RMTB':
            self.trained = RMTB(xlimits=self.limits, **kwargs)
        elif train_method == 'RMTC':
            self.trained = RMTC(xlimits=self.limits, **kwargs)
        else:
            raise ValueError(
                'train_method must be one of IDW, KPLS, KPLSK, KRG, LS, QP, RBF, RMTB, RMTC'
            )

        self.trained.set_training_values(self.x_samp, self.m_prop)
        self.trained.train()
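A minimal sketch of how the dispatcher above might be invoked, assuming a hypothetical owning class (here called SurrogateWrapper) whose constructor stores x_samp, m_prop and limits; those attribute names come from the snippet, while the class name and data are illustrative assumptions:

import numpy as np

x_samp = np.random.rand(100, 2)                   # sampled design points
m_prop = x_samp[:, 0] ** 2 + x_samp[:, 1]         # sampled responses (e.g. propellant mass)
limits = np.array([[0.0, 1.0], [0.0, 1.0]])       # only needed by RMTB/RMTC

model = SurrogateWrapper(x_samp, m_prop, limits)  # hypothetical constructor
model.train('KPLSK', n_comp=2, theta0=[1e-2, 1e-2])
y_hat = model.trained.predict_values(np.array([[0.5, 0.5]]))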
Example #8
    def setUp(self):
        ndim = 3
        nt = 100
        ne = 100
        ncomp = 1

        problems = OrderedDict()
        problems["exp"] = TensorProduct(ndim=ndim, func="exp")
        problems["tanh"] = TensorProduct(ndim=ndim, func="tanh")
        problems["cos"] = TensorProduct(ndim=ndim, func="cos")

        sms = OrderedDict()
        sms["LS"] = LS()
        sms["QP"] = QP()
        sms["KRG"] = KRG(theta0=[1e-2] * ndim)
        sms["MFK"] = MFK(theta0=[1e-2] * ndim)
        sms["KPLS"] = KPLS(theta0=[1e-2] * ncomp, n_comp=ncomp)
        sms["KPLSK"] = KPLSK(theta0=[1] * ncomp, n_comp=ncomp)
        sms["GEKPLS"] = GEKPLS(theta0=[1e-2] * ncomp,
                               n_comp=ncomp,
                               delta_x=1e-1)
        sms["GENN"] = genn()
        if compiled_available:
            sms["IDW"] = IDW()
            sms["RBF"] = RBF()
            sms["RMTC"] = RMTC()
            sms["RMTB"] = RMTB()

        t_errors = {}
        t_errors["LS"] = 1.0
        t_errors["QP"] = 1.0
        t_errors["KRG"] = 1e0
        t_errors["MFK"] = 1e0
        t_errors["KPLS"] = 1e0
        t_errors["KPLSK"] = 1e0
        t_errors["GEKPLS"] = 1e0
        t_errors["GENN"] = 1e0
        if compiled_available:
            t_errors["IDW"] = 1e0
            t_errors["RBF"] = 1e-2
            t_errors["RMTC"] = 1e-1
            t_errors["RMTB"] = 1e-1

        e_errors = {}
        e_errors["LS"] = 1.5
        e_errors["QP"] = 1.5
        e_errors["KRG"] = 1e-2
        e_errors["MFK"] = 1e-2
        e_errors["KPLS"] = 1e-2
        e_errors["KPLSK"] = 1e-2
        e_errors["GEKPLS"] = 1e-2
        e_errors["GENN"] = 1e-2
        if compiled_available:
            e_errors["IDW"] = 1e0
            e_errors["RBF"] = 1e0
            e_errors["RMTC"] = 2e-1
            e_errors["RMTB"] = 2e-1

        self.nt = nt
        self.ne = ne
        self.ndim = ndim
        self.problems = problems
        self.sms = sms
        self.t_errors = t_errors
        self.e_errors = e_errors
Example #9
# an example using 2 principal components.

t = KPLS(n_comp=2, theta0=[1e-2, 1e-2], print_prediction=False, corr='abs_exp')
t.set_training_values(xt, yt[:, 0])

t.train()

# Prediction of the validation points
y = t.predict_values(xtest)
print('KPLS + abs exp,  err: ' + str(compute_rms_error(t, xtest, ytest)))

########### The KPLSK model

# 'n_comp' must be an integer in [1, ndim) and 'theta0' a list of length n_comp.

t = KPLSK(n_comp=2, theta0=[1e-2, 1e-2], print_prediction=False)
t.set_training_values(xt, yt[:, 0])

t.train()

# Prediction of the validation points
y = t.predict_values(xtest)
print("KPLSK,  err: " + str(compute_rms_error(t, xtest, ytest)))
if plot_status:
    k, l = 0, 0
    f, axarr = plt.subplots(4, 3)
    # predicted vs. true values on the validation set (diagonal = perfect fit)
    axarr[k, l].plot(ytest, ytest, "-.")
    axarr[k, l].plot(ytest, y, ".")
    l += 1
    axarr[3, 2].arrow(0.3, 0.3, 0.2, 0)
    axarr[3, 2].arrow(0.3, 0.3, 0.0, 0.4)
Example #10
    def test_smt_kplsk(self):
        self._check_smt(KPLSK(theta0=[1e-2]))
    xt = training_set[:, 0:3]
    yt = training_set[:, 5]

    # train the model

    # define a RMTS spline interpolant
    # TODO replace with a different SMT surrogate
    limits = np.array([[0.2, 0.8], [0.05, 1.0], [0.0, 3.5]])
    sm = RMTB(print_global=False,
              order=3,
              xlimits=limits,
              nonlinear_maxiter=100)
    # sm1 = KRG(hyper_opt='TNC', corr='abs_exp')
    # sm2 = KPLS(n_comp=3, corr='abs_exp', hyper_opt='TNC')
    sm3 = KPLSK(print_global=False,
                n_comp=3,
                theta0=np.ones(3),
                corr='squar_exp')
    sm4 = QP(print_global=False)
    sm5 = LS(print_global=False)
    sm1 = KPLS(print_global=False, n_comp=3, theta0=np.ones(3), corr='abs_exp')
    sm2 = KRG(print_global=False, theta0=np.ones(3), corr='abs_exp')
    sm6 = MOE(smooth_recombination=False, n_clusters=2)
    experts_list = dict()
    experts_list['KRG'] = (KRG, {'theta0': np.ones(3), 'corr': 'abs_exp'})
    experts_list['RBF'] = (RBF, dict())
    experts_list['KPLS'] = (KPLS, {
        'n_comp': 3,
        'theta0': np.ones(3),
        'corr': 'abs_exp'
    })
    experts_list['KPLSK'] = (KPLSK, {