Example #1
    def test_mfk(self):
        self.problems = ["exp", "tanh", "cos"]

        for fname in self.problems:
            prob = TensorProduct(ndim=self.ndim, func=fname)
            sampling = FullFactorial(xlimits=prob.xlimits, clip=True)

            np.random.seed(0)
            xt = sampling(self.nt)
            yt = prob(xt)
            for i in range(self.ndim):
                yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)

            y_lf = 2 * prob(xt) + 2
            x_lf = deepcopy(xt)
            np.random.seed(1)
            xe = sampling(self.ne)
            ye = prob(xe)

            sm = MFK(theta0=[1e-2] * self.ndim)
            if sm.options.is_declared("xlimits"):
                sm.options["xlimits"] = prob.xlimits
            sm.options["print_global"] = False

            sm.set_training_values(xt, yt[:, 0])
            sm.set_training_values(x_lf, y_lf[:, 0], name=0)

            with Silence():
                sm.train()

            t_error = compute_rms_error(sm)
            e_error = compute_rms_error(sm, xe, ye)

            self.assert_error(t_error, 0.0, 1)
            self.assert_error(e_error, 0.0, 1)
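A note on compute_rms_error, which every example on this page relies on: it is imported from smt.utils and returns a relative RMS error. The sketch below illustrates the three call forms used here (training error, validation error against xe/ye, derivative error via kx); it is a minimal re-implementation for illustration only, and the actual SMT function may differ in details such as normalization and shape handling.

import numpy as np

def rms_error_sketch(sm, xe=None, ye=None, kx=None):
    # Illustrative sketch only -- not SMT's implementation.
    if xe is not None and ye is not None:
        if kx is None:
            y_pred = sm.predict_values(xe)           # validation error
        else:
            y_pred = sm.predict_derivatives(xe, kx)  # error of d y / d x_kx
        ye = np.asarray(ye).reshape(y_pred.shape)
        return np.linalg.norm(y_pred - ye) / np.linalg.norm(ye)
    # No validation data: fall back to the stored training set (output 0)
    xt, yt = sm.training_points[None][0]
    y_pred = sm.predict_values(xt)
    return np.linalg.norm(y_pred - yt) / np.linalg.norm(yt)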
Example #2
def gEKPLS(xt, yt, xtest, ytest, funXLimits, ndim):
    # 'n_comp' must be an integer in [1, ndim) and 'theta0' a list of n_comp values

    t = GEKPLS(n_comp=2,
               theta0=[1e-2, 1e-2],
               xlimits=funXLimits,
               delta_x=1e-2,
               extra_points=1,
               print_prediction=False)
    t.set_training_values(xt, yt[:, 0])
    # Add the gradient information (columns 1..ndim of yt)
    for i in range(ndim):
        t.set_training_derivatives(xt, yt[:, 1 + i].reshape((yt.shape[0], 1)),
                                   i)
    t.train()

    # Prediction of the validation points
    y = t.predict_values(xtest)
    print('GEKPLS1,  err: ' + str(compute_rms_error(t, xtest, ytest)))

    # Prediction of the derivatives with respect to each direction of the space.
    # ydtest and plot_status are module-level globals in the original script.
    ntest = xtest.shape[0]
    yd_prediction = np.zeros((ntest, ndim))
    for i in range(ndim):
        yd_prediction[:, i] = t.predict_derivatives(xtest, kx=i).T
        print('GEKPLS1, err of the ' + str(i + 1) + '-th derivative: ' +
              str(compute_rms_error(t, xtest, ydtest[:, i], kx=i)))

        if plot_status:
            plt.plot(ydtest[:, i], ydtest[:, i], '-.')
            plt.plot(ydtest[:, i], yd_prediction[:, i], '.')

    if plot_status:
        plt.show()

    title = 'GEKPLS model'
    return t, title, xtest, ytest
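gEKPLS expects yt to carry the function values in column 0 and the ndim partial derivatives in columns 1..ndim, and it reads ydtest and plot_status from module scope. Below is a minimal sketch of how such a data set could be assembled and the helper called, assuming the sketch lives in the same script as gEKPLS; the Sphere problem, the LHS sampler, and the sample sizes are illustrative choices, not part of the snippet above.

import numpy as np
from smt.problems import Sphere
from smt.sampling_methods import LHS

ndim = 4
fun = Sphere(ndim=ndim)                   # illustrative benchmark problem
sampling = LHS(xlimits=fun.xlimits, random_state=0)

xt = sampling(100)
yt = fun(xt)                              # column 0: function values
for i in range(ndim):
    yt = np.concatenate((yt, fun(xt, kx=i)), axis=1)  # columns 1..ndim

ntest = 500
xtest = sampling(ntest)
ytest = fun(xtest)
ydtest = np.zeros((ntest, ndim))          # reference derivatives, read by gEKPLS
for i in range(ndim):
    ydtest[:, i] = fun(xtest, kx=i).T
plot_status = False                       # gEKPLS also reads this global

t, title, _, _ = gEKPLS(xt, yt, xtest, ytest, fun.xlimits, ndim)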
Example #3
    def test_mfk_1fidelity(self):
        self.problems = ["exp", "tanh", "cos"]

        for fname in self.problems:
            prob = TensorProduct(ndim=self.ndim, func=fname)
            sampling = LHS(xlimits=prob.xlimits, random_state=0)

            np.random.seed(0)
            xt = sampling(self.nt)
            yt = prob(xt)
            for i in range(self.ndim):
                yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)

            sampling = LHS(xlimits=prob.xlimits, random_state=1)
            xv = sampling(self.ne)
            yv = prob(xv)

            sm = MFK(
                theta0=[1e-2] * self.ndim,
                print_global=False,
            )

            sm.set_training_values(xt, yt[:, 0])

            with Silence():
                sm.train()

            t_error = compute_rms_error(sm)
            e_error = compute_rms_error(sm, xv, yv)

            self.assert_error(t_error, 0.0, 1e-6)
            self.assert_error(e_error, 0.0, 1e-6)
Example #4
    def test_mfkplsk_derivs(self):

        if self.ndim < 2:
            print("test_mfkplsk_derivs requires the dimension to be greater than 1")
            return

        prob = Sphere(ndim=self.ndim)
        sampling = LHS(xlimits=prob.xlimits)

        nt = 100
        np.random.seed(0)
        xt = sampling(nt)
        yt = prob(xt)
        dyt = {}
        for kx in range(prob.xlimits.shape[0]):
            dyt[kx] = prob(xt, kx=kx)

        y_lf = 2 * prob(xt) + 2
        x_lf = deepcopy(xt)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        dye = {}
        for kx in range(prob.xlimits.shape[0]):
            dye[kx] = prob(xe, kx=kx)

        sm = MFKPLSK()

        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False

        # to test some options
        sm.options["eval_noise"] = False

        sm.options["n_comp"] = self.n_comp
        sm.options["theta0"] = [1e-2] * self.n_comp
        sm.set_training_values(xt, yt)
        sm.set_training_values(x_lf, y_lf, name=0)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
        e_error0 = compute_rms_error(sm, xe, dye[0], 0)
        e_error1 = compute_rms_error(sm, xe, dye[1], 1)

        if print_output:
            print(
                "%18.9e %18.9e %18.9e %18.9e"
                % (t_error, e_error, e_error0, e_error1)
            )

        self.assert_error(e_error0, 0.0, 1e-1)
        self.assert_error(e_error1, 0.0, 1e-1)
Example #5
    def run_MF_test(self):
        method_name = inspect.stack()[1][3]
        pname = method_name.split("_")[1]
        sname = method_name.split("_")[2]

        prob = self.problems[pname]
        sampling = LHS(xlimits=prob.xlimits)

        nt = 500
        np.random.seed(0)
        xt = sampling(nt)
        yt = prob(xt)
        dyt = {}
        for kx in range(prob.xlimits.shape[0]):
            dyt[kx] = prob(xt, kx=kx)

        y_lf = 2 * prob(xt) + 2
        x_lf = deepcopy(xt)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        dye = {}
        for kx in range(prob.xlimits.shape[0]):
            dye[kx] = prob(xe, kx=kx)

        sm0 = self.sms[sname]

        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False

        sm.set_training_values(xt, yt)
        sm.set_training_values(x_lf, y_lf, name=0)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
        e_error0 = compute_rms_error(sm, xe, dye[0], 0)
        e_error1 = compute_rms_error(sm, xe, dye[1], 1)

        if print_output:
            print(
                "%8s %6s %18.9e %18.9e %18.9e %18.9e"
                % (pname[:6], sname, t_error, e_error, e_error0, e_error1)
            )

        self.assert_error(e_error0, 0.0, 1e-1)
        self.assert_error(e_error1, 0.0, 1e-1)
Example #6
    def run_test(self):
        method_name = inspect.stack()[1][3]
        pname = method_name.split('_')[1]
        sname = method_name.split('_')[2]

        prob = self.problems[pname]
        sampling = LHS(xlimits=prob.xlimits)
        nt = self.nt

        if sname == 'MFK':
            nt = 500

        np.random.seed(0)
        xt = sampling(nt)
        yt = prob(xt)

        dyt = {}
        for kx in range(prob.xlimits.shape[0]):
            dyt[kx] = prob(xt, kx=kx)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        dye = {}
        for kx in range(prob.xlimits.shape[0]):
            dye[kx] = prob(xe, kx=kx)

        sm0 = self.sms[sname]

        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared('xlimits'):
            sm.options['xlimits'] = prob.xlimits
        sm.options['print_global'] = False

        sm.set_training_values(xt, yt)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
        e_error0 = compute_rms_error(sm, xe, dye[0], 0)
        e_error1 = compute_rms_error(sm, xe, dye[1], 1)

        if print_output:
            print('%8s %6s %18.9e %18.9e %18.9e %18.9e' %
                  (pname[:6], sname, t_error, e_error, e_error0, e_error1))

        self.assert_error(e_error0, 0., 2e-1)
        self.assert_error(e_error1, 0., 2e-1)
Example #7
    def run_test(self):
        method_name = inspect.stack()[1][3]
        pname = method_name.split("_")[1]
        sname = method_name.split("_")[2]

        prob = self.problems[pname]
        sampling = FullFactorial(xlimits=prob.xlimits, clip=True)

        np.random.seed(0)
        xt = sampling(self.nt)
        yt = prob(xt)
        for i in range(self.ndim):
            yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)

        sm0 = self.sms[sname]

        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False

        if sname in ["KPLS", "KRG", "KPLSK", "GEKPLS"]:
            optname = method_name.split("_")[3]
            sm.options["hyper_opt"] = optname

        sm.set_training_values(xt, yt[:, 0])
        if sm.supports["training_derivatives"]:
            for i in range(self.ndim):
                sm.set_training_derivatives(xt, yt[:, i + 1], i)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)

        if sm.supports["variances"]:
            sm.predict_variances(xe)

        if pname == "cos":
            self.assertLessEqual(e_error, self.e_errors[sname] + 1.5)
        else:
            self.assertLessEqual(e_error, self.e_errors[sname] + 1e-4)
        self.assertLessEqual(t_error, self.t_errors[sname] + 1e-4)
Example #8
    def test_mfk_derivs(self):

        prob = Sphere(ndim=self.ndim)
        sampling = LHS(xlimits=prob.xlimits)

        nt = 500
        np.random.seed(0)
        xt = sampling(nt)
        yt = prob(xt)
        dyt = {}
        for kx in range(prob.xlimits.shape[0]):
            dyt[kx] = prob(xt, kx=kx)

        y_lf = 2 * prob(xt) + 2
        x_lf = deepcopy(xt)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        dye = {}
        for kx in range(prob.xlimits.shape[0]):
            dye[kx] = prob(xe, kx=kx)

        sm = MFK(theta0=[1e-2] * self.ndim)
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False

        sm.set_training_values(xt, yt)
        sm.set_training_values(x_lf, y_lf, name=0)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
        e_error0 = compute_rms_error(sm, xe, dye[0], 0)
        e_error1 = compute_rms_error(sm, xe, dye[1], 1)

        if print_output:
            print(
                "%18.9e %18.9e %18.9e %18.9e"
                % (t_error, e_error, e_error0, e_error1)
            )

        self.assert_error(e_error0, 0.0, 1e-1)
        self.assert_error(e_error1, 0.0, 1e-1)
Example #9
    def test_vfm(self):
        # Problem set up
        ndim = 8
        ntest = 500
        ndoeLF = 10 * ndim
        ndoeHF = 3
        funLF = WaterFlowLFidelity(ndim=ndim)
        funHF = WaterFlow(ndim=ndim)
        deriv1 = True
        deriv2 = True
        LF_candidate = "QP"
        Bridge_candidate = "KRG"
        type_bridge = "Multiplicative"
        optionsLF = {}
        optionsB = {"theta0": [1e-2] * ndim, "print_prediction": False, "deriv": False}

        # Construct low/high fidelity data and validation points
        sampling = LHS(xlimits=funLF.xlimits, criterion="m", random_state=42)
        xLF = sampling(ndoeLF)
        yLF = funLF(xLF)
        if deriv1:
            dy_LF = np.zeros((ndoeLF, 1))
            for i in range(ndim):
                yd = funLF(xLF, kx=i)
                dy_LF = np.concatenate((dy_LF, yd), axis=1)

        sampling = LHS(xlimits=funHF.xlimits, criterion="m", random_state=43)
        xHF = sampling(ndoeHF)
        yHF = funHF(xHF)
        if deriv2:
            dy_HF = np.zeros((ndoeHF, 1))
            for i in range(ndim):
                yd = funHF(xHF, kx=i)
                dy_HF = np.concatenate((dy_HF, yd), axis=1)

        xtest = sampling(ntest)
        ytest = funHF(xtest)
        dytest = np.zeros((ntest, ndim))
        for i in range(ndim):
            dytest[:, i] = funHF(xtest, kx=i).T

        # Initialize VFM
        vfm = VFM(
            type_bridge=type_bridge,
            name_model_LF=LF_candidate,
            name_model_bridge=Bridge_candidate,
            X_LF=xLF,
            y_LF=yLF,
            X_HF=xHF,
            y_HF=yHF,
            options_LF=optionsLF,
            options_bridge=optionsB,
            dy_LF=dy_LF,
            dy_HF=dy_HF,
        )

        # Prediction of the validation points
        rms_error = compute_rms_error(vfm, xtest, ytest)
        self.assert_error(rms_error, 0.0, 3e-1)
Example #10
def quadraticModel(xt, yt, xtest, ytest):
    ########### The QP model

    t = QP(print_prediction=False)
    t.set_training_values(xt, yt)

    t.train()
    title = 'QP model: validation of the prediction model'
    print('QP,  err: ' + str(compute_rms_error(t, xtest, ytest)))
    return t, title, xtest, ytest
Example #11
    def run_test(self):
        method_name = inspect.stack()[1][3]
        pname = method_name.split("_")[1]
        sname = method_name.split("_")[2]

        prob = self.problems[pname]

        sampling = LHS(xlimits=prob.xlimits, random_state=42)

        np.random.seed(0)
        xt = sampling(self.nt)
        yt = prob(xt)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)

        sm0 = self.sms[sname]

        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False

        sm.set_training_values(xt, yt)

        with Silence():
            sm.train()

        n_comp = sm.options["n_comp"]

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)

        if print_output:
            print("%8s %6s %18.9e %18.9e" %
                  (pname[:6], sname, t_error, e_error))

        self.assert_error(t_error, 0.0, self.t_errors[sname], 1e-5)
        self.assert_error(e_error, 0.0, self.e_errors[sname], 1e-5)
        self.assertEqual(n_comp, self.n_comp_opt[pname])
Example #12
def kPLSK(xt, yt, xtest, ytest):
    ########### The KPLSK model
    # 'n_comp' must be an integer in [1, ndim) and 'theta0' a list of length n_comp.

    t = KPLSK(n_comp=2, theta0=[1e-2, 1e-2], print_prediction=False)
    t.set_training_values(xt, yt)
    t.train()

    print('KPLSK,  err: ' + str(compute_rms_error(t, xtest, ytest)))
    title = 'KPLSK model: validation of the prediction model'
    return t, title, xtest, ytest
Example #13
def iDW(xt, yt, xtest, ytest):
    ########### The IDW model

    t = IDW(print_prediction=False)
    t.set_training_values(xt, yt)
    t.train()

    # Prediction of the validation points
    y = t.predict_values(xtest)
    print('IDW,  err: ' + str(compute_rms_error(t, xtest, ytest)))
    title = 'IDW'
    return t, title, xtest, ytest
Example #14
def linearModel(xt, yt, xtest, ytest):  # model needs floats
    # Initialization of the model
    t = LS(print_prediction=False)
    # Add the DOE
    t.set_training_values(xt, yt)

    # Train the model
    t.train()
    title = 'LS model: validation of the prediction model'
    # Prediction of the validation points
    print('LS,  err: ' + str(compute_rms_error(t, xtest, ytest)))
    return t, title, xtest, ytest
Example #15
    def test_linear_search(self):
        for ls in ["bracketed", "cubic", "quadratic", "null"]:
            self.sms[ls] = RMTB(xlimits=self.xlimits,
                                line_search=ls,
                                print_global=False)
            self.sms[ls].set_training_values(self.xt, self.yt)

            with Silence():
                self.sms[ls].train()

            error = compute_rms_error(self.sms[ls], self.xref, self.yref)
            self.assert_error(error, 0.0, 1e-1)
Example #16
def rBF(xt, yt, xtest, ytest):
    t = RBF(print_prediction=False, poly_degree=0)
    t.set_training_values(xt, yt)
    t.train()

    # Prediction of the validation points
    y = t.predict_values(xtest)
    print('RBF,  err: ' + str(compute_rms_error(t, xtest, ytest)))
    # Plot prediction/true values

    title = 'RBF model'
    return t, title, xtest, ytest
Example #17
def rMTCSimba(xt, yt, xtest, ytest, funXLimits):
    t = RMTC(xlimits=funXLimits,
             min_energy=True,
             nonlinear_maxiter=20,
             print_prediction=False)
    t.set_training_values(xt, yt)
    t.train()

    # Prediction of the validation points
    print('RMTC,  err: ' + str(compute_rms_error(t, xtest, ytest)))
    title = 'RMTC model'
    return t, title, xtest, ytest
Example #18
    def run_MF_test(self):
        method_name = inspect.stack()[1][3]
        pname = method_name.split("_")[1]
        sname = method_name.split("_")[2]

        prob = self.problems[pname]
        sampling = FullFactorial(xlimits=prob.xlimits, clip=True)

        np.random.seed(0)
        xt = sampling(self.nt)
        yt = prob(xt)
        for i in range(self.ndim):
            yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)

        y_lf = 2 * prob(xt) + 2
        x_lf = deepcopy(xt)
        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)

        sm0 = self.sms[sname]

        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False

        sm.set_training_values(xt, yt[:, 0])
        sm.set_training_values(x_lf, y_lf[:, 0], name=0)
        if sm.supports["training_derivatives"]:
            for i in range(self.ndim):
                sm.set_training_derivatives(xt, yt[:, i + 1], i)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
Example #19
    def run_test(self):
        method_name = inspect.stack()[1][3]
        pname = method_name.split('_')[1]
        sname = method_name.split('_')[2]

        prob = self.problems[pname]
        sampling = LHS(xlimits=prob.xlimits)

        np.random.seed(0)
        xt = sampling(self.nt)
        yt = prob(xt)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)

        sm0 = self.sms[sname]

        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared('xlimits'):
            sm.options['xlimits'] = prob.xlimits
        sm.options['print_global'] = False

        sm.set_training_values(xt, yt)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)

        if print_output:
            print('%8s %6s %18.9e %18.9e'
                  % (pname[:6], sname, t_error, e_error))

        self.assert_error(t_error, 0., self.t_errors[sname])
        self.assert_error(e_error, 0., self.e_errors[sname])
Example #20
def rMTBSimba(xt, yt, xtest, ytest, funXLimits):
    t = RMTB(xlimits=funXLimits,
             min_energy=True,
             nonlinear_maxiter=20,
             print_prediction=False)
    t.set_training_values(xt, yt)
    # Add the gradient information
    #    for i in range(ndim):
    #        t.set_training_derivatives(xt,yt[:, 1+i].reshape((yt.shape[0],1)),i)
    t.train()

    # Prediction of the validation points
    print('RMTB,  err: ' + str(compute_rms_error(t, xtest, ytest)))
    # plot prediction/true values
    title = 'RMTB'
    return t, title, xtest, ytest
Example #21
def kriging(xt, yt, xtest, ytest, ndim):
    ########### The Kriging model

    # The variable 'theta0' is a list of length ndim.
    t = KRG(theta0=[1e-2] * ndim, print_prediction=False)
    t.set_training_values(xt, yt)
    t.train()

    title = 'Kriging model: validation of the prediction model'
    print(title)
    print('Kriging,  err: ' + str(compute_rms_error(t, xtest, ytest)))
    print("theta values", t.optimal_theta)

    # Plot the function, the prediction and the 95% confidence interval based on
    # the MSE
    return t, title, xtest, ytest
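All of the helper functions above share the same contract: take a training set and a validation set, train one SMT surrogate, print its relative RMS error via compute_rms_error, and return (model, title, xtest, ytest). Below is a hedged sketch of a driver that exercises several of them on one common DOE; the Sphere problem, the LHS sampler, and the sample sizes are illustrative assumptions, not part of the snippets above.

from smt.problems import Sphere
from smt.sampling_methods import LHS

ndim = 4
fun = Sphere(ndim=ndim)
sampling = LHS(xlimits=fun.xlimits, random_state=0)

xt, xtest = sampling(200), sampling(1000)
yt, ytest = fun(xt), fun(xtest)           # single-column outputs

# Helpers that only need the DOE and the validation set
for builder in (quadraticModel, kPLSK, iDW, linearModel, rBF):
    t, title, _, _ = builder(xt, yt, xtest, ytest)
    print(title)

# kriging also needs ndim; the RMT helpers need the design-space limits
t, title, _, _ = kriging(xt, yt, xtest, ytest, ndim)
t, title, _, _ = rMTCSimba(xt, yt, xtest, ytest, fun.xlimits)
t, title, _, _ = rMTBSimba(xt, yt, xtest, ytest, fun.xlimits)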
Example #22
    def test_linear_solver(self):
        for ls in [
                "krylov-dense",
                "dense-chol",
                "lu",
                "ilu",
                "krylov",
                "krylov-lu",
                "krylov-mg",
                "gs",
                "jacobi",
                "mg",
                "null",
        ]:
            self.sms[ls] = RMTB(xlimits=self.xlimits,
                                solver=ls,
                                print_global=False)
            self.sms[ls].set_training_values(self.xt, self.yt)

            with Silence():
                self.sms[ls].train()

            error = compute_rms_error(self.sms[ls], self.xref, self.yref)
            self.assert_error(error, 0.0, 1.1)
Example #23
for i in range(ndim):
    ydtest[:, i] = fun(xtest, kx=i).T

########### The LS model

# Initialization of the model
t = LS(print_prediction=False)
# Add the DOE
t.set_training_values(xt, yt[:, 0])

# Train the model
t.train()

# Prediction of the validation points
y = t.predict_values(xtest)
print('LS,  err: ' + str(compute_rms_error(t, xtest, ytest)))

if plot_status:
    k, l = 0, 0
    f, axarr = plt.subplots(4, 3)
    axarr[k, l].plot(ytest, ytest, '-.')
    axarr[k, l].plot(ytest, y, '.')
    l += 1
    axarr[3, 2].arrow(0.3, 0.3, 0.2, 0)
    axarr[3, 2].arrow(0.3, 0.3, 0.0, 0.4)
    axarr[3, 2].text(0.25, 0.4, r'$\hat{y}$')
    axarr[3, 2].text(0.35, 0.15, r'$y_{true}$')
    axarr[3, 2].axis('off')
    # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
    plt.setp(axarr[3, 2].get_xticklabels(), visible=False)
    plt.setp(axarr[3, 2].get_yticklabels(), visible=False)
Example #24
    def run_test(self):
        method_name = inspect.stack()[1][3]
        pname = method_name.split("_")[1]
        sname = method_name.split("_")[2]

        prob = self.problems[pname]
        sampling = FullFactorial(xlimits=prob.xlimits, clip=True)

        np.random.seed(0)
        xt = sampling(self.nt)
        yt = prob(xt)
        dyt = {}
        for kx in range(prob.xlimits.shape[0]):
            dyt[kx] = prob(xt, kx=kx)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        dye = {}
        for kx in range(prob.xlimits.shape[0]):
            dye[kx] = prob(xe, kx=kx)

        sm0 = self.sms[sname]

        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False

        sm.set_training_values(xt, yt)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)

        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False

        sm.set_training_values(xt, yt)
        for kx in range(prob.xlimits.shape[0]):
            sm.set_training_derivatives(xt, dyt[kx], kx)

        with Silence():
            sm.train()

        ge_t_error = compute_rms_error(sm)
        ge_e_error = compute_rms_error(sm, xe, ye)

        if print_output:
            print("%8s %6s %18.9e %18.9e %18.9e %18.9e" %
                  (pname[:6], sname, t_error, e_error, ge_t_error, ge_e_error))

        self.assert_error(t_error, 0.0, self.t_errors[sname])
        self.assert_error(e_error, 0.0, self.e_errors[sname])
        self.assert_error(ge_t_error, 0.0, self.ge_t_errors[sname])
        self.assert_error(ge_e_error, 0.0, self.ge_e_errors[sname])
Example #25
train2 = np.loadtxt('./TrainingData/RUh_TrainingData[ese]_n=100.csv', delimiter=',')
train = np.append(train1, train2, axis=0)
test = np.loadtxt('./TrainingData/RUh_TrainingData[ese]_n=50.csv', delimiter=',')
xtest, ytest = test[:, :ndim], test[:, ndim]
xt, yt = train[:, :ndim], train[:, ndim:]

# The variable 'theta0' is a list of length ndim.
theta = [0.17675797, 0.0329642, 0.00175843, 0.0328348, 0.00039516, 0.08729705,
         0.00094059, 0.00018145, 0.04470183]
t = KRG(theta0=[1e-2] * ndim, print_prediction=False)
t.set_training_values(xt, yt[:, 0])

t.train()

# Prediction of the validation points
y = t.predict_values(xtest)
print('Kriging,  err: ' + str(compute_rms_error(t, xtest, ytest)))

fig = plt.figure()
plt.plot(ytest, ytest, '-', label=r'$y_{true}$')
plt.plot(ytest, y, 'r.', label=r'$\hat{y}$')

plt.xlabel(r'$y_{true}$')
plt.ylabel(r'$\hat{y}$')

plt.legend(loc='upper left')
plt.title('Kriging model: validation of the prediction model, n = 300')
plt.show()

# Value of theta
print("theta values", t.optimal_theta)