Example #1
    def test_mfk(self):
        self.problems = ["exp", "tanh", "cos"]

        for fname in self.problems:
            prob = TensorProduct(ndim=self.ndim, func=fname)
            sampling = FullFactorial(xlimits=prob.xlimits, clip=True)

            np.random.seed(0)
            xt = sampling(self.nt)
            yt = prob(xt)
            for i in range(self.ndim):
                yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)

            y_lf = 2 * prob(xt) + 2
            x_lf = deepcopy(xt)
            np.random.seed(1)
            xe = sampling(self.ne)
            ye = prob(xe)

            sm = MFK(theta0=[1e-2] * self.ndim)
            if sm.options.is_declared("xlimits"):
                sm.options["xlimits"] = prob.xlimits
            sm.options["print_global"] = False

            sm.set_training_values(xt, yt[:, 0])
            sm.set_training_values(x_lf, y_lf[:, 0], name=0)

            with Silence():
                sm.train()

            t_error = compute_rms_error(sm)
            e_error = compute_rms_error(sm, xe, ye)

            self.assert_error(t_error, 0.0, 1)
            self.assert_error(e_error, 0.0, 1)
Example #2
def cokriging(i):
    # Xt_e = np.array([[1.57894737, 17.8947368],
    #                  [8.42105263, 17.8947368],
    #                  [1.57894737, 2.10526316],
    #                  [8.42105263, 2.10526316],
    #                  ])
    # high-fidelity DOE points
    Xt_e = np.array([
        [50.0891841, 11.000994],
        [49.9108168, 38.999006],
        [84.200043, 41.5655806],
        [16.0943345, 7.97111717],
        [15.7955774, 41.584603],
        [83.9713379, 7.94023456],
    ])
    # low-fidelity DOE points
    Xt_c = arr_xy_low

    # high- and low-fidelity training outputs for output index i
    yt_e = arr_stress_high_train[i]
    yt_c = np.array(low_predict_data_by_force()[0][i])

    sm = MFK(theta0=np.array(Xt_e.shape[1] * [1.0]), print_global=False)
    # the low-fidelity dataset is registered with name=0; the high-fidelity
    # dataset is set without a name
    sm.set_training_values(Xt_c, yt_c, name=0)
    sm.set_training_values(Xt_e, yt_e)
    sm.train()

    # predict at the high-fidelity query locations
    Xp = arr_xy_high
    y = sm.predict_values(Xp)
    # MSE = sm.predict_variances(Xp)
    # der = sm.predict_derivatives(Xp, kx=0)
    return y.flatten()
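A minimal driver for the function above might look like the following sketch. It is an assumption: the arrays it relies on (arr_stress_high_train and the others) are defined elsewhere in the original script, and the number of output indices is taken from the length of the high-fidelity training array.

# hypothetical driver: run the co-kriging prediction for every output index
# and stack the results into one array (one row per index)
predictions = np.vstack([cokriging(i) for i in range(len(arr_stress_high_train))])
print(predictions.shape)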
Example #3
    def test_mfk_derivs(self):

        prob = Sphere(ndim=self.ndim)
        sampling = LHS(xlimits=prob.xlimits)

        nt = 500
        np.random.seed(0)
        xt = sampling(nt)
        yt = prob(xt)
        dyt = {}
        for kx in range(prob.xlimits.shape[0]):
            dyt[kx] = prob(xt, kx=kx)

        y_lf = 2 * prob(xt) + 2
        x_lf = deepcopy(xt)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        dye = {}
        for kx in range(prob.xlimits.shape[0]):
            dye[kx] = prob(xe, kx=kx)

        sm = MFK(theta0=[1e-2] * self.ndim)
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False

        sm.set_training_values(xt, yt)
        sm.set_training_values(x_lf, y_lf, name=0)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
        e_error0 = compute_rms_error(sm, xe, dye[0], 0)
        e_error1 = compute_rms_error(sm, xe, dye[1], 1)

        if print_output:
            print("%8s %6s %18.9e %18.9e %18.9e %18.9e" %
                  (pname[:6], sname, t_error, e_error, e_error0, e_error1))

        self.assert_error(e_error0, 0.0, 1e-1)
        self.assert_error(e_error1, 0.0, 1e-1)
Example #4
    def setUp(self):
        ndim = 2
        nt = 5000
        ne = 100

        problems = OrderedDict()
        problems["sphere"] = Sphere(ndim=ndim)

        sms = OrderedDict()
        if compiled_available:
            sms["RBF"] = RBF()
            sms["RMTC"] = RMTC()
            sms["RMTB"] = RMTB()
            sms["MFK"] = MFK(theta0=[1e-2] * ndim)

        self.nt = nt
        self.ne = ne
        self.problems = problems
        self.sms = sms
Example #5
    def setUp(self):
        ndim = 3
        nt = 100
        ne = 100
        ncomp = 1

        problems = OrderedDict()
        problems["exp"] = TensorProduct(ndim=ndim, func="exp")
        problems["tanh"] = TensorProduct(ndim=ndim, func="tanh")
        problems["cos"] = TensorProduct(ndim=ndim, func="cos")

        sms = OrderedDict()
        sms["LS"] = LS()
        sms["QP"] = QP()
        sms["KRG"] = KRG(theta0=[1e-2] * ndim)
        sms["MFK"] = MFK(theta0=[1e-2] * ndim)
        sms["KPLS"] = KPLS(theta0=[1e-2] * ncomp, n_comp=ncomp)
        sms["KPLSK"] = KPLSK(theta0=[1] * ncomp, n_comp=ncomp)
        sms["GEKPLS"] = GEKPLS(theta0=[1e-2] * ncomp,
                               n_comp=ncomp,
                               delta_x=1e-1)
        sms["GENN"] = genn()
        if compiled_available:
            sms["IDW"] = IDW()
            sms["RBF"] = RBF()
            sms["RMTC"] = RMTC()
            sms["RMTB"] = RMTB()

        t_errors = {}
        t_errors["LS"] = 1.0
        t_errors["QP"] = 1.0
        t_errors["KRG"] = 1e0
        t_errors["MFK"] = 1e0
        t_errors["KPLS"] = 1e0
        t_errors["KPLSK"] = 1e0
        t_errors["GEKPLS"] = 1e0
        t_errors["GENN"] = 1e0
        if compiled_available:
            t_errors["IDW"] = 1e0
            t_errors["RBF"] = 1e-2
            t_errors["RMTC"] = 1e-1
            t_errors["RMTB"] = 1e-1

        e_errors = {}
        e_errors["LS"] = 1.5
        e_errors["QP"] = 1.5
        e_errors["KRG"] = 1e-2
        e_errors["MFK"] = 1e-2
        e_errors["KPLS"] = 1e-2
        e_errors["KPLSK"] = 1e-2
        e_errors["GEKPLS"] = 1e-2
        e_errors["GENN"] = 1e-2
        if compiled_available:
            e_errors["IDW"] = 1e0
            e_errors["RBF"] = 1e0
            e_errors["RMTC"] = 2e-1
            e_errors["RMTB"] = 2e-1

        self.nt = nt
        self.ne = ne
        self.ndim = ndim
        self.problems = problems
        self.sms = sms
        self.t_errors = t_errors
        self.e_errors = e_errors
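The t_errors and e_errors dictionaries hold, per surrogate name, the tolerances later passed to assert_error for the training-set and test-set RMS errors. A hypothetical fragment of the check they feed, modeled on Example #1; the method below is an assumption (not part of the original test) and presumes each surrogate has already been trained and that xe/ye hold held-out validation data.

    def check_errors(self, xe, ye):
        # hypothetical helper: compare RMS errors against the per-model
        # tolerances stored in setUp
        for sname, sm in self.sms.items():
            t_error = compute_rms_error(sm)          # error on the training data
            e_error = compute_rms_error(sm, xe, ye)  # error on held-out points
            self.assert_error(t_error, 0.0, self.t_errors[sname])
            self.assert_error(e_error, 0.0, self.e_errors[sname])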
Example #6
    def run_mfk_example(self):
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.applications import MFK

        # Define the low- and high-fidelity functions
        def LF_function(x):
            import numpy as np

            return (0.5 * ((x * 6 - 2)**2) * np.sin(
                (x * 6 - 2) * 2) + (x - 0.5) * 10.0 - 5)

        def HF_function(x):
            import numpy as np

            return ((x * 6 - 2)**2) * np.sin((x * 6 - 2) * 2)

        # Problem set up
        ndim = 1
        Xt_e = np.linspace(0, 1, 4, endpoint=True).reshape(-1, ndim)
        Xt_c = np.linspace(0, 1, 11, endpoint=True).reshape(-1, ndim)

        nt_exp = Xt_e.shape[0]
        nt_cheap = Xt_c.shape[0]

        # Evaluate the HF and LF functions
        yt_e = HF_function(Xt_e)
        yt_c = LF_function(Xt_c)

        sm = MFK(theta0=np.array(Xt_e.shape[1] * [1.0]))

        # low-fidelity dataset names are integers from 0 to level-1
        sm.set_training_values(Xt_c, yt_c, name=0)
        # high-fidelity dataset without name
        sm.set_training_values(Xt_e, yt_e)

        # train the model
        sm.train()

        x = np.linspace(0, 1, 101, endpoint=True).reshape(-1, 1)

        # query the outputs
        y = sm.predict_values(x)
        MSE = sm.predict_variances(x)
        der = sm.predict_derivatives(x, kx=0)

        plt.figure()

        plt.plot(x, HF_function(x), label="reference")
        plt.plot(x, y, linestyle="-.", label="mean_gp")
        plt.scatter(Xt_e, yt_e, marker="o", color="k", label="HF doe")
        plt.scatter(Xt_c, yt_c, marker="*", color="g", label="LF doe")
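        # optional sketch, not part of the original example: use the predicted
        # variances (MSE) to shade a +/- 3 sigma confidence band around the mean
        sigma = np.sqrt(MSE)
        plt.fill_between(
            x.ravel(),
            (y - 3 * sigma).ravel(),
            (y + 3 * sigma).ravel(),
            alpha=0.3,
            label="mean_gp +/- 3 sigma",
        )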

        plt.legend(loc=0)
        plt.ylim(-10, 17)
        plt.xlim(-0.1, 1.1)
        plt.xlabel(r"$x$")
        plt.ylabel(r"$y$")

        plt.show()
Example #7
import numpy as np
from smt.applications import MFK


# low- and high-fidelity Forrester functions (the definitions were cut off
# above this snippet; they are assumed to match the pair in Example #6)
def LF_function(x):
    return 0.5 * (6 * x - 2)**2 * np.sin(12 * x - 4) + (x - 0.5) * 10.0 - 5


def HF_function(x):
    return (6 * x - 2)**2 * np.sin(12 * x - 4)


# Problem set up
ndim = 1
Xt_e = np.linspace(0, 1, 4).reshape(-1, ndim)
Xt_c = np.linspace(0, 1, 11).reshape(-1, ndim)

nt_exp = Xt_e.shape[0]
nt_cheap = Xt_c.shape[0]

# Evaluate the HF and LF functions
yt_e = HF_function(Xt_e)
yt_c = LF_function(Xt_c)

sm = MFK(theta0=np.array(Xt_e.shape[1] * [1.0]), print_global=False)

print(Xt_e.shape[1] * [1.0])

# low-fidelity dataset names are integers from 0 to level-1
sm.set_training_values(Xt_c, yt_c, name=0)
print(Xt_c)
# high-fidelity dataset without name
sm.set_training_values(Xt_e, yt_e)
print(Xt_e)

# train the model
sm.train()

x = np.linspace(0, 1, 101, endpoint=True).reshape(-1, 1)
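The snippet above stops at the prediction grid. A minimal continuation, mirroring the query step of Example #6 (the final print call is only illustrative):

# query the trained multi-fidelity model on the prediction grid
y = sm.predict_values(x)
MSE = sm.predict_variances(x)
der = sm.predict_derivatives(x, kx=0)
print(y.shape, MSE.shape, der.shape)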