Example #1
    def run_mfk_example():
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.applications.mfk import MFK, NestedLHS

        # low-fidelity model
        def lf_function(x):
            return (0.5 * ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)
                    + (x - 0.5) * 10.0 - 5)

        # high-fidelity model
        def hf_function(x):
            return ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)

        # Problem setup
        xlimits = np.array([[0.0, 1.0]])
        xdoes = NestedLHS(nlevel=2, xlimits=xlimits)
        xt_c, xt_e = xdoes(7)

        # Evaluate the HF and LF functions
        yt_e = hf_function(xt_e)
        yt_c = lf_function(xt_c)

        sm = MFK(theta0=np.array(xt_e.shape[1] * [1.0]))

        # low-fidelity datasets are named with integers from 0 to nlevel-2
        sm.set_training_values(xt_c, yt_c, name=0)
        # the highest-fidelity dataset is set without a name
        sm.set_training_values(xt_e, yt_e)

        # train the model
        sm.train()

        x = np.linspace(0, 1, 101, endpoint=True).reshape(-1, 1)

        # query the outputs
        y = sm.predict_values(x)
        mse = sm.predict_variances(x)
        derivs = sm.predict_derivatives(x, kx=0)
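        # note: y, mse and derivs are each (101, 1) arrays: one row per
        # query point, one column per output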

        plt.figure()

        plt.plot(x, hf_function(x), label="reference")
        plt.plot(x, y, linestyle="-.", label="mean_gp")
        plt.scatter(xt_e, yt_e, marker="o", color="k", label="HF doe")
        plt.scatter(xt_c, yt_c, marker="*", color="g", label="LF doe")

        plt.legend(loc=0)
        plt.ylim(-10, 17)
        plt.xlim(-0.1, 1.1)
        plt.xlabel(r"$x$")
        plt.ylabel(r"$y$")

        plt.show()
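To also visualize the predictive uncertainty, the variance returned by predict_values/predict_variances can be turned into a confidence band before the call to plt.show(). A minimal sketch, reusing x, y and mse from the example above:

    sigma = np.sqrt(np.maximum(mse, 0.0))  # guard against round-off negatives
    plt.fill_between(
        np.ravel(x),
        np.ravel(y - 3 * sigma),
        np.ravel(y + 3 * sigma),
        alpha=0.3,
        label="3-sigma confidence",
    )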
Example #2
    def run_mfk_example_1fidelity():
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.applications.mfk import MFK, NestedLHS

        # Consider only one fidelity level
        # high-fidelity model
        def hf_function(x):
            return ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)

        # Problem setup
        xlimits = np.array([[0.0, 1.0]])
        xdoes = NestedLHS(nlevel=1, xlimits=xlimits, random_state=0)
        xt_e = xdoes(7)[0]

        # Evaluate the HF function
        yt_e = hf_function(xt_e)

        sm = MFK(theta0=xt_e.shape[1] * [1.0])

        # High-fidelity dataset without name
        sm.set_training_values(xt_e, yt_e)

        # Train the model
        sm.train()

        x = np.linspace(0, 1, 101, endpoint=True).reshape(-1, 1)

        # Query the outputs
        y = sm.predict_values(x)
        mse = sm.predict_variances(x)
        derivs = sm.predict_derivatives(x, kx=0)

        plt.figure()

        plt.plot(x, hf_function(x), label="reference")
        plt.plot(x, y, linestyle="-.", label="mean_gp")
        plt.scatter(xt_e, yt_e, marker="o", color="k", label="HF doe")

        plt.legend(loc=0)
        plt.ylim(-10, 17)
        plt.xlim(-0.1, 1.1)
        plt.xlabel(r"$x$")
        plt.ylabel(r"$y$")

        plt.show()
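The derivs array computed above goes unused in the example; one way to sanity-check it is against a central finite difference of the predicted mean. A minimal sketch, reusing sm, x and derivs from the example above (step size and tolerances are illustrative):

    h = 1e-6
    fd = (sm.predict_values(x + h) - sm.predict_values(x - h)) / (2 * h)
    assert np.allclose(derivs, fd, rtol=1e-3, atol=1e-3)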
Example #3
    def test_nested_lhs(self):
        xlimits = np.array([[0.0, 1.0], [0.0, 1.0]])
        xnorm = NestedLHS(nlevel=3, xlimits=xlimits, random_state=0)
        xlow, xmedium, xhigh = xnorm(15)

        # each point of a finer DOE must also appear in the coarser DOE
        for items1 in xmedium:
            found = False
            for items0 in xlow:
                if np.array_equal(items1, items0):
                    found = True
            self.assertTrue(found)

        for items1 in xhigh:
            found = False
            for items0 in xmedium:
                if np.array_equal(items1, items0):
                    found = True
            self.assertTrue(found)
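The same nestedness property can be checked without the nested loops. A sketch with a hypothetical is_nested helper; it assumes points are duplicated exactly (bitwise) across levels, which is how NestedLHS constructs them:

    def is_nested(coarse, fine):
        # every row of the finer DOE must appear verbatim in the coarser DOE
        return all((coarse == row).all(axis=1).any() for row in fine)

    assert is_nested(xlow, xmedium) and is_nested(xmedium, xhigh)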
Example #4
    def test_mfk_variance(self):

        # DOE setup (dim = 2)
        nlevel = 2
        ub0 = 10.0
        ub1 = 15.0
        lb0 = -5.0
        lb1 = 0.0
        xlimits = np.array([[lb0, ub0], [lb1, ub1]])

        # Constants
        n_HF = 5  # number of high-fidelity points (the low-fidelity DOE has twice as many)
        xdoes = NestedLHS(nlevel=nlevel, xlimits=xlimits)
        x_t_lf, x_t_hf = xdoes(n_HF)

        # Evaluate the HF and LF functions
        y_t_lf = LF(x_t_lf)
        y_t_hf = HF(x_t_hf)

        sm = MFK(
            theta0=x_t_hf.shape[1] * [1e-2],
            print_global=False,
            rho_regr="constant",
        )

        # low-fidelity datasets are named with integers from 0 to nlevel-2
        sm.set_training_values(x_t_lf, y_t_lf, name=0)
        # the highest-fidelity dataset is set without a name
        sm.set_training_values(x_t_hf, y_t_hf)
        # train the model
        sm.train()

        # Validation set: a single point drawn by LHS
        ntest = 1
        sampling = LHS(xlimits=xlimits)
        x_test_LHS = sampling(ntest)
        # y_test_LHS = HF(x_test_LHS)

        # compare the mean value between different formula
        if print_output:
            print("Mu sm  : {}".format(sm.predict_values(x_test_LHS)[0, 0]))
            print("Mu LG_sm : {}".format(
                TestMFK_variance.Mu_LG_sm(x_test_LHS, sm)[0, 0]))
            print("Mu LG_LG : {}".format(
                TestMFK_variance.Mu_LG_LG(x_test_LHS, sm)[0, 0]))

        # self.assertAlmostEqual(
        #     TestMFK_variance.Mu_LG_sm(x_test_LHS, sm)[0, 0],
        #     TestMFK_variance.Mu_LG_LG(x_test_LHS, sm)[0, 0],
        #     delta=1,
        # )
        self.assertAlmostEqual(
            sm.predict_values(x_test_LHS)[0, 0],
            TestMFK_variance.Mu_LG_LG(x_test_LHS, sm)[0, 0],
            delta=1,
        )

        # compare the variance value between different formula
        (k_0_LG_sm,
         k_1_LG_sm) = TestMFK_variance.Cov_LG_sm(x_test_LHS, x_test_LHS, sm)
        (k_0_LG_LG,
         k_1_LG_LG) = TestMFK_variance.Cov_LG_LG(x_test_LHS, x_test_LHS, sm)
        k_all_sm = sm.predict_variances_all_levels(x_test_LHS)[0]
        k_0_sm = k_all_sm[0, 0]
        k_1_sm = k_all_sm[0, 1]

        if print_output:
            print("Level 0")
            print("Var sm  : {}".format(k_0_sm))
            print("Var LG_sm : {}".format(k_0_LG_sm[0, 0]))
            print("Var LG_LG : {}".format(k_0_LG_LG[0, 0]))

            print("Level 1")
            print("Var sm  : {}".format(k_1_sm))
            print("Var LG_sm : {}".format(k_1_LG_sm[0, 0]))
            print("Var LG_LG : {}".format(k_1_LG_LG[0, 0]))

        # for level 0
        self.assertAlmostEqual(k_0_sm, k_0_LG_sm[0, 0], delta=1)
        self.assertAlmostEqual(k_0_LG_sm[0, 0], k_0_LG_LG[0, 0], delta=1)
        # for level 1
        self.assertAlmostEqual(k_1_sm, k_1_LG_sm[0, 0], delta=1)
        self.assertAlmostEqual(k_1_LG_sm[0, 0], k_1_LG_LG[0, 0], delta=1)

        (
            beta_sm_1,
            sigma2_sm_1,
            beta_sm_2,
            sigma2_sm_2,
            rho_sm,
            sigma2_rho_sm,
            beta_LG_1,
            sigma2_LG_1,
            beta_LG_2,
            sigma2_LG_2,
            rho_LG,
            sigma2_rho_LG,
        ) = TestMFK_variance.verif_hyperparam(sm, x_test_LHS)
        if print_output:
            print("Hyperparameters")
            print("rho_sm : {}".format(rho_sm))
            print("rho_LG : {}".format(rho_LG))
            print("sigma2_rho_sm : {}".format(sigma2_rho_sm[0]))
            print("sigma2_rho_LG : {}".format(sigma2_rho_LG))
            print("beta_sm_1 : {}".format(beta_sm_1))
            print("beta_LG_1 : {}".format(beta_LG_1[0, 0]))
            print("beta_sm_2 : {}".format(beta_sm_2))
            print("beta_LG_2 : {}".format(beta_LG_2))
            print("sigma2_sm_1 : {}".format(sigma2_sm_1))
            print("sigma2_LG_1 : {}".format(sigma2_LG_1))
            print("sigma2_sm_2 : {}".format(sigma2_sm_2))
            print("sigma2_LG_2 : {}".format(sigma2_LG_2))
Example #5
import numpy as np
import matplotlib.pyplot as plt
from smt.applications.mfk import MFK, NestedLHS
from smt.applications.mfkplsk import MFKPLSK

from testbed_components import simple_1D_low, simple_1D_high

# Problem setup
xlimits = np.array([[0.0, 1.0]])
xdoes = NestedLHS(nlevel=2, xlimits=xlimits)
xt_c, xt_e = xdoes(4)

# Evaluate the HF and LF functions
yt_e = simple_1D_high(xt_e)
yt_c = simple_1D_low(xt_c)

# choice of number of PLS components
ncomp = 1
sm = MFKPLSK(n_comp=ncomp, theta0=np.array(ncomp * [1.0]))
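# note: with MFKPLSK, theta0 has one entry per PLS component (ncomp here),
# not one per input dimension as with plain MFK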

# low-fidelity datasets are named with integers from 0 to nlevel-2
sm.set_training_values(xt_c, yt_c, name=0)
# the highest-fidelity dataset is set without a name
sm.set_training_values(xt_e, yt_e)

# train the model
sm.train()

x = np.linspace(0, 1, 101, endpoint=True).reshape(-1, 1)

# query the outputs (following the same pattern as the examples above)
y = sm.predict_values(x)
mse = sm.predict_variances(x)
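# A quick accuracy check (a sketch, assuming the query lines above): compare
# the MFKPLSK mean prediction with the high-fidelity reference on the grid
rmse = np.sqrt(np.mean((y - simple_1D_high(x)) ** 2))
print("RMSE vs. high-fidelity reference: {:.4f}".format(rmse))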