Example #1
def update_hyperparameters(self, param):

        self.RBF_scale = param
        self.model = KRG(theta0=param,
                         poly='quadratic',
                         print_training=False,
                         print_global=False)
Example #2
    def test_variance_derivatives(self):
        for corr_str in [
                "abs_exp",
                "squar_exp",
                "matern32",
                "matern52",
        ]:
            kr = KRG(print_global=False)
            kr.options["poly"] = "constant"
            kr.options["corr"] = corr_str
            kr.set_training_values(self.X, self.y)
            kr.train()

            e = 1e-6
            xa = random.random()
            xb = random.random()
            x_valid = [[xa, xb], [xa + e, xb], [xa - e, xb], [xa, xb + e],
                       [xa, xb - e]]

            y_predicted = kr.predict_variances(np.array(x_valid))
            y_jacob = np.zeros((2, 5))

            for i in range(np.shape(x_valid)[0]):
                deriv = kr.predict_variance_derivatives(
                    np.atleast_2d(x_valid[i]))[0]
                y_jacob[:, i] = deriv

            # central finite differences along each input direction
            diff_g = (y_predicted[1][0] - y_predicted[2][0]) / (2 * e)
            diff_d = (y_predicted[3][0] - y_predicted[4][0]) / (2 * e)

            jac_rel_error1 = abs((y_jacob[0] - diff_g) / y_jacob[0])
            self.assert_error(jac_rel_error1, 1e-3, atol=0.01, rtol=0.01)

            jac_rel_error2 = abs((y_jacob[1] - diff_d) / y_jacob[1])
            self.assert_error(jac_rel_error2, 1e-3, atol=0.01, rtol=0.01)
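
A minimal runnable sketch of the same finite-difference check applied to the value derivatives (assumes only that smt is installed):

import numpy as np
from smt.surrogate_models import KRG

xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])

sm = KRG(theta0=[1e-2], print_global=False)
sm.set_training_values(xt, yt)
sm.train()

x0, e = 2.5, 1e-6
# analytic derivative of the prediction w.r.t. the first (only) variable
analytic = sm.predict_derivatives(np.atleast_2d(x0), 0)[0, 0]
# central finite difference of the prediction
numeric = (sm.predict_values(np.atleast_2d(x0 + e))[0, 0]
           - sm.predict_values(np.atleast_2d(x0 - e))[0, 0]) / (2 * e)
print(abs(analytic - numeric))  # should be close to zero
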
Example #3
    def __init__(self, data, x_headers, y_header, RBF_scale=[2.91, 174, 12.8]):

        super().__init__(data, x_headers, y_header)

        self.RBF_scale = RBF_scale
        self.model = KRG(theta0=RBF_scale,
                         poly='quadratic',
                         print_training=False,
                         print_global=False)
Example #4
    def test_ego_mixed_integer_full_gaussian(self):
        n_iter = 15
        xtypes = [FLOAT, (ENUM, 3), (ENUM, 2), ORD]
        xlimits = np.array([[-5, 5], ["blue", "red", "green"],
                            ["large", "small"], [0, 2]])
        n_doe = 2
        sampling = MixedIntegerSamplingMethod(
            xtypes,
            xlimits,
            LHS,
            criterion="ese",
            random_state=42,
            output_in_folded_space=True,
        )
        xdoe = sampling(n_doe)
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'
        sm = KRG(print_global=False)
        mixint = MixedIntegerContext(xtypes, xlimits)

        ego = EGO(
            n_iter=n_iter,
            criterion=criterion,
            xdoe=xdoe,
            xtypes=xtypes,
            xlimits=xlimits,
            surrogate=sm,
            enable_tunneling=False,
            random_state=42,
            categorical_kernel=FULL_GAUSSIAN,
        )
        _, y_opt, _, _, _ = ego.optimize(
            fun=TestEGO.function_test_mixed_integer)
Example #5
    def test_branin_2D_mixed_parallel(self):
        n_parallel = 5
        n_iter = 20
        fun = Branin(ndim=2)
        xlimits = fun.xlimits
        criterion = "EI"  #'EI' or 'SBO' or 'UCB'
        qEI = "KB"
        xtypes = [INT, FLOAT]

        sm = KRG(print_global=False)
        mixint = MixedIntegerContext(xtypes, xlimits)
        sampling = mixint.build_sampling_method(FullFactorial)
        xdoe = sampling(10)

        ego = EGO(
            xdoe=xdoe,
            n_iter=n_iter,
            criterion=criterion,
            xtypes=[INT, FLOAT],
            xlimits=xlimits,
            n_parallel=n_parallel,
            qEI=qEI,
            evaluator=ParallelEvaluator(),
            surrogate=sm,
            random_state=42,
        )

        x_opt, y_opt, _, _, _ = ego.optimize(fun=fun)
        # 3 optimal points possible: [-pi, 12.275], [pi, 2.275], [9.42478, 2.475]
        self.assertTrue(
            np.allclose([[-3, 12.275]], x_opt, rtol=0.2)
            or np.allclose([[3, 2.275]], x_opt, rtol=0.2)
            or np.allclose([[9, 2.475]], x_opt, rtol=0.2))
        self.assertAlmostEqual(0.494, float(y_opt), delta=1)
Example #6
class RemoteUnit(om.Group):

    def __init__(self, case):
        super(RemoteUnit, self).__init__()

        self.case = case

    def setup(self):
        
        case = self.case

        # load the training data for the surrogate
        if case == 'cold':
            ndim = 7
            train = np.loadtxt('./TrainingData/RUc_TrainingData[ese]_n=100.csv', delimiter=',')
        elif case == 'hot':
            ndim = 6
            train = np.loadtxt('./TrainingData/RUh_TrainingData[ese]_n=100.csv', delimiter=',')
        else:
            raise ValueError("case must be 'cold' or 'hot', got %r" % case)
        xt, yt = train[:, :ndim], train[:, ndim:]

        # train the surrogate and pass it to the model
        sm = KRG(theta0=[1e-2] * ndim, print_prediction=False)
        sm.set_training_values(xt, yt)
        sm.train()

        cycle = self.add_subsystem('cycle', om.Group(), promotes_inputs=['*'], promotes_outputs=['tBat', 'tProp', 'tBPanel', 'tDPanel'])
        cycle.add_subsystem('sc', SolarCell(), promotes_inputs=['tBPanel', 'tDPanel'], promotes_outputs=['eff'])
        cycle.add_subsystem('tm', ThermoSurrogate(sm=sm, case=case), promotes=['*'])
        # Nonlinear Block Gauss-Seidel is a gradient-free solver
        cycle.nonlinear_solver = om.NonlinearBlockGS()
Example #7
    def train(self, X_train, y_train):
        if self.flavour == 'plain':
            self.smt_model = KRG(poly=self.poly,
                                 corr=self.corr,
                                 theta0=self.theta0)
        elif self.flavour == 'pls':
            self.smt_model = KPLS(poly=self.poly,
                                  corr=self.corr,
                                  theta0=self.theta0,
                                  n_comp=self.n_comp)
        elif self.flavour == 'plsk':
            self.smt_model = KPLSK(poly=self.poly,
                                   corr=self.corr,
                                   theta0=self.theta0,
                                   n_comp=self.n_comp)
        elif self.flavour == 'gepls':
            self.smt_model = GEKPLS(poly=self.poly,
                                    corr=self.corr,
                                    theta0=self.theta0,
                                    n_comp=self.n_comp,
                                    xlimits=self.xlimits,
                                    delta_x=self.delta_x,
                                    extra_points=self.extra_points)

        super(KrigingModel, self).train(X_train, y_train)
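
For context, a minimal runnable sketch of the 'pls' flavour (assumes only that smt is installed): KPLS compresses the correlation hyperparameters onto n_comp PLS directions, so theta0 has length n_comp rather than the full input dimension.

import numpy as np
from smt.surrogate_models import KPLS

X = np.random.random((30, 6))
y = np.sum(X, axis=1)

sm = KPLS(n_comp=2, theta0=[1e-2] * 2, print_global=False)
sm.set_training_values(X, y)
sm.train()
print(sm.predict_values(X[:3]).shape)  # (3, 1)
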
Example #8
    def test_mixed_gower_krg(self):
        from smt.applications.mixed_integer import (
            MixedIntegerSurrogateModel,
            ENUM,
            GOWER,
        )
        from smt.surrogate_models import KRG
        import matplotlib.pyplot as plt
        import numpy as np

        xt = np.array([0, 3, 4])
        yt = np.array([0.0, 1.0, 1.5])

        xlimits = [["0.0", "1.0", " 2.0", "3.0", "4.0"]]

        # Surrogate
        sm = MixedIntegerSurrogateModel(
            categorical_kernel=GOWER,
            xtypes=[(ENUM, 5)],
            xlimits=xlimits,
            surrogate=KRG(theta0=[1e-2]),
        )
        sm.set_training_values(xt, yt)
        sm.train()

        # DOE for validation
        x = np.linspace(0, 4, 5)  # the five enum levels 0..4
        y = sm.predict_values(x)

        plt.plot(xt, yt, "o", label="data")
        plt.plot(x, y, "d", color="red", markersize=3, label="pred")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend()
        plt.show()
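
The GOWER kernel above mixes categorical and continuous inputs through the Gower distance. A hand-rolled illustration of that distance (the helper below is hypothetical, not part of smt):

def gower(u, v, cat_mask, ranges):
    # categorical components contribute a 0/1 mismatch,
    # continuous ones a range-normalised absolute difference
    d = 0.0
    for ui, vi, is_cat, r in zip(u, v, cat_mask, ranges):
        d += float(ui != vi) if is_cat else abs(ui - vi) / r
    return d / len(u)

print(gower(["blue", 0.5], ["red", 1.5], [True, False], [1.0, 10.0]))  # 0.55
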
Example #9
    def test_mixed_gower(self):
        from smt.applications.mixed_integer import MixedIntegerSurrogateModel, ENUM
        from smt.surrogate_models import KRG
        import matplotlib.pyplot as plt
        import numpy as np

        xt = np.linspace(1.0, 5.0, 5)
        x_train = np.array(["%.2f" % i for i in xt], dtype=object)
        yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0])

        xlimits = [["0.0", "1.0", " 2.0", "3.0", "4.0"]]

        # Surrogate
        sm = MixedIntegerSurrogateModel(
            use_gower_distance=True,
            xtypes=[(ENUM, 5)],
            xlimits=xlimits,
            surrogate=KRG(theta0=[1e-2]),
        )
        sm.set_training_values(x_train, yt)
        sm.train()

        # DOE for validation
        num = 101
        x = np.linspace(0, 5, num)
        x_pred = np.array(["%.2f" % i for i in x], dtype=object)
        y = sm.predict_values(x_pred)

        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("actual")
        plt.ylabel("prediction")
        plt.show()
Example #10
    def test_mixed_int_krg(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KRG
        from smt.applications.mixed_integer import MixedIntegerSurrogateModel, INT

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0])

        # xtypes = [FLOAT, INT, (ENUM, 3), (ENUM, 2)]
        # FLOAT means x1 continuous
        # INT means x2 integer
        # (ENUM, 3) means x3, x4 & x5 are 3 levels of the same categorical variable
        # (ENUM, 2) means x6 & x7 are 2 levels of the same categorical variable

        sm = MixedIntegerSurrogateModel(xtypes=[INT],
                                        xlimits=[[0, 4]],
                                        surrogate=KRG(theta0=[1e-2]))
        sm.set_training_values(xt, yt)
        sm.train()

        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)

        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()
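
The commented-out xtypes above corresponds to a declaration like this sketch (same smt mixed_integer constants used elsewhere on this page):

from smt.applications.mixed_integer import FLOAT, INT, ENUM

xtypes = [FLOAT, INT, (ENUM, 3), (ENUM, 2)]
xlimits = [
    [0.0, 4.0],                # x1 continuous in [0, 4]
    [0, 4],                    # x2 integer in [0, 4]
    ["blue", "red", "green"],  # one categorical variable with 3 levels
    ["large", "small"],        # one categorical variable with 2 levels
]
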
Example #11
    def test_branin_2D_mixed(self):
        n_iter = 20
        fun = Branin(ndim=2)
        xtypes = [INT, FLOAT]
        xlimits = fun.xlimits
        criterion = "EI"  #'EI' or 'SBO' or 'UCB'

        xdoe = FullFactorial(xlimits=xlimits)(10)
        s = KRG(print_global=False)
        ego = EGO(
            xdoe=xdoe,
            n_iter=n_iter,
            criterion=criterion,
            xtypes=xtypes,
            xlimits=xlimits,
            surrogate=s,
        )

        x_opt, y_opt, _, _, _ = ego.optimize(fun=fun)
        # 3 optimal points possible: [-pi, 12.275], [pi, 2.275], [9.42478, 2.475]
        self.assertTrue(
            np.allclose([[-3, 12.275]], x_opt, rtol=0.2)
            or np.allclose([[3, 2.275]], x_opt, rtol=0.2)
            or np.allclose([[9, 2.475]], x_opt, rtol=0.2))
        self.assertAlmostEqual(0.494, float(y_opt), delta=1)
Example #12
    def test_ego_mixed_integer(self):
        n_iter = 15
        xtypes = [FLOAT, (ENUM, 3), (ENUM, 2), ORD]
        xlimits = np.array([[-5, 5], ["blue", "red", "green"],
                            ["large", "small"], ["0", "2", "3"]])
        n_doe = 2
        sampling = MixedIntegerSamplingMethod(xtypes,
                                              xlimits,
                                              LHS,
                                              criterion="ese",
                                              random_state=42)
        xdoe = sampling(n_doe)
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'
        sm = KRG(print_global=False)
        mixint = MixedIntegerContext(xtypes, xlimits)

        ego = EGO(
            n_iter=n_iter,
            criterion=criterion,
            xdoe=xdoe,
            xtypes=xtypes,
            xlimits=xlimits,
            surrogate=sm,
            enable_tunneling=False,
            random_state=42,
        )
        _, y_opt, _, _, _ = ego.optimize(
            fun=TestEGO.function_test_mixed_integer)

        self.assertAlmostEqual(-15, float(y_opt), delta=5)
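
For comparison, a minimal runnable EGO sketch in one dimension following the same pattern (assumes only that smt is installed; the toy function is illustrative):

import numpy as np
from smt.applications import EGO

def f(x):
    return (x - 2.0) ** 2  # minimum at x = 2

ego = EGO(n_iter=10, criterion="EI", n_doe=4,
          xlimits=np.array([[0.0, 4.0]]), random_state=42)
x_opt, y_opt, _, _, _ = ego.optimize(fun=f)
print(x_opt, y_opt)  # expected near x = 2, y = 0
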
Example #13
    def test_branin_2D_mixed(self):
        n_iter = 20
        fun = Branin(ndim=2)
        xtypes = [ORD, FLOAT]
        xlimits = fun.xlimits
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'

        sm = KRG(print_global=False)
        mixint = MixedIntegerContext(xtypes, xlimits)
        sampling = MixedIntegerSamplingMethod(xtypes, xlimits, FullFactorial)
        xdoe = sampling(10)

        ego = EGO(
            xdoe=xdoe,
            n_iter=n_iter,
            criterion=criterion,
            xtypes=xtypes,
            xlimits=xlimits,
            surrogate=sm,
            random_state=42,
        )

        x_opt, y_opt, _, _, _ = ego.optimize(fun=fun)
        # 3 optimal points possible: [-pi, 12.275], [pi, 2.275], [9.42478, 2.475]
        self.assertTrue(
            np.allclose([[-3, 12.275]], x_opt, rtol=0.2)
            or np.allclose([[3, 2.275]], x_opt, rtol=0.2)
            or np.allclose([[9, 2.475]], x_opt, rtol=0.2))
        self.assertAlmostEqual(0.494, float(y_opt), delta=1)
Example #14
    def setUp(self):
        ndim = 3
        nt = 100
        ne = 100
        ncomp = 1

        problems = OrderedDict()
        problems['exp'] = TensorProduct(ndim=ndim, func='exp')
        problems['tanh'] = TensorProduct(ndim=ndim, func='tanh')
        problems['cos'] = TensorProduct(ndim=ndim, func='cos')

        sms = OrderedDict()
        sms['LS'] = LS()
        sms['QP'] = QP()
        sms['KRG'] = KRG(theta0=[1e-2] * ndim)
        sms['KPLS'] = KPLS(theta0=[1e-2] * ncomp, n_comp=ncomp)
        sms['KPLSK'] = KPLSK(theta0=[1] * ncomp, n_comp=ncomp)
        sms['GEKPLS'] = GEKPLS(theta0=[1e-2] * ncomp,
                               n_comp=ncomp,
                               delta_x=1e-1)
        if compiled_available:
            sms['IDW'] = IDW()
            sms['RBF'] = RBF()
            sms['RMTC'] = RMTC()
            sms['RMTB'] = RMTB()

        t_errors = {}
        t_errors['LS'] = 1.0
        t_errors['QP'] = 1.0
        t_errors['KRG'] = 1e-5
        t_errors['KPLS'] = 1e-5
        t_errors['KPLSK'] = 1e-5
        t_errors['GEKPLS'] = 1e-5
        if compiled_available:
            t_errors['IDW'] = 1e-15
            t_errors['RBF'] = 1e-2
            t_errors['RMTC'] = 1e-1
            t_errors['RMTB'] = 1e-1

        e_errors = {}
        e_errors['LS'] = 1.5
        e_errors['QP'] = 1.5
        e_errors['KRG'] = 1e-2
        e_errors['KPLS'] = 1e-2
        e_errors['KPLSK'] = 1e-2
        e_errors['GEKPLS'] = 1e-2
        if compiled_available:
            e_errors['IDW'] = 1e0
            e_errors['RBF'] = 1e0
            e_errors['RMTC'] = 2e-1
            e_errors['RMTB'] = 2e-1

        self.nt = nt
        self.ne = ne
        self.ndim = ndim
        self.problems = problems
        self.sms = sms
        self.t_errors = t_errors
        self.e_errors = e_errors
Example #15
    def test_krg_mixed_3D_bad_regr(self):
        xtypes = [FLOAT, (ENUM, 3), ORD]
        xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]]
        mixint = MixedIntegerContext(xtypes, xlimits)
        with self.assertRaises(ValueError):
            sm = mixint.build_surrogate_model(
                KRG(print_prediction=False, poly="linear")
            )
Example #16
    def _setup_optimizer(self, fun):
        """
        Instanciate internal surrogate used for optimization 
        and setup function evaluator wrt options

        Parameters
        ----------

        fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]

        Returns
        -------

        ndarray: initial coord-x doe
        ndarray: initial coord-y doe = fun(xdoe)

        """
        # Set the bounds of the optimization problem
        xlimits = self.options["xlimits"]

        # Build initial DOE
        self._sampling = LHS(xlimits=xlimits, criterion="ese")
        self._evaluator = self.options["evaluator"]

        xdoe = self.options["xdoe"]
        if xdoe is None:
            self.log("Build initial DOE with LHS")
            n_doe = self.options["n_doe"]
            x_doe = self._sampling(n_doe)
        else:
            self.log("Initial DOE given")
            x_doe = np.atleast_2d(xdoe)

        ydoe = self.options["ydoe"]
        if ydoe is None:
            y_doe = self._evaluator.run(fun, x_doe)
        else:  # to save time if y_doe is already given to EGO
            y_doe = ydoe

        self.gpr = KRG(print_global=False)

        return x_doe, y_doe
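
A minimal runnable sketch of the LHS DOE step used above (assumes only that smt is installed):

import numpy as np
from smt.sampling_methods import LHS

xlimits = np.array([[0.0, 1.0], [-2.0, 2.0]])
sampling = LHS(xlimits=xlimits, criterion="ese")
x_doe = sampling(8)  # 8 points spread over the 2-D box
print(x_doe.shape)   # (8, 2)
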
Example #17
    def _initialize(self):

        super(MOO, self)._initialize()
        declare = self.options.declare

        declare("fun", None, types=FunctionType, desc="Function to minimize")
        declare(
            "criterion",
            "PI",
            types=str,
            values=["PI", "GA"],
            desc="criterion for next evaluation point determination: Expected Improvement, \
            Surrogate-Based Optimization or genetic algo point",
        )
        declare("n_iter", 10, types=int, desc="Number of optimizer steps")
        declare(
            "n_max_optim",
            20,
            types=int,
            desc="Maximum number of internal optimizations",
        )
        declare("xlimits", None, types=np.ndarray, desc="Bounds of function fun inputs")
        declare("n_start", 20, types=int, desc="Number of optimization start points")
        declare(
            "n_parallel",
            1,
            types=int,
            desc="Number of parallel samples to compute using qEI criterion",
        )
        declare(
            "surrogate",
            KRG(print_global=False),
            types=(KRG, KPLS, KPLSK, MGP),
            desc="SMT kriging-based surrogate model used internaly",
        )  # ne pas utiliser ou adapter au multiobj qu'on aie bien des modees indep pour chaque objectif
        declare(
            "pop_size",
            100,
            types=int,
            desc="number of individuals for the genetic algorithm",
        )
        declare(
            "n_gen",
            100,
            types=int,
            desc="number generations for the genetic algorithm",
        )
        declare(
            "q",
            0.5,
            types=float,
            desc="importance ration of desgn space in comparation to objective space when chosing a point with GA",
        )
        declare("verbose", False, types=bool, desc="Print computation information")
Example #18
    def test_predict_output_shape(self):
        x = np.random.random((10, 3))
        y = np.random.random((10, 2))

        kriging = KRG()
        kriging.set_training_values(x, y)
        kriging.train()

        val = kriging.predict_values(x)
        self.assertEqual(y.shape, val.shape)

        var = kriging.predict_variances(x)
        self.assertEqual(y.shape, var.shape)
Example #19
    def __init__(self, systemsize, input_dict, data_type=float):
        QuantityOfInterest.__init__(self, systemsize, data_type=data_type)

        # Load the eigenmodes
        fname = input_dict['surrogate info full path']
        surrogate_info = np.load(fname)
        surrogate_samples = surrogate_info['input_samples']
        fval_arr = surrogate_info['fvals']

        # Create the surrogate
        self.surrogate_type = input_dict['surrogate_type']
        if self.surrogate_type == 'quadratic':
            self.surrogate = QP()
        elif self.surrogate_type == 'kriging':
            theta0 = input_dict['kriging_theta']
            self.surrogate = KRG(theta0=[theta0],
                                 corr=input_dict['correlation function'])
        else:
            raise NotImplementedError
        self.surrogate.set_training_values(surrogate_samples.T, fval_arr)
        self.surrogate.train()
Example #20
    def modelize(self, xt, yt):
        # one independent Kriging model per output
        self.modeles = []
        for iny in range(self.ny):
            t = KRG(print_global=False)
            t.set_training_values(xt, yt[iny])
            t.train()
            self.modeles.append(t)
Example #21
class SS_model_KRG(SS_model_base):
    def __init__(self, data, x_headers, y_header, RBF_scale=[2.91, 174, 12.8]):

        super().__init__(data, x_headers, y_header)

        self.RBF_scale = RBF_scale
        self.model = KRG(theta0=RBF_scale,
                         poly='quadratic',
                         print_training=False,
                         print_global=False)

    def update_hyperparameters(self, param):

        self.RBF_scale = param
        self.model = KRG(theta0=param,
                         poly='quadratic',
                         print_training=False,
                         print_global=False)

    def get_hyperparameters(self):

        return self.RBF_scale

    def fit(self):

        self.model.set_training_values(self.X_train, self.y_train)
        self.model.train()

    def model_predict(self):

        X = np.array(self.X_pred)
        self.y_pred = self.model.predict_values(X)
        self.y_std = self.model.predict_variances(X)
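
The theta0 passed above is one initial correlation length per input dimension. A minimal self-contained sketch of the same pattern with plain smt (toy data; the values are illustrative):

import numpy as np
from smt.surrogate_models import KRG

X = np.random.random((20, 3))
y = X[:, 0] + 2.0 * X[:, 1] - X[:, 2]

sm = KRG(theta0=[1e-2, 1e-1, 1.0],  # one initial theta per input dimension
         poly='quadratic', print_training=False, print_global=False)
sm.set_training_values(X, y)
sm.train()
print(sm.predict_values(X[:2]))
print(sm.predict_variances(X[:2]))
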
Example #22
    def test_mixed_full_gaussian_3D(self):
        from smt.applications.mixed_integer import (
            MixedIntegerSurrogateModel,
            ENUM,
            FLOAT,
            ORD,
            FULL_GAUSSIAN,
        )
        from smt.surrogate_models import KRG
        import matplotlib.pyplot as plt
        import numpy as np
        import itertools

        xt = np.array([[0, 5, 0], [2, -1, 2], [4, 0.5, 1]])
        yt = np.array([[0.0], [1.0], [1.5]])
        xlimits = [
            ["0.0", "1.0", " 2.0", "3.0", "4.0"],
            [-5, 5],
            ["0.0", "1.0", " 2.0", "3.0"],
        ]

        # Surrogate
        sm = MixedIntegerSurrogateModel(
            categorical_kernel=FULL_GAUSSIAN,
            xtypes=[(ENUM, 5), ORD, (ENUM, 4)],
            xlimits=xlimits,
            surrogate=KRG(theta0=[1e-2]),
        )
        sm.set_training_values(xt, yt)
        sm.train()

        # DOE for validation
        x = np.linspace(0, 4, 5)
        x2 = np.linspace(-5, 5, 21)
        x3 = np.linspace(0, 3, 4)
        x1 = []
        for element in itertools.product(x, x2, x3):
            x1.append(np.array(element))
        x_pred = np.array(x1)

        for i, xp in enumerate(x_pred):
            print(i, xp)
        y = sm.predict_values(x_pred)
        yvar = sm.predict_variances(x_pred)

        # predictions are correct on known points
        self.assertTrue(np.abs(np.sum(np.array([y[80], y[202], y[381]]) - yt)) < 1e-6)
        self.assertTrue(
            np.abs(np.sum(np.array([yvar[80], yvar[202], yvar[381]]))) < 1e-6
        )
Example #23
    def test_noise_estimation(self):
        xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
        sm = KRG(hyper_opt="Cobyla", eval_noise=True, noise0=[1e-4])

        sm.set_training_values(xt, yt)
        sm.train()
        x = np.linspace(0, 4, 100)
        y = sm.predict_values(x)
        self.assert_error(np.array(sm.optimal_theta), np.array([0.11798507]),
                          1e-5, 1e-5)
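
A minimal runnable sketch of the noise-estimation options exercised above (assumes only that smt is installed):

import numpy as np
from smt.surrogate_models import KRG

rng = np.random.RandomState(42)
xt = np.linspace(0.0, 4.0, 20).reshape(-1, 1)
yt = np.sin(xt) + 0.05 * rng.randn(20, 1)  # noisy observations

sm = KRG(eval_noise=True, noise0=[1e-4], print_global=False)
sm.set_training_values(xt, yt)
sm.train()
print(sm.optimal_theta)  # hyperparameters estimated jointly with the noise
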
Example #24
    def test_likelihood_hessian(self):
        for corr_str in [
                "abs_exp",
                "squar_exp",
                "act_exp",
                "matern32",
                "matern52",
        ]:  # For every kernel
            for poly_str in ["constant", "linear",
                             "quadratic"]:  # For every method
                if corr_str == "act_exp":
                    kr = MGP(print_global=False)
                    theta = self.random.rand(4)
                else:
                    kr = KRG(print_global=False)
                    theta = self.theta
                kr.options["poly"] = poly_str
                kr.options["corr"] = corr_str
                kr.set_training_values(self.X, self.y)
                kr.train()
                grad_red, dpar = kr._reduced_likelihood_gradient(theta)

                hess, hess_ij, _ = kr._reduced_likelihood_hessian(theta)
                Hess = np.zeros((theta.shape[0], theta.shape[0]))
                Hess[hess_ij[:, 0], hess_ij[:, 1]] = hess[:, 0]
                Hess[hess_ij[:, 1], hess_ij[:, 0]] = hess[:, 0]

                grad_norm_all = []
                diff_norm_all = []
                ind_theta = []
                for j, omega_j in enumerate(theta):
                    eps_omega = theta.copy()
                    eps_omega[j] += self.eps

                    grad_red_eps, _ = kr._reduced_likelihood_gradient(
                        eps_omega)
                    for i, theta_i in enumerate(theta):

                        hess_eps = (grad_red_eps[i] - grad_red[i]) / self.eps

                        grad_norm_all.append(
                            np.linalg.norm(Hess[i, j]) / np.linalg.norm(Hess))
                        diff_norm_all.append(
                            np.linalg.norm(hess_eps) / np.linalg.norm(Hess))
                        ind_theta.append(r"$x_%d,x_%d$" % (j, i))
                self.assert_error(
                    np.array(grad_norm_all),
                    np.array(diff_norm_all),
                    atol=1e-5,
                    rtol=1e-3,
                )  # from utils/smt_test_case.py
Example #25
    def test_mixed_int_krg(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KRG
        from smt.applications.mixed_integer import MixedIntegerSurrogateModel, INT

        xt = np.array([0.0, 2.0, 3.0])
        yt = np.array([0.0, 1.5, 0.9])

        # xtypes = [FLOAT, INT, (ENUM, 3), (ENUM, 2)]
        # FLOAT means x1 continuous
        # INT means x2 integer
        # (ENUM, 3) means x3, x4 & x5 are 3 levels of the same categorical variable
        # (ENUM, 2) means x6 & x7 are 2 levels of the same categorical variable

        sm = MixedIntegerSurrogateModel(xtypes=[INT],
                                        xlimits=[[0, 4]],
                                        surrogate=KRG(theta0=[1e-2]))
        sm.set_training_values(xt, yt)
        sm.train()

        num = 500
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        # estimated variance
        s2 = sm.predict_variances(x)

        fig, axs = plt.subplots(2)

        axs[0].plot(xt, yt, "o")
        axs[0].plot(x, y)
        axs[0].set_xlabel("x")
        axs[0].set_ylabel("y")
        axs[0].legend(["Training data", "Prediction"])

        # add a plot with variance
        axs[1].plot(xt, yt, "o")
        axs[1].plot(x, y)
        axs[1].fill_between(
            np.ravel(x),
            np.ravel(y - 3 * np.sqrt(s2)),
            np.ravel(y + 3 * np.sqrt(s2)),
            color="lightgrey",
        )
        axs[1].set_xlabel("x")
        axs[1].set_ylabel("y")
        axs[1].legend(
            ["Training data", "Prediction", "Confidence interval (±3σ)"])

        plt.show()
Example #26
    def test_krg(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KRG

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])

        sm = KRG(theta0=[1e-2])
        sm.set_training_values(xt, yt)
        sm.train()

        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        # estimated variance
        s2 = sm.predict_variances(x)
        # derivative according to the first variable
        dydx = sm.predict_derivatives(xt, 0)
        fig, axs = plt.subplots(2)

        axs[0].plot(xt, yt, "o")
        axs[0].plot(x, y)
        axs[0].set_xlabel("x")
        axs[0].set_ylabel("y")
        axs[0].legend(["Training data", "Prediction"])

        # add a plot with variance
        axs[1].plot(xt, yt, "o")
        axs[1].plot(x, y)
        axs[1].fill_between(
            np.ravel(x),
            np.ravel(y - 3 * np.sqrt(s2)),
            np.ravel(y + 3 * np.sqrt(s2)),
            color="lightgrey",
        )
        axs[1].set_xlabel("x")
        axs[1].set_ylabel("y")
        axs[1].legend(
            ["Training data", "Prediction", "Confidence interval (±3σ)"])

        plt.show()
Example #27
    def test_mixed_gower_2D(self):
        from smt.applications.mixed_integer import (
            MixedIntegerSurrogateModel,
            ENUM,
            FLOAT,
            GOWER,
        )
        from smt.surrogate_models import KRG
        import matplotlib.pyplot as plt
        import numpy as np
        import itertools

        xt = np.array([[0, 5], [2, -1], [4, 0.5]])
        yt = np.array([[0.0], [1.0], [1.5]])
        xlimits = [["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]]

        # Surrogate
        sm = MixedIntegerSurrogateModel(
            categorical_kernel=GOWER,
            xtypes=[(ENUM, 5), FLOAT],
            xlimits=xlimits,
            surrogate=KRG(theta0=[1e-2], corr="abs_exp"),
        )
        sm.set_training_values(xt, yt)
        sm.train()

        # DOE for validation
        x = np.linspace(0, 4, 5)
        x2 = np.linspace(-5, 5, 21)
        x1 = []
        for element in itertools.product(x, x2):
            x1.append(np.array(element))
        x_pred = np.array(x1)

        for i, xp in enumerate(x_pred):
            print(i, xp)
        y = sm.predict_values(x_pred)
        yvar = sm.predict_variances(x_pred)

        # predictions are correct on known points
        self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6)
        self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6)

        self.assertEqual(np.shape(y), (105, 1))
Example #28
    def run_mixed_integer_context_example(self):
        import numpy as np
        import matplotlib.pyplot as plt
        from matplotlib import colors
        from mpl_toolkits.mplot3d import Axes3D

        from smt.surrogate_models import KRG
        from smt.sampling_methods import LHS, Random
        from smt.applications.mixed_integer import MixedIntegerContext, FLOAT, INT, ENUM

        xtypes = [INT, FLOAT, (ENUM, 4)]
        xlimits = [[0, 5], [0.0, 4.0], ["blue", "red", "green", "yellow"]]

        def ftest(x):
            return (x[:, 0] * x[:, 0] + x[:, 1] * x[:, 1]) * (x[:, 2] + 1)

        # context to create consistent DOEs and surrogate
        mixint = MixedIntegerContext(xtypes, xlimits)

        # DOE for training
        lhs = mixint.build_sampling_method(LHS, criterion="ese")

        num = mixint.get_unfolded_dimension() * 5
        print("DOE point nb = {}".format(num))
        xt = lhs(num)
        yt = ftest(xt)

        # Surrogate
        sm = mixint.build_surrogate_model(KRG())
        print(xt)
        sm.set_training_values(xt, yt)
        sm.train()

        # DOE for validation
        rand = mixint.build_sampling_method(Random)
        xv = rand(50)
        yv = ftest(xv)
        yp = sm.predict_values(xv)

        plt.plot(yv, yv)
        plt.plot(yv, yp, "o")
        plt.xlabel("actual")
        plt.ylabel("prediction")

        plt.show()
Example #29
    def test_likelihood_derivatives(self):
        for corr_str in [
                "abs_exp",
                "squar_exp",
                "act_exp",
                "matern32",
                "matern52",
        ]:  # For every kernel
            for poly_str in ["constant", "linear",
                             "quadratic"]:  # For every method
                if corr_str == "act_exp":
                    kr = MGP(print_global=False)
                    theta = self.random.rand(4)
                else:
                    kr = KRG(print_global=False)
                    theta = self.theta
                kr.options["poly"] = poly_str
                kr.options["corr"] = corr_str
                kr.set_training_values(self.X, self.y)
                kr.train()

                grad_red, dpar = kr._reduced_likelihood_gradient(theta)
                red, par = kr._reduced_likelihood_function(theta)

                grad_norm_all = []
                diff_norm_all = []
                ind_theta = []
                for i, theta_i in enumerate(theta):
                    eps_theta = theta.copy()
                    eps_theta[i] = eps_theta[i] + self.eps

                    red_dk, par_dk = kr._reduced_likelihood_function(eps_theta)
                    dred_dk = (red_dk - red) / self.eps

                    grad_norm_all.append(grad_red[i])
                    diff_norm_all.append(float(dred_dk))
                    ind_theta.append(r"$x_%d$" % i)

                grad_norm_all = np.atleast_2d(grad_norm_all)
                diff_norm_all = np.atleast_2d(diff_norm_all).T
                self.assert_error(grad_norm_all,
                                  diff_norm_all,
                                  atol=1e-5,
                                  rtol=1e-3)  # from utils/smt_test_case.py
Example #30
    def setUp(self):
        ndim = 10
        nt = 500
        ne = 100

        problems = OrderedDict()
        problems["sphere"] = Sphere(ndim=ndim)
        problems["exp"] = TensorProduct(ndim=ndim, func="exp")
        problems["tanh"] = TensorProduct(ndim=ndim, func="tanh")
        problems["cos"] = TensorProduct(ndim=ndim, func="cos")

        sms = OrderedDict()
        sms["LS"] = LS()
        sms["QP"] = QP()
        sms["KRG"] = KRG(theta0=[4e-1] * ndim)
        sms["KPLS"] = KPLS()

        if compiled_available:
            sms["IDW"] = IDW()
            sms["RBF"] = RBF()

        t_errors = {}
        t_errors["LS"] = 1.0
        t_errors["QP"] = 1.0
        t_errors["KRG"] = 1e-4
        t_errors["IDW"] = 1e-15
        t_errors["RBF"] = 1e-2
        t_errors["KPLS"] = 1e-3

        e_errors = {}
        e_errors["LS"] = 2.5
        e_errors["QP"] = 2.0
        e_errors["KRG"] = 2.0
        e_errors["IDW"] = 4
        e_errors["RBF"] = 2
        e_errors["KPLS"] = 2.5

        self.nt = nt
        self.ne = ne
        self.problems = problems
        self.sms = sms
        self.t_errors = t_errors
        self.e_errors = e_errors