Example no. 1
    def modelize(self, xt, yt):
        self.modeles = []
        for iny in range(self.ny):
            t = KRG(print_global=False)
            t.set_training_values(xt, yt[iny])
            t.train()
            self.modeles.append(t)
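The loop above fits one independent KRG per output column (yt[iny]). As the shape tests in Examples no. 4 and 6 below show, a single KRG instance also accepts a multi-column y directly; a minimal sketch of that alternative on toy data (not part of the original snippet):

import numpy as np
from smt.surrogate_models import KRG

x = np.random.random((10, 3))
y = np.random.random((10, 2))      # two outputs trained at once

sm = KRG(print_global=False)
sm.set_training_values(x, y)
sm.train()
print(sm.predict_values(x).shape)  # (10, 2): one column per output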
Example no. 2
class SS_model_KRG(SS_model_base):
    def __init__(self, data, x_headers, y_header, RBF_scale=[2.91, 174, 12.8]):

        super().__init__(data, x_headers, y_header)

        self.RBF_scale = RBF_scale
        self.model = KRG(theta0=RBF_scale,
                         poly='quadratic',
                         print_training=False,
                         print_global=False)

    def update_hyperparameters(self, param):

        self.RBF_scale = param
        self.model = KRG(theta0=param,
                         poly='quadratic',
                         print_training=False,
                         print_global=False)

    def get_hyperparameters(self):

        return self.RBF_scale

    def fit(self):

        self.model.set_training_values(self.X_train, self.y_train)
        self.model.train()

    def model_predict(self):

        X = np.array(self.X_pred)
        self.y_pred = self.model.predict_values(X)
        self.y_std = self.model.predict_variances(X)
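Note that model_predict stores the raw output of predict_variances in y_std, i.e. a variance rather than a standard deviation. Outside the wrapper (whose X_train, y_train and X_pred come from SS_model_base, not shown here), the same KRG configuration can be exercised directly. A small illustrative sketch on toy 3-D data; the modest theta0 below is an assumption, the wrapper's RBF_scale would be passed the same way:

import numpy as np
from smt.surrogate_models import KRG

xt = np.random.random((30, 3))
yt = np.sum(xt ** 2, axis=1)

model = KRG(theta0=[1e-2] * 3,
            poly='quadratic',
            print_training=False,
            print_global=False)
model.set_training_values(xt, yt)
model.train()

x_query = np.random.random((5, 3))
y_pred = model.predict_values(x_query)              # mean prediction, shape (5, 1)
y_std = np.sqrt(model.predict_variances(x_query))   # standard deviation, shape (5, 1)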
Example no. 3
    def test_variance_derivatives(self):
        for corr_str in [
                "abs_exp",
                "squar_exp",
                "matern32",
                "matern52",
        ]:
            kr = KRG(print_global=False)
            kr.options["poly"] = "constant"
            kr.options["corr"] = corr_str
            kr.set_training_values(self.X, self.y)
            kr.train()

            e = 1e-6
            xa = random.random()
            xb = random.random()
            x_valid = [[xa, xb], [xa + e, xb], [xa - e, xb], [xa, xb + e],
                       [xa, xb - e]]

            y_predicted = kr.predict_variances(np.array(x_valid))
            y_jacob = np.zeros((2, 5))

            for i in range(np.shape(x_valid)[0]):
                derivs = kr.predict_variance_derivatives(
                    np.atleast_2d(x_valid[i]))[0]
                y_jacob[:, i] = derivs

            diff_g = (y_predicted[1][0] - y_predicted[2][0]) / (2 * e)
            diff_d = (y_predicted[3][0] - y_predicted[4][0]) / (2 * e)

            jac_rel_error1 = abs((y_jacob[0] - diff_g) / y_jacob[0])
            self.assert_error(jac_rel_error1, 1e-3, atol=0.01, rtol=0.01)

            jac_rel_error2 = abs((y_jacob[1] - diff_d) / y_jacob[1])
            self.assert_error(jac_rel_error2, 1e-3, atol=0.01, rtol=0.01)
Example no. 4
    def test_predict_output_shape(self):
        d, n = (3, 10)
        sx = LHS(
            xlimits=np.repeat(np.atleast_2d([0.0, 1.0]), d, axis=0),
            criterion="m",
            random_state=42,
        )
        x = sx(n)
        sy = LHS(
            xlimits=np.repeat(np.atleast_2d([0.0, 1.0]), 2, axis=0),
            criterion="m",
            random_state=42,
        )
        y = sy(n)

        kriging = KRG()
        kriging.set_training_values(x, y)
        kriging.train()

        val = kriging.predict_values(x)
        self.assertEqual(y.shape, val.shape)

        var = kriging.predict_variances(x)
        self.assertEqual(y.shape, var.shape)

        kriging = KRG(n_start=1)
        kriging.set_training_values(x, y)
        kriging.train()

        val2 = kriging.predict_values(x)
        self.assertEqual(y.shape, val2.shape)
Example no. 5
    def test_noise_estimation(self):
        xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
        sm = KRG(hyper_opt="Cobyla", eval_noise=True, noise0=[1e-4])

        sm.set_training_values(xt, yt)
        sm.train()
        x = np.linspace(0, 4, 100)
        y = sm.predict_values(x)
        self.assert_error(np.array(sm.optimal_theta), np.array([0.11798507]),
                          1e-5, 1e-5)
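For contrast, a short self-contained sketch (not part of the test) comparing the noise-estimating model above with a default, interpolating KRG on the same five points; with eval_noise=True the prediction is free to smooth the data instead of reproducing yt at the training sites:

import numpy as np
from smt.surrogate_models import KRG

xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])

sm_noise = KRG(eval_noise=True, noise0=[1e-4], print_global=False)  # regression
sm_interp = KRG(print_global=False)                                 # interpolation (default)
for m in (sm_noise, sm_interp):
    m.set_training_values(xt, yt)
    m.train()

print(sm_interp.predict_values(xt).ravel())  # essentially the training data back
print(sm_noise.predict_values(xt).ravel())   # smoothed values once noise is estimated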
Example no. 6
    def test_predict_output_shape(self):
        x = np.random.random((10, 3))
        y = np.random.random((10, 2))

        kriging = KRG()
        kriging.set_training_values(x, y)
        kriging.train()

        val = kriging.predict_values(x)
        self.assertEqual(y.shape, val.shape)

        var = kriging.predict_variances(x)
        self.assertEqual(y.shape, var.shape)
Example no. 7
def kriging(xt, yt, xtest, ytest, ndim):
    ########### The Kriging model

    # The variable 'theta0' is a list of length ndim.
    t = KRG(theta0=[1e-2] * ndim, print_prediction=False)
    t.set_training_values(xt, yt)
    t.train()

    title = 'Kriging model: validation of the prediction model'
    print(title)
    print('Kriging,  err: ' + str(compute_rms_error(t, xtest, ytest)))
    print("theta values", t.optimal_theta)

    # Plot the function, the prediction and the 95% confidence interval based on
    # the MSE
    return t, title, xtest, ytest
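The closing comment mentions plotting the prediction and the 95% confidence interval, but the plotting itself is left to the caller. A hedged sketch of what that caller code could look like, on toy data (the quadratic test function, the 1.96-sigma band and the matplotlib calls are illustrative assumptions, not part of the original function):

import numpy as np
import matplotlib.pyplot as plt

# toy data for illustration: a 2-D quadratic sampled at random points
rng = np.random.RandomState(0)
ndim = 2
xt = rng.uniform(0.0, 1.0, (50, ndim))
yt = np.sum(xt ** 2, axis=1).reshape(-1, 1)
xtest = rng.uniform(0.0, 1.0, (20, ndim))
ytest = np.sum(xtest ** 2, axis=1).reshape(-1, 1)

t, title, xtest, ytest = kriging(xt, yt, xtest, ytest, ndim)

y_pred = t.predict_values(xtest)
s = np.sqrt(t.predict_variances(xtest))  # predictive standard deviation

plt.errorbar(ytest.ravel(), y_pred.ravel(), yerr=1.96 * s.ravel(),
             fmt='.', label='prediction with 95% CI')
plt.plot(ytest.ravel(), ytest.ravel(), '-', label='y_true = y_pred')
plt.xlabel('y_true')
plt.ylabel('y_pred')
plt.title(title)
plt.legend()
plt.show()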
Example no. 8
class SurrogateModelSMT(SurrogateModelPredict):
    def __init__(self, problem):
        super().__init__(problem)

        self.train_step = -1
        self.sigma_threshold = 10
        self.score_threshold = 0.5

    def init_default_regressor(self):
        # default regressor
        self.regressor = KRG(theta0=[1e-2])
        self.has_epsilon = True

    def predict(self, x, *args):
        if self.trained:
            return self.regressor.predict_values(np.array([x]))
        else:
            assert 0

    def predict_variances(self, x, *args):
        if self.trained:
            return self.regressor.predict_variances(np.array([x]), *args)
        else:
            assert 0

    def train(self):
        self.trained = False
        assert (len(self.x_data) == len(self.y_data))

        # print(self.x_data)
        # print("Trained set: {}".format(len(self.x_data)))

        self.regressor.options["print_global"] = False
        self.regressor.set_training_values(np.array(self.x_data),
                                           np.array(self.y_data))
        self.regressor.train()

        if self.eval_stats:
            # score
            pass
            # self.score = self.regressor.score(self.x_data, self.y_data)
            # print("self.score = {} : {}".format(len(self.x_data), self.score))
            # lml (Gaussian regressor)
            # if "log_marginal_likelihood" in dir(self.regressor):
            #    self.lml, self.lml_gradient = self.regressor.log_marginal_likelihood(self.regressor.kernel_.theta, eval_gradient=True)

        self.trained = True
Example no. 9
class InterceptorSurrogateQoI(QuantityOfInterest):
    """
    Class that creates a surrogate model for the dymos supersonic interceptor problem
    for analysis
    """
    def __init__(self, systemsize, input_dict, data_type=float):
        QuantityOfInterest.__init__(self, systemsize, data_type=data_type)

        # Load the eigenmodes
        fname = input_dict['surrogate info full path']
        surrogate_info = np.load(fname)
        surrogate_samples = surrogate_info['input_samples']
        fval_arr = surrogate_info['fvals']

        # Create the surrogate
        self.surrogate_type = input_dict['surrogate_type']
        if self.surrogate_type == 'quadratic':
            self.surrogate = QP()
        elif self.surrogate_type == 'kriging':
            theta0 = input_dict['kriging_theta']
            self.surrogate = KRG(theta0=[theta0],
                                 corr=input_dict['correlation function'])
        else:
            raise NotImplementedError
        self.surrogate.set_training_values(surrogate_samples.T, fval_arr)
        self.surrogate.train()

    def eval_QoI(self, mu, xi):
        rv = mu + xi
        return self.surrogate.predict_values(np.expand_dims(rv, axis=0))

    def eval_QoIGradient(self, mu, xi):
        rv = np.expand_dims(mu + xi, axis=0)
        dfdrv = np.zeros(self.systemsize, dtype=self.data_type)
        for i in range(self.systemsize):
            dfdrv[i] = self.surrogate.predict_derivatives(rv, i)[0, 0]

        return dfdrv

    def eval_QoIGradient_fd(self, mu, xi):
        # This function uses numdifftools to compute the gradients. Only use for
        # debugging.
        def func(xi):
            return self.eval_QoI(mu, xi)

        G = nd.Gradient(func)(xi)
        return G
Example no. 10
    def test_predict_output(self):
        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 1.1, 1.0])

        # Adding noisy repetitions
        np.random.seed(6)
        yt_std_rand = np.std(yt) * np.random.uniform(size=yt.shape)
        xt_full = np.array(3 * xt.tolist())
        yt_full = np.concatenate((yt, yt + 0.2 * yt_std_rand, yt - 0.2 * yt_std_rand))

        sm = KRG(theta0=[1.0], eval_noise=True, use_het_noise=True, n_start=1)
        sm.set_training_values(xt_full, yt_full)
        sm.train()

        yt = yt.reshape(-1, 1)
        y = sm.predict_values(xt)
        t_error = np.linalg.norm(y - yt) / np.linalg.norm(yt)
        self.assert_error(t_error, 0.0, 1e-2)
Example no. 11
    def test_krg(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KRG

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])

        sm = KRG(theta0=[1e-2])
        sm.set_training_values(xt, yt)
        sm.train()

        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        # estimated variance
        s2 = sm.predict_variances(x)
        # derivative according to the first variable
        dydx = sm.predict_derivatives(xt, 0)
        fig, axs = plt.subplots(2)

        axs[0].plot(xt, yt, "o")
        axs[0].plot(x, y)
        axs[0].set_xlabel("x")
        axs[0].set_ylabel("y")
        axs[0].legend(["Training data", "Prediction"])

        # add a plot with variance
        axs[1].plot(xt, yt, "o")
        axs[1].plot(x, y)
        axs[1].fill_between(
            np.ravel(x),
            np.ravel(y - 3 * np.sqrt(s2)),
            np.ravel(y + 3 * np.sqrt(s2)),
            color="lightgrey",
        )
        axs[1].set_xlabel("x")
        axs[1].set_ylabel("y")
        axs[1].legend(
            ["Training data", "Prediction", "Confidence Interval 99%"])

        plt.show()
Example no. 12
    def test_derivatives(self):
        # Construction of the DOE
        fun = Sphere(ndim=2)
        sampling = LHS(xlimits=fun.xlimits, criterion="m")
        xt = sampling(20)
        yt = fun(xt)

        # Compute the training derivatives
        for i in range(2):
            yd = fun(xt, kx=i)
            yt = np.concatenate((yt, yd), axis=1)

        # check KRG models
        sm_krg_c = KRG(poly="constant", print_global=False)
        sm_krg_c.set_training_values(xt, yt[:, 0])
        sm_krg_c.train()
        TestKRG._check_derivatives(sm_krg_c, xt, yt)

        sm_krg_l = KRG(poly="linear", print_global=False)
        sm_krg_l.set_training_values(xt, yt[:, 0])
        sm_krg_l.train()
        TestKRG._check_derivatives(sm_krg_l, xt, yt)
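TestKRG._check_derivatives is not shown in this snippet. A rough sketch of the kind of check such a helper could perform, continuing with the models trained above (the helper name, tolerances and use of the first five training points are illustrative assumptions, not the actual SMT helper):

import numpy as np

def check_derivatives_fd(sm, x, eps=1e-6, rtol=1e-3):
    # compare analytic derivatives with central finite differences (illustrative only)
    x = np.atleast_2d(x)
    for kx in range(x.shape[1]):
        dy_analytic = sm.predict_derivatives(x, kx)
        x_p = x.copy()
        x_p[:, kx] += eps
        x_m = x.copy()
        x_m[:, kx] -= eps
        dy_fd = (sm.predict_values(x_p) - sm.predict_values(x_m)) / (2 * eps)
        assert np.allclose(dy_analytic, dy_fd, rtol=rtol, atol=1e-6)

check_derivatives_fd(sm_krg_c, xt[:5])
check_derivatives_fd(sm_krg_l, xt[:5])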
Example no. 13
    def test_krg(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KRG

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0])

        sm = KRG(theta0=[1e-2])
        sm.set_training_values(xt, yt)
        sm.train()

        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)

        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()
Example no. 14
    def test_krg(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KRG

        xt = np.array([0., 1., 2., 3., 4.])
        yt = np.array([0., 1., 1.5, 0.5, 1.0])

        sm = KRG(theta0=[1e-2])
        sm.set_training_values(xt, yt)
        sm.train()

        num = 100
        x = np.linspace(0., 4., num)
        y = sm.predict_values(x)

        plt.plot(xt, yt, 'o')
        plt.plot(x, y)
        plt.xlabel('x')
        plt.ylabel('y')
        plt.legend(['Training data', 'Prediction'])
        plt.show()
Example no. 15
 def train_sm(self, level=0, option=None):
     '''
     train surrogate model.
     level=0: deepest level, level=-1: combine f in all levels
     '''
     len_x = len(self._x)
     len_f = len(self._f)
     len_sm = len(self._sm)
     if not (len_x == len_f):
         raise ValueError('numbers of levels in x and f differ.')
     if len_x == 0:
         raise ValueError('no points exist for training.')
     if level == -1:
         # train model for points from all levels
         sm = []
         for idx in range(0, len_x):
             sm_tmp = KRG(print_global=False)
             x = deepcopy(self._x[idx])
             f = deepcopy(self._f[idx])
             xlimits = deepcopy(self._xlimits[idx])
             if idx > 0:
                 for jdx in reversed(range(0, idx)):
                     xn = deepcopy(self._x[jdx])
                     fn = deepcopy(self._f[jdx])
                     for kdx in range(0, xlimits.shape[0]):
                         m1 = xn[:, kdx] >= xlimits[kdx, 0]
                         m2 = xn[:, kdx] <= xlimits[kdx, 1]
                         m0 = np.logical_and(m1, m2)
                         xn = xn[m0, :]
                         fn = fn[m0, :]
                     x = np.append(x, xn, axis=0)
                     f = np.append(f, fn, axis=0)
             for jdx in range(0, idx):
                 f -= sm[jdx].predict_values(x)
             sm_tmp.set_training_values(x, f)
             sm_tmp.train()
             sm.append(deepcopy(sm_tmp))
             del (sm_tmp)
         self._sm = sm
     elif level == 0:
         # points from deepest level
         sm = self._sm
         if len_sm == len_x:
             sm[-1] = None
         elif len_sm + 1 == len_x:
             sm.append(None)
         else:
             raise ValueError('sm dimension wrong.')
         idx_KRG = 0
         for idx in range(0, len_sm):
             if type(sm[idx]) == KRG:
                 idx_KRG = idx + 1
             else:
                 break
         for idx in range(idx_KRG, len_x):
             sm_tmp = KRG(print_global=False)
             x = deepcopy(self._x[idx])
             f = deepcopy(self._f[idx])
             xlimits = deepcopy(self._xlimits[idx])
             if idx > 0:
                 for jdx in reversed(range(0, idx)):
                     xn = deepcopy(self._x[jdx])
                     fn = deepcopy(self._f[jdx])
                     for kdx in range(0, xlimits.shape[0]):
                         m1 = xn[:, kdx] >= xlimits[kdx, 0]
                         m2 = xn[:, kdx] <= xlimits[kdx, 1]
                         m0 = np.logical_and(m1, m2)
                         xn = xn[m0, :]
                         fn = fn[m0, :]
                     x = np.append(x, xn, axis=0)
                     f = np.append(f, fn, axis=0)
             for jdx in range(0, idx):
                 f -= sm[jdx].predict_values(x)
             sm_tmp.set_training_values(x, f)
             sm_tmp.train()
             sm[idx] = deepcopy(sm_tmp)
             del (sm_tmp)
         self._sm = sm
     else:
         raise ValueError('level should be either -1 or 0.\n' \
             + 'level=0: deepest level, level=-1: all levels.')
Example no. 16
class EGO(SurrogateBasedApplication):
    def _initialize(self):
        super(EGO, self)._initialize()
        declare = self.options.declare

        declare("fun", None, types=FunctionType, desc="Function to minimize")
        declare(
            "criterion",
            "EI",
            types=str,
            values=["EI", "SBO", "UCB"],
            desc="criterion for next evaluation point determination: Expected Improvement, \
            Surrogate-Based Optimization or Upper Confidence Bound",
        )
        declare("n_iter", None, types=int, desc="Number of optimizer steps")
        declare(
            "n_max_optim",
            20,
            types=int,
            desc="Maximum number of internal optimizations",
        )
        declare("n_start", 20, types=int, desc="Number of optimization start points")
        declare(
            "n_parallel",
            1,
            types=int,
            desc="Number of parallel samples to compute using qEI criterion",
        )
        declare(
            "qEI",
            "KBLB",
            types=str,
            values=["KB", "KBLB", "KBUB", "KBRand", "CLmin"],
            desc="Approximated q-EI maximization strategy",
        )
        declare(
            "evaluator",
            default=Evaluator(),
            types=Evaluator,
            desc="Object used to run function fun to optimize at x points (nsamples, nxdim)",
        )
        declare(
            "n_doe",
            None,
            types=int,
            desc="Number of points of the initial LHS doe, only used if xdoe is not given",
        )
        declare("xdoe", None, types=np.ndarray, desc="Initial doe inputs")
        declare("ydoe", None, types=np.ndarray, desc="Initial doe outputs")
        declare("xlimits", None, types=np.ndarray, desc="Bounds of function fun inputs")
        declare("verbose", False, types=bool, desc="Print computation information")

    def optimize(self, fun):
        """
        Optimizes fun

        Parameters
        ----------

        fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]

        Returns
        -------

        [nx, 1]: x optimum
        [1, 1]: y optimum
        int: index of optimum in data arrays 
        [ndoe + n_iter, nx]: coord-x data
        [ndoe + n_iter, 1]: coord-y data
        [ndoe, nx]: coord-x initial doe
        [ndoe, 1]: coord-y initial doe
        """
        # Set the bounds of the optimization problem
        xlimits = self.options["xlimits"]

        # Build initial DOE
        self._sampling = LHS(xlimits=xlimits, criterion="ese")
        self._evaluator = self.options["evaluator"]

        xdoe = self.options["xdoe"]
        if xdoe is None:
            self.log("Build initial DOE with LHS")
            n_doe = self.options["n_doe"]
            x_doe = self._sampling(n_doe)
        else:
            self.log("Initial DOE given")
            x_doe = np.atleast_2d(xdoe)

        ydoe = self.options["ydoe"]
        if ydoe is None:
            y_doe = self._evaluator.run(fun, x_doe)
        else:  # to save time if y_doe is already given to EGO
            y_doe = ydoe

        # to save the initial doe
        x_data = x_doe
        y_data = y_doe

        self.gpr = KRG(print_global=False)

        n_iter = self.options["n_iter"]
        n_parallel = self.options["n_parallel"]

        for k in range(n_iter):
            # Virtual enrichment loop
            for p in range(n_parallel):
                x_et_k, success = self._find_points(x_data, y_data)
                if not success:
                    self.log(
                        "Internal optimization failed at EGO iter = {}.{}".format(k, p)
                    )
                    break
                elif success:
                    self.log(
                        "Internal optimization succeeded at EGO iter = {}.{}".format(
                            k, p
                        )
                    )
                # Temporarily set y_data to the value predicted by the kriging metamodel
                y_et_k = self.set_virtual_point(np.atleast_2d(x_et_k), y_data)

                # Update y_data with predicted value
                y_data = np.atleast_2d(np.append(y_data, y_et_k)).T
                x_data = np.atleast_2d(np.append(x_data, x_et_k, axis=0))

            # Compute the real values of y_data
            x_to_compute = np.atleast_2d(x_data[-n_parallel:])
            y = self._evaluator.run(fun, x_to_compute)
            y_data[-n_parallel:] = y

        # Find the optimal point
        ind_best = np.argmin(y_data)
        x_opt = x_data[ind_best]
        y_opt = y_data[ind_best]

        return x_opt, y_opt, ind_best, x_data, y_data, x_doe, y_doe

    def log(self, msg):
        if self.options["verbose"]:
            print(msg)

    def EI(self, points, y_data):
        """ Expected improvement """
        f_min = np.min(y_data)
        pred = self.gpr.predict_values(points)
        sig = np.sqrt(self.gpr.predict_variances(points))
        if sig.size == 1 and sig == 0.0:  # guard: only meaningful when a single point is evaluated
            return 0.0
        args0 = (f_min - pred) / sig
        args1 = (f_min - pred) * norm.cdf(args0)
        args2 = sig * norm.pdf(args0)

        ei = args1 + args2
        return ei

    def SBO(self, point):
        """ Surrogate based optimization: min the surrogate model by suing the mean mu """
        res = self.gpr.predict_values(point)
        return res

    def UCB(self, point):
        """ Upper confidence bound optimization: minimize by using mu - 3*sigma """
        pred = self.gpr.predict_values(point)
        var = self.gpr.predict_variances(point)
        res = pred - 3.0 * np.sqrt(var)
        return res

    def _find_points(self, x_data, y_data):
        """
        Function that analyse a set of x_data and y_data and give back the 
        more interesting point to evaluates according to the selected criterion
        
        Inputs: 
            - x_data and y_data
        Outputs:
            - x_et_k : the points to evaluate
            - success bool : boolean succes flag to interupte
                the main loop if need
        
        """
        self.gpr.set_training_values(x_data, y_data)
        self.gpr.train()

        criterion = self.options["criterion"]
        n_start = self.options["n_start"]
        n_max_optim = self.options["n_max_optim"]
        bounds = self.options["xlimits"]

        if criterion == "EI":
            self.obj_k = lambda x: -self.EI(np.atleast_2d(x), y_data)
        elif criterion == "SBO":
            self.obj_k = lambda x: self.SBO(np.atleast_2d(x))
        elif criterion == "UCB":
            self.obj_k = lambda x: self.UCB(np.atleast_2d(x))

        success = False
        n_optim = 1  # in order to have some successful optimizations with SLSQP
        while not success and n_optim <= n_max_optim:
            opt_all = []
            x_start = self._sampling(n_start)
            for ii in range(n_start):
                opt_all.append(
                    minimize(
                        self.obj_k,
                        x_start[ii, :],
                        method="SLSQP",
                        bounds=bounds,
                        options={"maxiter": 200},
                    )
                )

            opt_all = np.asarray(opt_all)

            opt_success = opt_all[[opt_i["success"] for opt_i in opt_all]]
            obj_success = np.array([opt_i["fun"] for opt_i in opt_success])
            success = obj_success.size != 0
            if not success:
                self.log("New start point for the internal optimization")
                n_optim += 1

        if n_optim >= n_max_optim:
            # self.log("Internal optimization failed at EGO iter = {}".format(k))
            return np.atleast_2d(0), False

        ind_min = np.argmin(obj_success)
        opt = opt_success[ind_min]
        x_et_k = np.atleast_2d(opt["x"])
        return x_et_k, True

    def set_virtual_point(self, x, y_data):
        qEI = self.options["qEI"]

        if qEI == "CLmin":
            return np.min(y_data)

        if qEI == "KB":
            return self.gpr.predict_values(x)

        if qEI == "KBUB":
            conf = 3.0

        if qEI == "KBLB":
            conf = -3.0

        if qEI == "KBRand":
            conf = np.random.randn()

        pred = self.gpr.predict_values(x)
        var = self.gpr.predict_variances(x)
        return pred + conf * np.sqrt(var)
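A minimal usage sketch for the EGO class above (not part of the original code), assuming the SurrogateBasedApplication base accepts the declared options as keyword arguments, as in SMT's EGO application; the 1-D test function and option values are illustrative:

import numpy as np

def fun(x):
    # simple 1-D test function; x has shape (n, 1), the return value shape (n, 1)
    return (x - 3.5) * np.sin((x - 3.5) / np.pi)

xlimits = np.array([[0.0, 25.0]])
ego = EGO(n_iter=10, criterion="EI", n_doe=5, xlimits=xlimits)
x_opt, y_opt, ind_best, x_data, y_data, x_doe, y_doe = ego.optimize(fun=fun)
print("minimum found at x = {}, y = {}".format(x_opt, y_opt))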
Example no. 17
from smt.surrogate_models import KRG
import numpy as np

from numpy import genfromtxt

x = genfromtxt('x.csv', delimiter=',')
y = genfromtxt('y.csv', delimiter=',')
x = np.atleast_2d(x)
y = np.atleast_2d(y).reshape(-1, 1)

sm = KRG(theta0=[1e-2], print_global=False)
sm.set_training_values(x, y)
sm.train()
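The script above trains the model but never queries it. A hypothetical follow-up (assuming x.csv holds several rows of multi-column inputs, so x is 2-D after atleast_2d) showing how predictions and variances could then be obtained:

x_new = x[:5]                         # reuse a few training rows as stand-in query points
y_new = sm.predict_values(x_new)      # mean prediction, shape (5, 1)
s2_new = sm.predict_variances(x_new)  # estimated variance, shape (5, 1)
print(y_new.ravel(), np.sqrt(s2_new).ravel())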



Example no. 18
class EGO(SurrogateBasedApplication):
    def _initialize(self):
        super(EGO, self)._initialize()
        declare = self.options.declare

        declare("fun", None, types=FunctionType, desc="Function to minimize")
        declare(
            "criterion",
            "EI",
            types=str,
            values=["EI", "SBO", "UCB"],
            desc=
            "criterion for next evaluation point determination: Expected Improvement, \
            Surrogate-Based Optimization or Upper Confidence Bound",
        )
        declare("n_iter", None, types=int, desc="Number of optimizer steps")
        declare(
            "n_max_optim",
            20,
            types=int,
            desc="Maximum number of internal optimizations",
        )
        declare("n_start",
                20,
                types=int,
                desc="Number of optimization start points")
        declare(
            "n_doe",
            None,
            types=int,
            desc=
            "Number of points of the initial LHS doe, only used if xdoe is not given",
        )
        declare("xdoe", None, types=np.ndarray, desc="Initial doe inputs")
        declare("ydoe", None, types=np.ndarray, desc="Initial doe outputs")
        declare("xlimits",
                None,
                types=np.ndarray,
                desc="Bounds of function fun inputs")
        declare("verbose",
                False,
                types=bool,
                desc="Print computation information")

    def optimize(self, fun):
        """
        Optimizes fun

        Parameters
        ----------

        fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]

        Returns
        -------

        [nx, 1]: x optimum
        [1, 1]: y optimum
        int: index of optimum in data arrays 
        [ndoe + n_iter, nx]: coord-x data
        [ndoe + n_iter, 1]: coord-y data
        [ndoe, nx]: coord-x initial doe
        [ndoe, 1]: coord-y initial doe
        """
        xlimits = self.options["xlimits"]
        sampling = LHS(xlimits=xlimits, criterion="ese")

        xdoe = self.options["xdoe"]
        if xdoe is None:
            self.log("Build initial DOE with LHS")
            n_doe = self.options["n_doe"]
            x_doe = sampling(n_doe)
        else:
            self.log("Initial DOE given")
            x_doe = np.atleast_2d(xdoe)

        ydoe = self.options["ydoe"]
        if ydoe is None:
            y_doe = fun(x_doe)
        else:  # to save time if y_doe is already given to EGO
            y_doe = ydoe

        # to save the initial doe
        x_data = x_doe
        y_data = y_doe

        self.gpr = KRG(print_global=False)

        bounds = xlimits

        criterion = self.options["criterion"]
        n_iter = self.options["n_iter"]
        n_start = self.options["n_start"]
        n_max_optim = self.options["n_max_optim"]

        for k in range(n_iter):

            self.gpr.set_training_values(x_data, y_data)
            self.gpr.train()

            if criterion == "EI":
                self.obj_k = lambda x: -self.EI(np.atleast_2d(x), y_data)
            elif criterion == "SBO":
                self.obj_k = lambda x: self.SBO(np.atleast_2d(x))
            elif criterion == "UCB":
                self.obj_k = lambda x: self.UCB(np.atleast_2d(x))

            success = False
            n_optim = 1  # in order to have some successful optimizations with SLSQP
            while not success and n_optim <= n_max_optim:
                opt_all = []
                x_start = sampling(n_start)
                for ii in range(n_start):
                    opt_all.append(
                        minimize(
                            self.obj_k,
                            x_start[ii, :],
                            method="SLSQP",
                            bounds=bounds,
                            options={"maxiter": 200},
                        ))

                opt_all = np.asarray(opt_all)

                opt_success = opt_all[[opt_i["success"] for opt_i in opt_all]]
                obj_success = np.array([opt_i["fun"] for opt_i in opt_success])
                success = obj_success.size != 0
                if not success:
                    self.log("New start point for the internal optimization")
                    n_optim += 1

            if n_optim >= n_max_optim:
                self.log(
                    "Internal optimization failed at EGO iter = {}".format(k))
                break
            elif success:
                self.log(
                    "Internal optimization succeeded at EGO iter = {}".format(
                        k))

            ind_min = np.argmin(obj_success)
            opt = opt_success[ind_min]
            x_et_k = np.atleast_2d(opt["x"])
            y_et_k = fun(x_et_k)

            y_data = np.atleast_2d(np.append(y_data, y_et_k)).T
            x_data = np.atleast_2d(np.append(x_data, x_et_k, axis=0))

        ind_best = np.argmin(y_data)
        x_opt = x_data[ind_best]
        y_opt = y_data[ind_best]

        return x_opt, y_opt, ind_best, x_data, y_data, x_doe, y_doe

    def log(self, msg):
        if self.options["verbose"]:
            print(msg)

    def EI(self, points, y_data):
        """ Expected improvement """
        f_min = np.min(y_data)
        pred = self.gpr.predict_values(points)
        sig = np.sqrt(self.gpr.predict_variances(points))  # standard deviation, not variance
        if sig.size == 1 and sig == 0.0:  # guard: only meaningful when a single point is evaluated
            return 0.0
        args0 = (f_min - pred) / sig
        args1 = (f_min - pred) * norm.cdf(args0)
        args2 = sig * norm.pdf(args0)

        ei = args1 + args2
        return ei

    def SBO(self, point):
        """ Surrogate based optimization: min the surrogate model by suing the mean mu """
        res = self.gpr.predict_values(point)
        return res

    def UCB(self, point):
        """ Upper confidence bound optimization: minimize by using mu - 3*sigma """
        pred = self.gpr.predict_values(point)
        var = self.gpr.predict_variances(point)
        res = pred - 3.0 * np.sqrt(var)
        return res
Example no. 19
import numpy as np
import matplotlib.pyplot as plt

from smt.surrogate_models import KRG
from smt.utils import compute_rms_error

ndim = 6

train1 = np.loadtxt('./TrainingData/RUh_TrainingData[ese]_n=200.csv', delimiter=',')
train2 = np.loadtxt('./TrainingData/RUh_TrainingData[ese]_n=100.csv', delimiter=',')
train = np.append(train1, train2, axis=0)
test = np.loadtxt('./TrainingData/RUh_TrainingData[ese]_n=50.csv', delimiter=',')
xtest, ytest = test[:,:ndim], test[:,ndim]
xt, yt = train[:,:ndim], train[:,ndim:]

# The variable 'theta0' is a list of length ndim.
theta = [0.17675797, 0.0329642, 0.00175843, 0.0328348, 0.00039516, 0.08729705,
 0.00094059, 0.00018145, 0.04470183]
t = KRG(theta0=[1e-2] * ndim, print_prediction=False)
t.set_training_values(xt, yt[:, 0])

t.train()

# Prediction of the validation points
y = t.predict_values(xtest)
print('Kriging, err: ' + str(compute_rms_error(t, xtest, ytest)))

fig = plt.figure()
plt.plot(ytest, ytest, '-', label='$y_{true}$')
plt.plot(ytest, y, 'r.', label=r'$\hat{y}$')

plt.xlabel('$y_{true}$')
plt.ylabel(r'$\hat{y}$')

plt.legend(loc='upper left')
plt.show()