Example #1
def generate_krig(lb, ub, n_krigsamp, nvar, problem, n_cpu):
    init_krigsamp = mcpopgen(lb=lb, ub=ub, ndim=2, n_order=1, n_coeff=3)
    print("Evaluating Kriging Sample")
    ykrig = problem(init_krigsamp)
    print(np.count_nonzero(ykrig <= 0))

    # Set Kriging Info
    KrigInfo = initkriginfo(1)
    KrigInfo["X"] = init_krigsamp
    KrigInfo["y"] = ykrig
    KrigInfo["nvar"] = nvar
    KrigInfo["nsamp"] = n_krigsamp
    KrigInfo["nrestart"] = 5
    KrigInfo["ub"] = ub
    KrigInfo["lb"] = lb
    KrigInfo["nkernel"] = len(KrigInfo["kernel"])
    KrigInfo["optimizer"] = "lbfgsb"

    # Train the Kriging model
    drm = None
    t = time.time()
    krigobj = Kriging(KrigInfo,
                      standardization=True,
                      standtype='default',
                      normy=False,
                      trainvar=False)
    krigobj.train(n_cpu=n_cpu)
    loocverr, _ = krigobj.loocvcalc()
    elapsed = time.time() - t
    print("elapsed time to train Kriging model: ", elapsed, "s")
    print("LOOCV error of Kriging model: ", loocverr, "%")

    return krigobj, loocverr, drm
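
A minimal driver sketch for generate_krig above, assuming numpy and the helpers used inside it (mcpopgen, initkriginfo, Kriging) are already imported in the same script. The limit-state function and the bounds are illustrative placeholders, and n_krigsamp should match the number of samples mcpopgen actually returns inside the helper.

import numpy as np

def my_limit_state(x):
    # Hypothetical limit state: values <= 0 indicate failure.
    return (x[:, 0]**2 + x[:, 1]**2 - 8.0).reshape(-1, 1)

lb = np.array([-5.0, -5.0])
ub = np.array([5.0, 5.0])
krigobj, loocverr, drm = generate_krig(lb, ub, n_krigsamp=10, nvar=2,
                                       problem=my_limit_state, n_cpu=1)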
Example #2
    def standardize(self):
        """
        Standardize Kriging samples and create regression matrix.

        Returns:
            None
        """
        Kriging.standardize(self)
Example #3
def generate_kriging(n_cpu):
    # Initialization
    KrigInfo = dict()
    kernel = ["gaussian"]
    # Sampling
    nsample = 40
    nvar = 2
    ub = np.array([5, 5])
    lb = np.array([-5, -5])
    nup = 3
    sampoption = "halton"
    samplenorm, sample = sampling(sampoption,
                                  nvar,
                                  nsample,
                                  result="real",
                                  upbound=ub,
                                  lobound=lb)
    X = sample
    # Evaluate sample
    y1 = evaluate(X, "styblinski")

    # Initialize KrigInfo
    KrigInfo = initkriginfo()
    # Set KrigInfo
    KrigInfo["X"] = X
    KrigInfo["y"] = y1
    KrigInfo["nvar"] = nvar
    KrigInfo["problem"] = "styblinski"
    KrigInfo["nsamp"] = nsample
    KrigInfo["nrestart"] = 7
    KrigInfo["ub"] = ub
    KrigInfo["lb"] = lb
    KrigInfo["kernel"] = kernel
    KrigInfo["TrendOrder"] = 0
    KrigInfo["nugget"] = -6
    # KrigInfo["n_princomp"] = 1
    KrigInfo["kernel"] = ["gaussian"]
    KrigInfo["nkernel"] = len(KrigInfo["kernel"])
    KrigInfo["optimizer"] = "lbfgsb"

    # Run Kriging
    t = time.time()
    krigobj = Kriging(KrigInfo,
                      standardization=True,
                      standtype="default",
                      normy=False,
                      trainvar=False)
    krigobj.train(n_cpu=n_cpu)
    loocverr, _ = krigobj.loocvcalc()
    elapsed = time.time() - t
    print("elapsed time for train Kriging model: ", elapsed, "s")
    print("LOOCV error of Kriging model: ", loocverr, "%")

    return krigobj
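
A short usage sketch for the surrogate built above; the query point is arbitrary and the ['pred'] output request mirrors the predict calls used in the MOBO examples further down.

import numpy as np

krigobj = generate_kriging(n_cpu=1)
x_new = np.array([[1.0, -2.0]])  # arbitrary point inside [-5, 5]^2
y_hat = krigobj.predict(x_new, ['pred'])
print("Kriging prediction at", x_new[0], ":", y_hat)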
Example #4
def generate_kriging(n_cpu):
    # Sampling
    nsample = 20
    nvar = 2
    nobj = 2
    lb = -1 * np.ones(shape=[nvar])
    ub = 1 * np.ones(shape=[nvar])
    sampoption = "halton"
    samplenorm, sample = sampling(sampoption,
                                  nvar,
                                  nsample,
                                  result="real",
                                  upbound=ub,
                                  lobound=lb)
    X = sample
    # Evaluate sample
    global y
    y = myproblem(X)

    # Initialize KrigInfo
    KrigInfo1 = initkriginfo()
    # Set KrigInfo
    KrigInfo1["X"] = X
    KrigInfo1["y"] = y[:, 0].reshape(-1, 1)
    KrigInfo1["problem"] = myproblem
    KrigInfo1["nrestart"] = 5
    KrigInfo1["ub"] = ub
    KrigInfo1["lb"] = lb
    KrigInfo1["optimizer"] = "lbfgsb"

    # Initialize KrigInfo
    KrigInfo2 = deepcopy(KrigInfo1)
    KrigInfo2['y'] = y[:, 1].reshape(-1, 1)

    # Run Kriging
    krigobj1 = Kriging(KrigInfo1,
                       standardization=True,
                       standtype='default',
                       normy=False,
                       trainvar=False)
    krigobj1.train(n_cpu=n_cpu)
    loocverr1, _ = krigobj1.loocvcalc()
    print("LOOCV error of Kriging model: ", loocverr1, "%")

    krigobj2 = Kriging(KrigInfo2,
                       standardization=True,
                       standtype='default',
                       normy=False,
                       trainvar=False)
    krigobj2.train(n_cpu=n_cpu)
    loocverr2, _ = krigobj2.loocvcalc()
    print("LOOCV error of Kriging model: ", loocverr2, "%")

    return krigobj1, krigobj2
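
generate_kriging above relies on a module-level callable named myproblem that returns two objective values per sample. A hypothetical stand-in and driver might look like this; the objective definitions are purely illustrative.

import numpy as np

def myproblem(x):
    # Hypothetical bi-objective test function, (n, nvar) -> (n, 2).
    f1 = np.sum(x**2, axis=1)
    f2 = np.sum((x - 0.5)**2, axis=1)
    return np.column_stack((f1, f2))

krigobj1, krigobj2 = generate_kriging(n_cpu=1)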
Example #5
    def simultpredparego(self, pool=None):
        """
        Perform multi updates on ParEGO MOBO by varying the weighting function.

        Args:
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool instance.
                Will be passed to functions for use, if specified.
                Defaults to None.

        Returns:
             xallnext (nparray): Array of design variable updates.
             yallnext (nparray): Array of objective value updates.
             metricall (nparray): Array of metrics of the updates.
        """
        idxs = np.random.choice(11, self.multiupdate)
        scalinfotemp = deepcopy(self.KrigScalarizedInfo)
        xalltemp = self.Xall[:, :]
        yalltemp = self.yall[:, :]
        yprednext = np.zeros(shape=[len(self.kriglist)])

        for ii, idx in enumerate(idxs):
            print(f"update number {ii + 1}")
            scalinfotemp['X'] = xalltemp
            scalinfotemp['y'] = paregopre(yalltemp, idx)
            krigtemp = Kriging(scalinfotemp,
                               standardization=True,
                               standtype='default',
                               normy=False,
                               trainvar=False)
            krigtemp.train(disp=False, pool=pool)
            x_n, met_n = run_single_opt(krigtemp,
                                        self.moboInfo,
                                        krigconstlist=self.krigconstlist,
                                        cheapconstlist=self.cheapconstlist,
                                        pool=pool)
            xnext = x_n
            metricnext = met_n
            for jj, krigobj in enumerate(self.kriglist):
                yprednext[jj] = krigobj.predict(xnext, ['pred'])
            if ii == 0:
                xallnext = deepcopy(xnext)
                yallnext = deepcopy(yprednext)
                metricall = deepcopy(metricnext)
            else:
                xallnext = np.vstack((xallnext, xnext))
                yallnext = np.vstack((yallnext, yprednext))
                metricall = np.vstack((metricall, metricnext))

        yalltemp = np.vstack((yalltemp, yprednext))
        xalltemp = np.vstack((xalltemp, xnext))

        return xallnext, yallnext, metricall
Example #6
def generate_krig(init_samp, krigsamp, nvar, problem):

    # Monte Carlo Sampling
    t1 = time.time()
    init_krigsamp = krigsamp
    n_krigsamp = np.size(krigsamp, 0)
    ykrig = evaluate(init_krigsamp, type=problem)
    t2 = time.time()

    init_samp_G = evaluate(init_samp, type=problem)
    total_samp = np.hstack((init_samp, init_samp_G)).transpose()
    positive_samp = total_samp[:, total_samp[nvar] >= 0]
    positive_samp = positive_samp.transpose()
    nsamp = np.size(init_samp, 0)
    npos = np.size(positive_samp, 0)
    Pfreal = 1 - npos / nsamp

    lb = np.floor(np.min(init_samp)) * np.ones(shape=[nvar])
    ub = np.ceil(np.max(init_samp)) * np.ones(shape=[nvar])

    # Set Kriging Info
    KrigInfo = initkriginfo("single")
    KrigInfo["X"] = init_krigsamp
    KrigInfo["y"] = ykrig
    KrigInfo["nvar"] = nvar
    KrigInfo["nsamp"] = n_krigsamp
    KrigInfo["nrestart"] = 5
    KrigInfo["ub"] = ub
    KrigInfo["lb"] = lb
    KrigInfo["nkernel"] = len(KrigInfo["kernel"])
    KrigInfo["optimizer"] = "lbfgsb"

    # Train the Kriging model
    t = time.time()
    krigobj = Kriging(KrigInfo,
                      standardization=True,
                      standtype='default',
                      normy=False,
                      trainvar=False)
    krigobj.train(parallel=False)
    loocverr, _ = krigobj.loocvcalc()
    elapsed = time.time() - t
    print("elapsed time for train Kriging model: ", elapsed, "s")
    print("LOOCV error of Kriging model: ", loocverr, "%")

    return krigobj, Pfreal
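
A hedged driver sketch for the reliability helper above. It reuses the mcpopgen call pattern from Example #1 to build a large Monte Carlo population and a smaller Kriging training set; the n_order/n_coeff values and the "styblinski" problem string are placeholders.

import numpy as np

nvar = 2
lb = -5 * np.ones(nvar)
ub = 5 * np.ones(nvar)
# Monte Carlo population and a smaller Kriging training set (placeholder sizes).
init_samp = mcpopgen(lb=lb, ub=ub, ndim=nvar, n_order=1, n_coeff=5)
krigsamp = mcpopgen(lb=lb, ub=ub, ndim=nvar, n_order=1, n_coeff=3)
krigobj, Pfreal = generate_krig(init_samp, krigsamp, nvar, problem="styblinski")
print("Monte Carlo reference failure probability:", Pfreal)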
Example #7
    def standardize(self):
        """
        Standardize Kriging samples and create regression matrix.

        Returns:
            None
        """
        Kriging.standardize(self)

        # Calculate PLS coeff
        _pls = pls(self.n_princomp)
        if self.standardization is True:
            coeff_pls = _pls.fit(self.KrigInfo["X_norm"].copy(),
                                 self.KrigInfo['y'].copy()).x_rotations_
        else:
            coeff_pls = _pls.fit(self.KrigInfo["X"].copy(),
                                 self.KrigInfo['y'].copy()).x_rotations_
        self.KrigInfo["plscoeff"] = coeff_pls
Example #8
def generate_kriging(n_cpu):
    # Sampling
    nsample = 10
    nvar = 2
    lb = np.array([-5, -5])
    ub = np.array([5, 5])
    sampoption = "halton"
    samplenorm, sample = sampling(sampoption,
                                  nvar,
                                  nsample,
                                  result="real",
                                  upbound=ub,
                                  lobound=lb)
    X = sample
    # Evaluate sample
    # global y
    y = evaluate(X, "styblinski")

    # Initialize KrigInfo
    # global KrigInfo
    KrigInfo = initkriginfo()
    # Set KrigInfo
    KrigInfo["X"] = X
    KrigInfo["y"] = y
    KrigInfo["problem"] = "styblinski"
    KrigInfo["nrestart"] = 5
    KrigInfo["ub"] = ub
    KrigInfo["lb"] = lb
    KrigInfo["optimizer"] = "lbfgsb"

    # Run Kriging
    krigobj = Kriging(KrigInfo,
                      standardization=True,
                      standtype='default',
                      normy=False,
                      trainvar=False)
    krigobj.train(n_cpu=n_cpu)
    loocverr, _ = krigobj.loocvcalc()
    print("LOOCV error of Kriging model: ", loocverr, "%")

    return krigobj
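
A brief usage sketch; requesting ['pred', 's'] returns both the mean prediction and its uncertainty, mirroring the calls made inside the MOBO examples below. The query point is arbitrary.

import numpy as np

krigobj = generate_kriging(n_cpu=1)
x_new = np.array([[0.5, 0.5]])
y_hat, s_hat = krigobj.predict(x_new, ['pred', 's'])
print("Prediction:", y_hat, "with uncertainty:", s_hat)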
Example #9
    def kpcaopt(self, w, KPCAkernel, orig_X, out='default'):
        # Set up the kernel-PCA dimensionality-reduction model
        if KPCAkernel not in ("gaussian", "precomputed"):
            if KPCAkernel.lower() in ('poly', 'polynomial'):
                _drm = drm(self.n_princomp,
                           kernel='poly',
                           gamma=10**w[0],
                           coef0=10**w[1],
                           degree=np.round(w[2]))
            elif KPCAkernel.lower() == 'sigmoid':
                _drm = drm(self.n_princomp,
                           kernel='sigmoid',
                           gamma=10**w[0],
                           coef0=10**w[1])
            elif KPCAkernel.lower() == 'rbf':
                _drm = drm(self.n_princomp, kernel='rbf', gamma=10**w[0])
            elif KPCAkernel.lower() in ('linear', 'cosine'):
                _drm = drm(self.n_princomp)

            self.KrigInfo["nvar"] = self.n_princomp
            if self.standardization is True:
                self.KrigInfo["X_norm"] = deepcopy(orig_X)
                _drm.fit(self.KrigInfo["X_norm"].copy())
                transformed = _drm.transform(self.KrigInfo["X_norm"].copy())
                self.KrigInfo["lb2"] = (
                    np.min(transformed,
                           axis=0))  # Create lowerbound for transformed X
                self.KrigInfo["ub2"] = (
                    np.max(transformed,
                           axis=0))  # Create upperbound for transformed X
                self.KrigInfo["X_norm"] = standardize(
                    transformed,
                    self.KrigInfo['y'],
                    type=self.standtype.lower(),
                    range=np.vstack(
                        (self.KrigInfo["lb2"], self.KrigInfo["ub2"])))
                self.KrigInfo['idx'] = polytruncation(
                    self.KrigInfo["TrendOrder"], self.KrigInfo["nvar"], 1)
            else:
                self.KrigInfo["X"] = deepcopy(orig_X)
                _drm.fit(self.KrigInfo["X"].copy())
                transformed = _drm.transform(self.KrigInfo["X"].copy())
                self.KrigInfo["X"] = transformed
                self.KrigInfo['idx'] = polytruncation(
                    self.KrigInfo["TrendOrder"], self.KrigInfo["nvar"], 1)

        else:
            n_features = np.size(orig_X, 1)
            self.KrigInfo["nvar"] = self.n_princomp
            _drm = drm(self.n_princomp, kernel='precomputed')
            k_mat = customkernel(orig_X,
                                 orig_X,
                                 w,
                                 n_features,
                                 type='gaussian')
            if self.standardization is True:
                self.KrigInfo["X_norm"] = deepcopy(orig_X)
                transformed = _drm.fit_transform(k_mat)
                self.KrigInfo["lb2"] = (
                    np.min(transformed,
                           axis=0))  # Create lowerbound for transformed X
                self.KrigInfo["ub2"] = (
                    np.max(transformed,
                           axis=0))  # Create upperbound for transformed X
                self.KrigInfo["X_norm"] = standardize(
                    transformed,
                    self.KrigInfo['y'],
                    type=self.standtype.lower(),
                    range=np.vstack(
                        (self.KrigInfo["lb2"], self.KrigInfo["ub2"])))
                self.KrigInfo['idx'] = polytruncation(
                    self.KrigInfo["TrendOrder"], self.KrigInfo["nvar"], 1)
            else:
                pass

        if out == 'default':
            self.KrigInfo["kernel"] = ["iso_gaussian"]
            Kriging.train(self, disp=False)
        else:
            self.KrigInfo["kernel"] = ["gaussian"]
            Kriging.train(self, disp=False, pre_theta=self.KrigInfo['Theta'])

        loocverr, _ = Kriging.loocvcalc(self, drm=_drm)

        if out == 'default':
            return loocverr
        elif out == 'all':
            return _drm, loocverr
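
kpcaopt above tunes the hyperparameters of a kernel-PCA dimensionality reduction before retraining the Kriging model. Assuming drm is scikit-learn's KernelPCA (an assumption, the snippet does not show the import), the core fit/transform pattern it wraps looks like this.

import numpy as np
from sklearn.decomposition import KernelPCA

rng = np.random.default_rng(1)
orig_X = rng.uniform(-1.0, 1.0, size=(40, 6))   # toy normalized samples

n_princomp = 2
w = np.array([0.0, 0.0, 3.0])  # hypothetical log10-scaled hyperparameters
_drm = KernelPCA(n_components=n_princomp, kernel='poly',
                 gamma=10**w[0], coef0=10**w[1], degree=int(round(w[2])))
transformed = _drm.fit(orig_X).transform(orig_X)

# Bounds of the reduced space, analogous to KrigInfo["lb2"] / KrigInfo["ub2"].
lb2 = np.min(transformed, axis=0)
ub2 = np.max(transformed, axis=0)
print(transformed.shape, lb2, ub2)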
Example #10
    def create_krig(self, obj_krig_map=None, con_krig_map=None, n_cpu=1):
        """Initialise and train Kriging models.

        Default settings for the objective or constraint Krigings can
        be overridden by setting the obj_krig_map or con_krig_map dict.
        The dictionary should take the form:

            e.g.
            map = {'default': {'nrestart': 5,
                               'optimizer': 'lbfgsb',
                               },
                   'CD': {'optimizer': 'cobyla',
                          },
                   'CL': {'nrestart': 10,
                          'limittype': '>=',
                          'limit': 0.15,
                          },
                   }

        where the dict key is used to identify the objective or
        constraint by label (int, if no explicit x_label and y_label set
        previously). The subdict key-value pairs are set in each
        surrogate_models.supports.initinfo.initkriginfo('single'). The
        'default' dict is applied first and can be overridden by the
        following dictionaries.

        Args:
            obj_krig_map (dict(dict()), optional): Map specific settings
                onto objective Kriging models via the labels.
            con_krig_map (dict(dict()), optional): Map specific settings
                onto constraint Kriging models via the labels.
            n_cpu (int, optional): If > 1, uses parallel processing.
                Defaults to 1.
        """
        def apply_krig_map(krig_info, krig_map, label):
            """Helper func. Apply 'default' dict, then labeled dict."""
            if 'default' in krig_map:
                for k, v in krig_map['default'].items():
                    print(f"Setting {label} Kriging defaults '{k}': {v}")
                    krig_info[k] = v
            if label in krig_map:
                for k, v in krig_map[label].items():
                    print(f"Setting {label} Kriging '{k}': {v}")
                    krig_info[k] = v

        # Set up Kriging for each objective
        obj_infos = []
        for i in range(self.n_obj):
            krig_multi_info = initkriginfo()
            krig_multi_info["X"] = self.X
            krig_multi_info["y"] = self.y[:, i].reshape(-1, 1)
            krig_multi_info["ub"] = self.ub
            krig_multi_info["lb"] = self.lb

            label = self.y_labels[i]
            if obj_krig_map is not None:
                apply_krig_map(krig_multi_info, obj_krig_map, label)
            obj_infos.append(krig_multi_info)

        # Set up Kriging for each constraint
        con_infos = []
        for i in range(self.n_con):
            krig_multi_info = initkriginfo()
            krig_multi_info["X"] = self.X
            krig_multi_info["y"] = self.g[:, i].reshape(-1, 1)
            krig_multi_info["ub"] = self.ub
            krig_multi_info["lb"] = self.lb

            label = self.g_labels[i]
            if con_krig_map is not None:
                apply_krig_map(krig_multi_info, con_krig_map, label)
            con_infos.append(krig_multi_info)

        # Train Kriging models
        start_total_train = time.time()
        for i, krig_info in enumerate(obj_infos):
            krig_obj = Kriging(krig_info, standardization=True,
                               standtype='default', normy=False,
                               trainvar=False)
            start_train = time.time()
            krig_obj.train(n_cpu=n_cpu)
            t = time.time() - start_train
            print(f'{self.y_labels[i]} training time: {t:.2f} seconds')
            loocve, _ = krig_obj.loocvcalc()
            print(f'Objective {self.y_labels[i]} LOOCVE: {loocve}')
            self.obj_krig.append(krig_obj)
            self.obj_loocve.append(loocve)
            self.obj_time.append(t)

        for i, krig_info in enumerate(con_infos):
            krig_con = Kriging(krig_info, standardization=True,
                               standtype='default', normy=False,
                               trainvar=False)
            start_train = time.time()
            krig_con.train(n_cpu=n_cpu)
            t = time.time() - start_train
            print(f'{self.g_labels[i]} training time: {t:.2f} seconds')
            loocve, _ = krig_con.loocvcalc()
            print(f'Constraint {self.g_labels[i]} LOOCVE: {loocve}')
            self.con_krig.append(krig_con)
            self.con_loocve.append(loocve)
            self.con_time.append(t)

        elapsed = time.time() - start_total_train
        print(f'Total training time: {elapsed:.2f} seconds')

        # Save data for summary
        self.total_train_time = elapsed
        self.obj_krig_map = obj_krig_map
        self.con_krig_map = con_krig_map
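
A hedged usage sketch of the obj_krig_map / con_krig_map arguments, following the dict format described in the docstring above. The 'CD' and 'CL' labels are illustrative and assume matching y_labels / g_labels were set on the instance, here called opt (a hypothetical name).

obj_krig_map = {'default': {'nrestart': 5,
                            'optimizer': 'lbfgsb'},
                'CD': {'optimizer': 'cobyla'}}
con_krig_map = {'CL': {'nrestart': 10,
                       'limittype': '>=',
                       'limit': 0.15}}
opt.create_krig(obj_krig_map=obj_krig_map,
                con_krig_map=con_krig_map,
                n_cpu=1)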
Example #11
    def run(self, disp=True, infeasible=None):
        """
        Run multi objective unconstrained Bayesian optimization.

        Args:
            disp (bool): Display the process or not. Defaults to True.
            infeasible (nparray, optional): Indices of infeasible samples
                to delete. Defaults to None.

        Returns:
            xupdate (nparray): Array of design variable updates.
            yupdate (nparray): Array of objective updates.
            supdate (nparray): Array of prediction uncertainties of the
                updates.
            metricall (nparray): Array of metric values of the updates.

        """

        self.nup = 0  # Number of current iteration
        self.Xall = self.kriglist[0].KrigInfo['X']
        self.yall = np.zeros(shape=[
            np.size(self.kriglist[0].KrigInfo["y"], axis=0),
            len(self.kriglist)
        ])
        for ii in range(np.size(self.yall, axis=1)):
            self.yall[:, ii] = self.kriglist[ii].KrigInfo["y"][:, 0]

        if infeasible is not None:
            self.yall = np.delete(self.yall.copy(), infeasible, 0)
            self.Xall = np.delete(self.Xall.copy(), infeasible, 0)
        else:
            pass

        self.ypar, _ = searchpareto.paretopoint(self.yall)

        print("Begin multi-objective Bayesian optimization process.")
        if self.autoupdate and disp:
            print(
                f"Update no.: {self.nup+1}, F-count: {np.size(self.Xall,0)}, "
                f"Maximum no. updates: {self.moboInfo['nup']+1}")
        else:
            pass

        # If the optimizer is ParEGO, create a scalarized Kriging
        if self.moboInfo['acquifunc'].lower() == 'parego':
            self.KrigScalarizedInfo = deepcopy(self.kriglist[0].KrigInfo)
            self.KrigScalarizedInfo['y'] = paregopre(self.yall)
            self.scalkrig = Kriging(self.KrigScalarizedInfo,
                                    standardization=True,
                                    standtype='default',
                                    normy=False,
                                    trainvar=False)
            self.scalkrig.train(disp=False)
        else:
            pass

        # Perform update on design space
        if self.moboInfo['acquifunc'].lower() == 'ehvi':
            self.ehviupdate(disp)
        elif self.moboInfo['acquifunc'].lower() == 'parego':
            self.paregoupdate(disp)
        else:
            raise ValueError(self.moboInfo["acquifunc"],
                             " is not a valid acquisition function.")

        # Finish optimization and return values
        if disp:
            print("Optimization finished, now creating the final outputs.")

        if self.multiupdate == 0 or self.multiupdate == 1:
            xupdate = self.Xall[-self.moboInfo['nup']:, :]
            yupdate = self.yall[-self.moboInfo['nup']:, :]
            supdate = self.spredall[-self.moboInfo['nup']:, :]
        else:
            xupdate = self.Xall[(-self.moboInfo['nup'] * self.multiupdate):, :]
            yupdate = self.yall[(-self.moboInfo['nup'] * self.multiupdate):, :]
            supdate = self.spredall[(-self.moboInfo['nup'] *
                                     self.multiupdate):, :]
        metricall = self.metricall

        return xupdate, yupdate, supdate, metricall
Example #12
    def enrich(self, xnext):
        """
        Evaluate and enrich experimental design.

        Args:
            xnext: Next design variable(s) to be evaluated.

        Returns:
            None
        """
        # Evaluate new sample
        if type(self.kriglist[0].KrigInfo['problem']) == str:
            if np.ndim(xnext) == 1:
                ynext = evaluate(xnext, self.kriglist[0].KrigInfo['problem'])
            else:
                ynext = np.zeros(shape=[np.size(xnext, 0), len(self.kriglist)])
                for ii in range(np.size(xnext, 0)):
                    ynext[ii, :] = evaluate(
                        xnext[ii, :], self.kriglist[0].KrigInfo['problem'])
        elif callable(self.kriglist[0].KrigInfo['problem']):
            ynext = self.kriglist[0].KrigInfo['problem'](xnext)
        else:
            raise ValueError(
                'KrigInfo["problem"] is not a string nor a callable function!')

        if self.krigconstlist is not None:
            for idx, constobj in enumerate(self.krigconstlist):
                if type(constobj.KrigInfo['problem']) == str:
                    ynext_const = evaluate(xnext, constobj.KrigInfo['problem'])
                elif callable(constobj.KrigInfo['problem']):
                    ynext_const = constobj.KrigInfo['problem'](xnext).reshape(
                        -1, 1)
                else:
                    raise ValueError(
                        'KrigConstInfo["problem"] is not a string nor a callable function!'
                    )
                constobj.KrigInfo['X'] = np.vstack(
                    (constobj.KrigInfo['X'], xnext))
                constobj.KrigInfo['y'] = np.vstack(
                    (constobj.KrigInfo['y'], ynext_const))
                constobj.standardize()
                constobj.train(disp=False)
        else:
            pass

        # Treatment for failed solutions. Reference: Forrester, A. I., Sóbester, A., &
        # Keane, A. J. (2006). "Optimization with missing data." Proceedings of the Royal
        # Society A: Mathematical, Physical and Engineering Sciences, 462(2067), 935-945.
        if np.isnan(ynext).any():
            for jj in range(len(self.kriglist)):
                SSqr, y_hat = self.kriglist[jj].predict(
                    xnext, ['SSqr', 'pred'])
                ynext[0, jj] = y_hat + SSqr

        # Enrich experimental design
        self.yall = np.vstack((self.yall, ynext))
        self.Xall = np.vstack((self.Xall, xnext))
        self.ypar, I = searchpareto.paretopoint(
            self.yall)  # Recompute non-dominated solutions

        if self.moboInfo['acquifunc'] == 'ehvi':
            for index, krigobj in enumerate(self.kriglist):
                krigobj.KrigInfo['X'] = self.Xall
                krigobj.KrigInfo['y'] = self.yall[:, index].reshape(-1, 1)
                krigobj.standardize()
                krigobj.train(disp=False)
        elif self.moboInfo['acquifunc'] == 'parego':
            self.KrigScalarizedInfo['X'] = self.Xall
            self.KrigScalarizedInfo['y'] = paregopre(self.yall)
            self.scalkrig = Kriging(self.KrigScalarizedInfo,
                                    standardization=True,
                                    standtype='default',
                                    normy=False,
                                    trainvar=False)
            self.scalkrig.train(disp=False)
            for index, krigobj in enumerate(self.kriglist):
                krigobj.KrigInfo['X'] = self.Xall
                krigobj.KrigInfo['y'] = self.yall[:, index].reshape(-1, 1)
                krigobj.standardize()
                krigobj.train(disp=False)
        else:
            raise ValueError(self.moboInfo["acquifunc"],
                             " is not a valid acquisition function.")

        # Save data
        if self.savedata:
            I = I.astype(int)
            Xbest = self.Xall[I, :]
            sio.savemat(self.moboInfo["filename"], {
                "xbest": Xbest,
                "ybest": self.ypar
            })
Example #13
class MOBO:
    """
    Perform multi-objective Bayesian Optimization

    Args:
        moboInfo (dict): Dictionary containing necessary information for multi-objective Bayesian optimization.
        kriglist (list): List of Kriging objects, one per objective.
        autoupdate (bool): Whether to automatically evaluate the objective function for each suggested sample.
        multiupdate (int): Number of suggested samples returned for each iteration.
        savedata (bool): Save data for each iteration or not. Defaults to True.
        expconst (list): List of Kriging objects for the expensive constraints.
        chpconst (list): List of cheap constraint functions.

    Returns:
        xupdate (nparray): Array of design variable updates.
        yupdate (nparray): Array of objective updates.
        metricall (nparray): Array of metric values of the updates.
    """
    def __init__(self,
                 moboInfo,
                 kriglist,
                 autoupdate=True,
                 multiupdate=0,
                 savedata=True,
                 expconst=None,
                 chpconst=None):
        """
        Initialize MOBO class

        Args:
            moboInfo (dict): Dictionary containing necessary information for multi-objective Bayesian optimization.
            kriglist (list): List of Kriging objects, one per objective.
            autoupdate (bool): Whether to automatically evaluate the objective function for each suggested sample. Defaults to True.
            multiupdate (int): Number of suggested samples returned for each iteration.
            savedata (bool): Save data for each iteration or not. Defaults to True.
            expconst (list): List of Kriging objects for the expensive constraints.
            chpconst (list): List of cheap constraint functions.

        """
        self.moboInfo = moboinfocheck(moboInfo, autoupdate)
        self.kriglist = kriglist
        self.krignum = len(self.kriglist)
        self.autoupdate = autoupdate
        self.multiupdate = multiupdate
        self.savedata = savedata
        self.krigconstlist = expconst
        self.cheapconstlist = chpconst

    def run(self, disp=True, infeasible=None):
        """
        Run multi objective unconstrained Bayesian optimization.

        Args:
            disp (bool): Display the process or not. Defaults to True.
            infeasible (nparray, optional): Indices of infeasible samples
                to delete. Defaults to None.

        Returns:
            xupdate (nparray): Array of design variable updates.
            yupdate (nparray): Array of objective updates.
            supdate (nparray): Array of prediction uncertainties of the
                updates.
            metricall (nparray): Array of metric values of the updates.

        """

        self.nup = 0  # Number of current iteration
        self.Xall = self.kriglist[0].KrigInfo['X']
        self.yall = np.zeros(shape=[
            np.size(self.kriglist[0].KrigInfo["y"], axis=0),
            len(self.kriglist)
        ])
        for ii in range(np.size(self.yall, axis=1)):
            self.yall[:, ii] = self.kriglist[ii].KrigInfo["y"][:, 0]

        if infeasible is not None:
            self.yall = np.delete(self.yall.copy(), infeasible, 0)
            self.Xall = np.delete(self.Xall.copy(), infeasible, 0)
        else:
            pass

        self.ypar, _ = searchpareto.paretopoint(self.yall)

        print("Begin multi-objective Bayesian optimization process.")
        if self.autoupdate and disp:
            print(
                f"Update no.: {self.nup+1}, F-count: {np.size(self.Xall,0)}, "
                f"Maximum no. updates: {self.moboInfo['nup']+1}")
        else:
            pass

        # If the optimizer is ParEGO, create a scalarized Kriging
        if self.moboInfo['acquifunc'].lower() == 'parego':
            self.KrigScalarizedInfo = deepcopy(self.kriglist[0].KrigInfo)
            self.KrigScalarizedInfo['y'] = paregopre(self.yall)
            self.scalkrig = Kriging(self.KrigScalarizedInfo,
                                    standardization=True,
                                    standtype='default',
                                    normy=False,
                                    trainvar=False)
            self.scalkrig.train(disp=False)
        else:
            pass

        # Perform update on design space
        if self.moboInfo['acquifunc'].lower() == 'ehvi':
            self.ehviupdate(disp)
        elif self.moboInfo['acquifunc'].lower() == 'parego':
            self.paregoupdate(disp)
        else:
            raise ValueError(self.moboInfo["acquifunc"],
                             " is not a valid acquisition function.")

        # Finish optimization and return values
        if disp:
            print("Optimization finished, now creating the final outputs.")

        if self.multiupdate == 0 or self.multiupdate == 1:
            xupdate = self.Xall[-self.moboInfo['nup']:, :]
            yupdate = self.yall[-self.moboInfo['nup']:, :]
            supdate = self.spredall[-self.moboInfo['nup']:, :]
        else:
            xupdate = self.Xall[(-self.moboInfo['nup'] * self.multiupdate):, :]
            yupdate = self.yall[(-self.moboInfo['nup'] * self.multiupdate):, :]
            supdate = self.spredall[(-self.moboInfo['nup'] *
                                     self.multiupdate):, :]
        metricall = self.metricall

        return xupdate, yupdate, supdate, metricall

    def ehviupdate(self, disp):
        """
        Update MOBO using EHVI algorithm.

        Args:
            disp (bool): Display process or not.

        Returns:
             None
        """
        self.spredall = deepcopy(self.yall)
        self.spredall[:] = 0
        while self.nup < self.moboInfo['nup']:
            # Iteratively update the reference point for hypervolume computation if EHVI is used as the acquisition function
            if self.moboInfo['refpointtype'].lower() == 'dynamic':
                self.moboInfo['refpoint'] = np.max(
                    self.yall,
                    0) + (np.max(self.yall, 0) - np.min(self.yall, 0)) * 2

            # Perform update(s)
            if self.multiupdate < 0:
                raise ValueError("Number of multiple updates must be "
                                 "greater than or equal to 0")
            elif self.multiupdate == 0 or self.multiupdate == 1:
                xnext, metricnext = run_multi_opt(self.kriglist, self.moboInfo,
                                                  self.ypar,
                                                  self.krigconstlist,
                                                  self.cheapconstlist)
                yprednext = np.zeros(shape=[2])
                sprednext = np.zeros(shape=[2])
                for ii, krigobj in enumerate(self.kriglist):
                    yprednext[ii], sprednext[ii] = krigobj.predict(
                        xnext, ['pred', 's'])
            else:
                xnext, yprednext, sprednext, metricnext = self.simultpredehvi(
                    disp)

            if self.nup == 0:
                self.metricall = metricnext
            else:
                self.metricall = np.vstack((self.metricall, metricnext))

            # Break Loop if auto is false
            if self.autoupdate is False:
                self.Xall = np.vstack((self.Xall, xnext))
                self.yall = np.vstack((self.yall, yprednext))
                self.spredall = np.vstack((self.spredall, sprednext))
                break
            else:
                pass

            # Evaluate and enrich experimental design
            self.enrich(xnext)

            # Update number of iterations
            self.nup += 1

            # Show optimization progress
            if disp:
                print(
                    f"Update no.: {self.nup+1}, F-count: {np.size(self.Xall, 0)}, "
                    f"Maximum no. updates: {self.moboInfo['nup']+1}")

    def paregoupdate(self, disp):
        """
        Update MOBO using ParEGO algorithm.

        Args:
            disp (bool): Display process or not.

        Returns:
             None
        """
        while self.nup < self.moboInfo['nup']:
            # Perform update(s)
            if self.multiupdate < 0:
                raise ValueError("Number of multiple updates must be "
                                 "greater than or equal to 0")
            elif self.multiupdate == 0 or self.multiupdate == 1:
                xnext, metricnext = run_single_opt(self.scalkrig,
                                                   self.moboInfo,
                                                   self.krigconstlist,
                                                   self.cheapconstlist)
                yprednext = np.zeros(shape=[2])
                for ii, krigobj in enumerate(self.kriglist):
                    yprednext[ii] = krigobj.predict(xnext, ['pred'])
            else:
                xnext, yprednext, metricnext = self.simultpredparego()

            if self.nup == 0:
                self.metricall = metricnext
            else:
                self.metricall = np.vstack((self.metricall, metricnext))

            # Break Loop if auto is false
            if self.autoupdate is False:
                self.Xall = np.vstack((self.Xall, xnext))
                self.yall = np.vstack((self.yall, yprednext))
                break
            else:
                pass

            # Evaluate and enrich experimental design
            self.enrich(xnext)

            # Update number of iterations
            self.nup += 1

            # Show optimization progress
            if disp:
                print(
                    f"Update no.: {self.nup+1}, F-count: {np.size(self.Xall, 0)}, "
                    f"Maximum no. updates: {self.moboInfo['nup']+1}")

    def simultpredehvi(self, disp=False):
        """
        Perform multi updates on EHVI MOBO using Kriging believer method.

        Returns:
             xalltemp (nparray): Array of design variable updates.
             yalltemp (nparray): Array of objective value updates.
             salltemp (nparray): Array of prediction uncertainties of the updates.
             metricall (nparray): Array of metrics of the updates.
        """

        krigtemp = [0] * len(self.kriglist)
        for index, obj in enumerate(self.kriglist):
            krigtemp[index] = deepcopy(obj)
        yprednext = np.zeros(shape=[len(krigtemp)])
        sprednext = np.zeros(shape=[len(krigtemp)])
        ypartemp = self.ypar
        yall = self.yall

        for ii in range(self.multiupdate):
            t1 = time.time()
            if disp:
                print(f"update number {ii+1}")
            else:
                pass

            xnext, metrictemp = run_multi_opt(krigtemp, self.moboInfo,
                                              ypartemp, self.krigconstlist,
                                              self.cheapconstlist)
            bound = np.vstack(
                (-np.ones(shape=[1, krigtemp[0].KrigInfo["nvar"]]),
                 np.ones(shape=[1, krigtemp[0].KrigInfo["nvar"]])))

            for jj in range(len(krigtemp)):
                yprednext[jj], sprednext[jj] = krigtemp[jj].predict(
                    xnext, ['pred', 's'])
                krigtemp[jj].KrigInfo['X'] = np.vstack(
                    (krigtemp[jj].KrigInfo['X'], xnext))
                krigtemp[jj].KrigInfo['y'] = np.vstack(
                    (krigtemp[jj].KrigInfo['y'], yprednext[jj]))
                krigtemp[jj].standardize()
                krigtemp[jj].KrigInfo["F"] = compute_regression_mat(
                    krigtemp[jj].KrigInfo["idx"],
                    krigtemp[jj].KrigInfo["X_norm"], bound,
                    np.ones(shape=[krigtemp[jj].KrigInfo["nvar"]]))
                krigtemp[jj].KrigInfo = likelihood(
                    krigtemp[jj].KrigInfo['Theta'],
                    krigtemp[jj].KrigInfo,
                    mode='all',
                    trainvar=krigtemp[jj].trainvar)

            if ii == 0:
                xalltemp = deepcopy(xnext)
                yalltemp = deepcopy(yprednext)
                salltemp = deepcopy(sprednext)
                metricall = deepcopy(metrictemp)
            else:
                xalltemp = np.vstack((xalltemp, xnext))
                yalltemp = np.vstack((yalltemp, yprednext))
                salltemp = np.vstack((salltemp, sprednext))
                metricall = np.vstack((metricall, metrictemp))

            yall = np.vstack((yall, yprednext))
            ypartemp, _ = searchpareto.paretopoint(yall)

            if disp:
                print("time: ", time.time() - t1, " s")

        return [xalltemp, yalltemp, salltemp, metricall]

    def simultpredparego(self):
        """
        Perform multi updates on ParEGO MOBO by varying the weighting function.

        Returns:
             xallnext (nparray): Array of design variable updates.
             yallnext (nparray): Array of objective value updates.
             metricall (nparray): Array of metrics of the updates.
        """
        idxs = np.random.choice(11, self.multiupdate)
        scalinfotemp = deepcopy(self.KrigScalarizedInfo)
        xalltemp = self.Xall[:, :]
        yalltemp = self.yall[:, :]
        yprednext = np.zeros(shape=[len(self.kriglist)])

        for ii, idx in enumerate(idxs):
            print(f"update number {ii + 1}")
            scalinfotemp['X'] = xalltemp
            scalinfotemp['y'] = paregopre(yalltemp, idx)
            krigtemp = Kriging(scalinfotemp,
                               standardization=True,
                               standtype='default',
                               normy=False,
                               trainvar=False)
            krigtemp.train(disp=False)
            xnext, metricnext = run_single_opt(krigtemp, self.moboInfo,
                                               self.krigconstlist,
                                               self.cheapconstlist)
            for jj, krigobj in enumerate(self.kriglist):
                yprednext[jj] = krigobj.predict(xnext, ['pred'])
            if ii == 0:
                xallnext = deepcopy(xnext)
                yallnext = deepcopy(yprednext)
                metricall = deepcopy(metricnext)
            else:
                xallnext = np.vstack((xallnext, xnext))
                yallnext = np.vstack((yallnext, yprednext))
                metricall = np.vstack((metricall, metricnext))

        yalltemp = np.vstack((yalltemp, yprednext))
        xalltemp = np.vstack((xalltemp, xnext))

        return xallnext, yallnext, metricall

    def enrich(self, xnext):
        """
        Evaluate and enrich experimental design.

        Args:
            xnext: Next design variable(s) to be evaluated.

        Returns:
            None
        """
        # Evaluate new sample
        if type(self.kriglist[0].KrigInfo['problem']) == str:
            if np.ndim(xnext) == 1:
                ynext = evaluate(xnext, self.kriglist[0].KrigInfo['problem'])
            else:
                ynext = np.zeros(shape=[np.size(xnext, 0), len(self.kriglist)])
                for ii in range(np.size(xnext, 0)):
                    ynext[ii, :] = evaluate(
                        xnext[ii, :], self.kriglist[0].KrigInfo['problem'])
        elif callable(self.kriglist[0].KrigInfo['problem']):
            ynext = self.kriglist[0].KrigInfo['problem'](xnext)
        else:
            raise ValueError(
                'KrigInfo["problem"] is not a string nor a callable function!')

        if self.krigconstlist is not None:
            for idx, constobj in enumerate(self.krigconstlist):
                if type(constobj.KrigInfo['problem']) == str:
                    ynext_const = evaluate(xnext, constobj.KrigInfo['problem'])
                elif callable(constobj.KrigInfo['problem']):
                    ynext_const = constobj.KrigInfo['problem'](xnext).reshape(
                        -1, 1)
                else:
                    raise ValueError(
                        'KrigConstInfo["problem"] is not a string nor a callable function!'
                    )
                constobj.KrigInfo['X'] = np.vstack(
                    (constobj.KrigInfo['X'], xnext))
                constobj.KrigInfo['y'] = np.vstack(
                    (constobj.KrigInfo['y'], ynext_const))
                constobj.standardize()
                constobj.train(disp=False)
        else:
            pass

        # Treatment for failed solutions. Reference: Forrester, A. I., Sóbester, A., &
        # Keane, A. J. (2006). "Optimization with missing data." Proceedings of the Royal
        # Society A: Mathematical, Physical and Engineering Sciences, 462(2067), 935-945.
        if np.isnan(ynext).any():
            for jj in range(len(self.kriglist)):
                SSqr, y_hat = self.kriglist[jj].predict(
                    xnext, ['SSqr', 'pred'])
                ynext[0, jj] = y_hat + SSqr

        # Enrich experimental design
        self.yall = np.vstack((self.yall, ynext))
        self.Xall = np.vstack((self.Xall, xnext))
        self.ypar, I = searchpareto.paretopoint(
            self.yall)  # Recompute non-dominated solutions

        if self.moboInfo['acquifunc'] == 'ehvi':
            for index, krigobj in enumerate(self.kriglist):
                krigobj.KrigInfo['X'] = self.Xall
                krigobj.KrigInfo['y'] = self.yall[:, index].reshape(-1, 1)
                krigobj.standardize()
                krigobj.train(disp=False)
        elif self.moboInfo['acquifunc'] == 'parego':
            self.KrigScalarizedInfo['X'] = self.Xall
            self.KrigScalarizedInfo['y'] = paregopre(self.yall)
            self.scalkrig = Kriging(self.KrigScalarizedInfo,
                                    standardization=True,
                                    standtype='default',
                                    normy=False,
                                    trainvar=False)
            self.scalkrig.train(disp=False)
            for index, krigobj in enumerate(self.kriglist):
                krigobj.KrigInfo['X'] = self.Xall
                krigobj.KrigInfo['y'] = self.yall[:, index].reshape(-1, 1)
                krigobj.standardize()
                krigobj.train(disp=False)
        else:
            raise ValueError(self.moboInfo["acquifunc"],
                             " is not a valid acquisition function.")

        # Save data
        if self.savedata:
            I = I.astype(int)
            Xbest = self.Xall[I, :]
            sio.savemat(self.moboInfo["filename"], {
                "xbest": Xbest,
                "ybest": self.ypar
            })
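
A minimal sketch of wiring trained Kriging models into the MOBO class above, reusing the two-objective helper from Example #4 for the surrogates. The moboInfo keys shown ('nup', 'acquifunc', 'refpointtype', 'filename') all appear in the class body, but moboinfocheck may expect or default further entries (for example an acquisition-optimizer setting), so treat this as illustrative rather than complete.

# Trained objective surrogates, e.g. from the helper in Example #4.
krigobj1, krigobj2 = generate_kriging(n_cpu=1)
kriglist = [krigobj1, krigobj2]

moboInfo = {'nup': 5,                   # number of update iterations
            'acquifunc': 'ehvi',        # or 'parego'
            'refpointtype': 'dynamic',  # reference-point handling for EHVI
            'filename': 'mobo_result.mat'}

optim = MOBO(moboInfo, kriglist, autoupdate=True, multiupdate=3)
xupdate, yupdate, supdate, metricall = optim.run(disp=True)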
Example #14
def construct_krig(X, y, g, lb, ub, n_cpu):
    # Define input for constraint Kriging
    KrigConstInfo = initkriginfo()
    KrigConstInfo['X'] = X
    KrigConstInfo['y'] = g.reshape(-1, 1)  # should be in shape (n,1)
    KrigConstInfo['problem'] = exp_const_eval
    KrigConstInfo["nrestart"] = 5
    KrigConstInfo["ub"] = ub
    KrigConstInfo["lb"] = lb
    KrigConstInfo["optimizer"] = "lbfgsb"
    # The expensive constraint value should be greater than or equal to 7.7.
    KrigConstInfo['limittype'] = '>='
    KrigConstInfo['limit'] = 7.7

    # Define input for first objective Kriging
    KrigInfo1 = initkriginfo()
    KrigInfo1["X"] = X
    KrigInfo1["y"] = y[:, 0].reshape(-1, 1)
    KrigInfo1["problem"] = cust_func
    KrigInfo1["nrestart"] = 5
    KrigInfo1["ub"] = ub
    KrigInfo1["lb"] = lb
    KrigInfo1["optimizer"] = "lbfgsb"

    # Define input for second objective Kriging
    KrigInfo2 = deepcopy(KrigInfo1)
    KrigInfo2['y'] = y[:, 1].reshape(-1, 1)

    # Run Kriging
    krigobj1 = Kriging(KrigInfo1,
                       standardization=True,
                       standtype='default',
                       normy=False,
                       trainvar=False)
    krigobj1.train(n_cpu=n_cpu)
    loocverr1, _ = krigobj1.loocvcalc()

    krigobj2 = Kriging(KrigInfo2,
                       standardization=True,
                       standtype='default',
                       normy=False,
                       trainvar=False)
    krigobj2.train(n_cpu=n_cpu)
    loocverr2, _ = krigobj2.loocvcalc()

    krigconst = Kriging(KrigConstInfo,
                        standardization=True,
                        standtype='default',
                        normy=False,
                        trainvar=False)
    krigconst.train(n_cpu=n_cpu)
    loocverrConst, _ = krigconst.loocvcalc()

    print('LOOCV 1: ', loocverr1)
    print('LOOCV 2: ', loocverr2)
    print('LOOCV Constraint: ', loocverrConst)

    # List of Kriging objects, objective and constraints should be separated
    kriglist = [krigobj1, krigobj2]
    expconstlist = [krigconst]

    return kriglist, expconstlist
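
construct_krig above expects two module-level callables, cust_func (the two-objective function) and exp_const_eval (the expensive constraint). Hypothetical stand-ins and a driver, with the sampling call borrowed from the earlier examples, might look like this.

import numpy as np

def cust_func(x):
    # Hypothetical bi-objective function, (n, nvar) -> (n, 2).
    x = np.atleast_2d(x)
    return np.column_stack((np.sum(x**2, axis=1),
                            np.sum((x - 1.0)**2, axis=1)))

def exp_const_eval(x):
    # Hypothetical expensive constraint; feasible when the value is >= 7.7.
    x = np.atleast_2d(x)
    return 10.0 - np.sum(np.abs(x), axis=1)

nvar = 2
lb = -2 * np.ones(nvar)
ub = 2 * np.ones(nvar)
_, X = sampling("halton", nvar, 20, result="real", upbound=ub, lobound=lb)
y = cust_func(X)
g = exp_const_eval(X)
kriglist, expconstlist = construct_krig(X, y, g, lb, ub, n_cpu=1)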
Example #15
class MOBO:
    """
    Perform multi-objective Bayesian Optimization

    Args:
        moboInfo (dict): Dictionary containing necessary information
            for multi-objective Bayesian optimization.
        kriglist ([kriging_model.Kriging]): List of objective Kriging
            instances.
        autoupdate (bool, optional): Automatically continue evaluations
            of the objective functions. Defaults to True.
        multiupdate (int, optional): Number of suggested samples
            returned for each iteration. Defaults to 0.
        savedata (bool, optional): Save data for each iteration.
            Defaults to True.
        expconst ([kriging_model.Kriging], optional): Kriging instances
            for constraints. Defaults to None.
        chpconst ([func], optional): Constraint functions. Defaults to
            None. Expected output of the constraint functions is 1 if
            satisfied and 0 if not. The constraint functions MUST have
            an input of x (the decision variable to be evaluated).

    Returns:
        xupdate (np.ndarray): Array of design variable updates.
        yupdate (np.ndarray): Array of objective updates.
        metricall (np.ndarray): Array of metric values of the updates.
    """
    def __init__(self,
                 moboInfo,
                 kriglist,
                 autoupdate=True,
                 multiupdate=0,
                 savedata=True,
                 expconst=None,
                 chpconst=None):
        """
        Initialize MOBO class

        Args:
            moboInfo (dict): Dictionary containing necessary information
                for multi-objective Bayesian optimization.
            kriglist ([kriging_model.Kriging]): List of objective
                Kriging instances.
            autoupdate (bool, optional): Automatically continue
                evaluations of the objective functions. Defaults to
                True.
            multiupdate (int, optional): Number of suggested samples
                returned for each iteration. Defaults to 0.
            savedata (bool, optional): Save data for each iteration.
                Defaults to True.
            expconst ([kriging_model.Kriging], optional): Kriging
                instances for constraints. Defaults to None.
            chpconst ([func], optional): Constraint functions. Defaults
                to None. Expected output of the constraint functions is
                1 if satisfied and 0 if not. The constraint functions
                MUST have an input of x (the decision variable to be
                evaluated).
        """
        n_krig = len(kriglist)
        self.moboInfo = moboinfocheck(moboInfo, autoupdate, n_krig)
        self.kriglist = kriglist
        self.krignum = n_krig
        self.autoupdate = autoupdate
        self.multiupdate = multiupdate
        self.savedata = savedata
        self.krigconstlist = expconst
        self.cheapconstlist = chpconst

    def run(self, disp=True, infeasible=None, n_cpu=1):
        """
        Run multi objective unconstrained Bayesian optimization.

        Args:
            disp (bool, optional): Print progress. Defaults to True.
            infeasible (np.ndarray, optional): Indices of infeasible
                samples to delete. Defaults to None.
            n_cpu (int, optional): The number of processors to use in a
                multiprocessing.Pool. Defaults to 1, which does not create
                a pool.

        Returns:
            xupdate (np.ndarray): [n_kb, n_dv] array of update design
                variable values.
            yupdate (np.ndarray): [n_kb, n_obj] array of update
                objective values.
            supdate (np.ndarray): [n_kb, n_obj] array of update
                objective uncertainty values.
            metricall (np.ndarray): [n_kb, 1] array of update metric
                values.
        """

        print(f'Running with n_cpu: {n_cpu} for supported functions.')
        if n_cpu == 1:
            return self._run(disp=disp, infeasible=infeasible, pool=None)
        else:
            with mp.Pool(processes=n_cpu) as pool:
                return self._run(disp=disp, infeasible=infeasible, pool=pool)

    def _run(self, disp=True, infeasible=None, pool=None):
        """
        Run multi objective unconstrained Bayesian optimization.

        Args:
            disp (bool, optional): Print progress. Defaults to True.
            infeasible (np.ndarray, optional): Indices of infeasible
                samples to delete. Defaults to None.
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool instance.
                Will be passed to functions for use, if specified.
                Defaults to None.

        Returns:
            xupdate (np.ndarray): [n_kb, n_dv] array of update design
                variable values.
            yupdate (np.ndarray): [n_kb, n_obj] array of update
                objective values.
            supdate (np.ndarray): [n_kb, n_obj] array of update
                objective uncertainty values.
            metricall (np.ndarray): [n_kb, 1] array of update metric
                values.
        """
        self.nup = 0  # Number of current iteration
        self.Xall = self.kriglist[0].KrigInfo['X']
        n_samp = self.kriglist[0].KrigInfo["nsamp"]
        n_krig = len(self.kriglist)
        self.yall = np.zeros([n_samp, n_krig])
        for ii in range(n_krig):
            self.yall[:, ii] = self.kriglist[ii].KrigInfo["y"][:, 0]

        if infeasible is not None:
            self.yall = np.delete(self.yall.copy(), infeasible, 0)
            self.Xall = np.delete(self.Xall.copy(), infeasible, 0)

        self.ypar, _ = searchpareto.paretopoint(self.yall)

        print("Begin multi-objective Bayesian optimization process.")
        if self.autoupdate and disp:
            print(f"Update no.: {self.nup + 1}, F-count: {n_samp}, "
                  f"Maximum no. updates: {self.moboInfo['nup'] + 1}")

        # If the optimizer is ParEGO, create a scalarized Kriging
        if self.moboInfo['acquifunc'].lower() == 'parego':
            self.KrigScalarizedInfo = deepcopy(self.kriglist[0].KrigInfo)
            self.KrigScalarizedInfo['y'] = paregopre(self.yall)
            self.scalkrig = Kriging(self.KrigScalarizedInfo,
                                    standardization=True,
                                    standtype='default',
                                    normy=False,
                                    trainvar=False)
            self.scalkrig.train(disp=False, pool=pool)

        # Perform update on design space
        if self.moboInfo['acquifunc'].lower().startswith('ehvi'):
            self.ehviupdate(disp, pool=pool)
        elif self.moboInfo['acquifunc'].lower() == 'parego':
            self.paregoupdate(disp, pool=pool)
        else:
            raise ValueError(f"{self.moboInfo['acquifunc']} is not a valid "
                             f"acquisition function.")

        # Finish optimization and return values
        if disp:
            print("Optimization finished, now creating the final outputs.")

        xupdate = self.Xall[(-self.moboInfo['nup'] * self.multiupdate):, :]
        yupdate = self.yall[(-self.moboInfo['nup'] * self.multiupdate):, :]
        supdate = self.spredall[(-self.moboInfo['nup'] * self.multiupdate):, :]
        metricall = self.metricall

        return xupdate, yupdate, supdate, metricall

    def ehviupdate(self, disp=True, pool=None):
        """
        Update MOBO using EHVI algorithm.

        Args:
            disp (bool, optional): Print progress. Defaults to True.
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool instance.
                Will be passed to functions for use, if specified.
                Defaults to None.

        Returns:
             None
        """
        self.spredall = deepcopy(self.yall)
        self.spredall[:] = 0
        while self.nup < self.moboInfo['nup']:
            # Iteratively update the reference point for hypervolume computation
            # if EHVI is used as the acquisition function
            if self.moboInfo['refpointtype'].lower() == 'dynamic':
                rp = (np.max(self.yall, 0) +
                      (np.max(self.yall, 0) - np.min(self.yall, 0)) * 2)
                self.moboInfo['refpoint'] = rp

            # Perform update(s)
            if self.multiupdate < 1:
                raise ValueError("Number of multiple updates must be >= 1")
            else:
                res = self.simultpredehvi(disp=disp, pool=pool)
                xnext, yprednext, sprednext, metricnext = res

            if self.nup == 0:
                self.metricall = metricnext.reshape(-1, 1)
            else:
                self.metricall = np.vstack((self.metricall, metricnext))

            # Break Loop if auto is false
            if self.autoupdate is False:
                self.Xall = np.vstack((self.Xall, xnext))
                self.yall = np.vstack((self.yall, yprednext))
                self.spredall = np.vstack((self.spredall, sprednext))
                break

            # Evaluate and enrich experimental design
            self.enrich(xnext, pool=pool)

            # Update number of iterations
            self.nup += 1

            # Show optimization progress
            if disp:
                print(
                    f"Update no.: {self.nup+1}, F-count: {np.size(self.Xall, 0)}, "
                    f"Maximum no. updates: {self.moboInfo['nup']+1}")

    def paregoupdate(self, disp=True, pool=None):
        """
        Update MOBO using ParEGO algorithm.

        Args:
            disp (bool, optional): Print progress. Defaults to True.
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool instance.
                Will be passed to functions for use, if specified.
                Defaults to None.

        Returns:
             None
        """
        while self.nup < self.moboInfo['nup']:
            # Perform update(s)
            if self.multiupdate < 0:
                raise ValueError(
                    "Number of multiple updates must be greater than or "
                    "equal to 0")
            elif self.multiupdate in (0, 1):
                x_n, met_n = run_single_opt(self.scalkrig,
                                            self.moboInfo,
                                            krigconstlist=self.krigconstlist,
                                            cheapconstlist=self.cheapconstlist,
                                            pool=pool)
                xnext = x_n
                metricnext = met_n
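                # The scalarized Kriging is optimized with a single-objective
                # acquisition function inside run_single_opt; the individual
                # objective surrogates are only queried below to record the
                # predicted objective values at the suggested point.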
                yprednext = np.zeros(shape=[len(self.kriglist)])
                for ii, krigobj in enumerate(self.kriglist):
                    yprednext[ii] = krigobj.predict(xnext, ['pred'])
            else:
                xnext, yprednext, metricnext = self.simultpredparego(pool=pool)

            if self.nup == 0:
                self.metricall = metricnext.reshape(-1, 1)
            else:
                self.metricall = np.vstack((self.metricall, metricnext))

            # Break the loop if automatic updating is disabled
            if not self.autoupdate:
                self.Xall = np.vstack((self.Xall, xnext))
                self.yall = np.vstack((self.yall, yprednext))
                break

            # Evaluate and enrich experimental design
            self.enrich(xnext, pool=pool)

            # Update number of iterations
            self.nup += 1

            # Show optimization progress
            if disp:
                print(f"Update no.: {self.nup+1}, "
                      f"F-count: {np.size(self.Xall, 0)}, "
                      f"Maximum no. updates: {self.moboInfo['nup']+1}")

    def simultpredehvi(self, disp=False, pool=None):
        """
        Perform multi updates on EHVI MOBO using Kriging believer method.

        Args:
            disp (bool, optional): Print progress. Defaults to False.
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool
                instance. Will be passed to functions for use, if specified.
                Defaults to None.

        Returns:
            xalltemp (np.ndarray): [n_kb, n_dv] array of update design
                variable values.
            yalltemp (np.ndarray): [n_kb, n_obj] array of update
                objective values.
            salltemp (np.ndarray): [n_kb, n_obj] array of update
                objective uncertainty values.
            metricall (np.ndarray): [n_kb, 1] array of update metric
                values.
        """
        n_krig = len(self.kriglist)
        n_dv = self.kriglist[0].KrigInfo["nvar"]

        krigtemp = [deepcopy(obj) for obj in self.kriglist]
        yprednext = np.zeros([n_krig])
        sprednext = np.zeros([n_krig])

        xalltemp = np.empty([self.multiupdate, n_dv])
        yalltemp = np.empty([self.multiupdate, n_krig])
        salltemp = np.empty([self.multiupdate, n_krig])
        metricall = np.empty([self.multiupdate, 1])

        ypartemp = self.ypar
        yall = self.yall

        for ii in range(self.multiupdate):
            t1 = time.time()
            if disp:
                print(f"update number {ii+1}")

            xnext, metrictemp = run_multi_opt(
                krigtemp,
                self.moboInfo,
                ypartemp,
                krigconstlist=self.krigconstlist,
                cheapconstlist=self.cheapconstlist,
                pool=pool)

            bound = np.vstack((-np.ones([1, n_dv]), np.ones([1, n_dv])))
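            # Bounds of the normalized design space, assuming the default
            # standardization maps the samples to [-1, 1]^n_dv; used to
            # rebuild the regression matrix after the believer update below.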

            for jj, krig in enumerate(krigtemp):
                yprednext[jj], sprednext[jj] = krig.predict(
                    xnext, ['pred', 's'])
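                # Kriging believer step: append the surrogate's own prediction
                # at xnext as a pseudo-observation and re-condition the model,
                # so the next inner optimization is steered away from this
                # point without spending an expensive real evaluation.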
                krig.KrigInfo['X'] = np.vstack((krig.KrigInfo['X'], xnext))
                krig.KrigInfo['y'] = np.vstack(
                    (krig.KrigInfo['y'], yprednext[jj]))
                krig.standardize()
                krig.KrigInfo["F"] = compute_regression_mat(
                    krig.KrigInfo["idx"], krig.KrigInfo["X_norm"], bound,
                    np.ones([n_dv]))
                krig.KrigInfo = likelihood(krig.KrigInfo['Theta'],
                                           krig.KrigInfo,
                                           mode='all',
                                           trainvar=krig.trainvar)

            xalltemp[ii, :] = xnext[:]
            yalltemp[ii, :] = yprednext[:]
            salltemp[ii, :] = sprednext[:]
            metricall[ii, :] = metrictemp

            yall = np.vstack((yall, yprednext))
            ypartemp, _ = searchpareto.paretopoint(yall)
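            # The temporary Pareto front now includes the believed (predicted)
            # points, so the EHVI of the next inner optimization accounts for
            # the updates already scheduled in this batch.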

            if disp:
                print(f"time: {time.time() - t1:.2f} s")

        return xalltemp, yalltemp, salltemp, metricall

    def simultpredparego(self, pool=None):
        """
        Perform multi updates on ParEGO MOBO by varying the weighting function.

        Args:
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool
                instance. Will be passed to functions for use, if specified.
                Defaults to None.

        Returns:
            xallnext (np.ndarray): Array of design variable updates, one
                row per update.
            yallnext (np.ndarray): Array of predicted objective values for
                each update.
            metricall (np.ndarray): Array of acquisition metric values for
                each update.
        """
        idxs = np.random.choice(11, self.multiupdate)
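        # Each index appears to pick one of 11 predefined scalarization weight
        # vectors used by paregopre; drawing them at random gives every update
        # in the batch a (possibly repeated) different weighting of the
        # objectives.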
        scalinfotemp = deepcopy(self.KrigScalarizedInfo)
        xalltemp = self.Xall[:, :]
        yalltemp = self.yall[:, :]
        yprednext = np.zeros(shape=[len(self.kriglist)])

        for ii, idx in enumerate(idxs):
            print(f"update number {ii + 1}")
            scalinfotemp['X'] = xalltemp
            scalinfotemp['y'] = paregopre(yalltemp, idx)
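            # paregopre presumably applies ParEGO's augmented Tchebycheff
            # scalarization, roughly
            #     f_scal = max_j(w_j * f_j) + rho * sum_j(w_j * f_j),
            # with the weight vector w selected by idx and rho a small constant.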
            krigtemp = Kriging(scalinfotemp,
                               standardization=True,
                               standtype='default',
                               normy=False,
                               trainvar=False)
            krigtemp.train(disp=False, pool=pool)
            x_n, met_n = run_single_opt(krigtemp,
                                        self.moboInfo,
                                        krigconstlist=self.krigconstlist,
                                        cheapconstlist=self.cheapconstlist,
                                        pool=pool)
            xnext = x_n
            metricnext = met_n
            for jj, krigobj in enumerate(self.kriglist):
                yprednext[jj] = krigobj.predict(xnext, ['pred'])
            if ii == 0:
                xallnext = deepcopy(xnext)
                yallnext = deepcopy(yprednext)
                metricall = deepcopy(metricnext)
            else:
                xallnext = np.vstack((xallnext, xnext))
                yallnext = np.vstack((yallnext, yprednext))
                metricall = np.vstack((metricall, metricnext))

            yalltemp = np.vstack((yalltemp, yprednext))
            xalltemp = np.vstack((xalltemp, xnext))

        return xallnext, yallnext, metricall

    def enrich(self, xnext, pool=None):
        """
        Evaluate and enrich experimental design.

        Args:
            xnext: Next design variable(s) to be evaluated.
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool
                instance. Will be passed to functions for use, if specified.
                Defaults to None.

        Returns:
            None
        """
        # Evaluate new sample
        obj_krig_problem = self.kriglist[0].KrigInfo['problem']
        if isinstance(obj_krig_problem, str):
            if np.ndim(xnext) == 1:
                ynext = evaluate(xnext, obj_krig_problem)
            else:
                ynext = np.zeros(shape=[np.size(xnext, 0), len(self.kriglist)])
                for ii in range(np.size(xnext, 0)):
                    ynext[ii, :] = evaluate(xnext[ii, :], obj_krig_problem)
        elif callable(obj_krig_problem):
            ynext = obj_krig_problem(xnext)
        else:
            raise ValueError('KrigInfo["problem"] is not a string nor a '
                             'callable function!')

        if self.krigconstlist is not None:
            for idx, constobj in enumerate(self.krigconstlist):
                con_krig_problem = constobj.KrigInfo['problem']
                if isinstance(con_krig_problem, str):
                    ynext_const = evaluate(xnext, con_krig_problem)
                elif callable(con_krig_problem):
                    ynext_const = con_krig_problem(xnext).reshape(-1, 1)
                else:
                    raise ValueError(
                        'KrigConstInfo["problem"] is not a string '
                        'nor a callable function!')
                constobj.KrigInfo['X'] = np.vstack(
                    (constobj.KrigInfo['X'], xnext))
                constobj.KrigInfo['y'] = np.vstack(
                    (constobj.KrigInfo['y'], ynext_const))
                constobj.standardize()
                constobj.train(disp=False, pool=pool)

        # Treatment for failed (NaN) solutions.
        # Reference: Forrester, A. I., Sóbester, A., & Keane, A. J. (2006).
        # Optimization with missing data. Proceedings of the Royal Society A:
        # Mathematical, Physical and Engineering Sciences, 462(2067), 935-945.
        if np.isnan(ynext).any():
            for jj in range(len(self.kriglist)):
                SSqr, y_hat = self.kriglist[jj].predict(
                    xnext, ['SSqr', 'pred'])
                ynext[0, jj] = y_hat + SSqr
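                # Pessimistic imputation: the missing objective is replaced by
                # the surrogate prediction inflated by its error estimate, so
                # the failed region is penalized without stopping the search.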

        # Enrich experimental design
        self.yall = np.vstack((self.yall, ynext))
        self.Xall = np.vstack((self.Xall, xnext))
        self.ypar, I = searchpareto.paretopoint(
            self.yall)  # Recompute non-dominated solutions

        if self.moboInfo['acquifunc'].lower().startswith('ehvi'):
            for index, krigobj in enumerate(self.kriglist):
                krigobj.KrigInfo['X'] = self.Xall
                krigobj.KrigInfo['y'] = self.yall[:, index].reshape(-1, 1)
                krigobj.standardize()
                krigobj.train(disp=False, pool=pool)
        elif self.moboInfo['acquifunc'].lower() == 'parego':
            self.KrigScalarizedInfo['X'] = self.Xall
            self.KrigScalarizedInfo['y'] = paregopre(self.yall)
            self.scalkrig = Kriging(self.KrigScalarizedInfo,
                                    standardization=True,
                                    standtype='default',
                                    normy=False,
                                    trainvar=False)
            self.scalkrig.train(disp=False, pool=pool)
            for index, krigobj in enumerate(self.kriglist):
                krigobj.KrigInfo['X'] = self.Xall
                krigobj.KrigInfo['y'] = self.yall[:, index].reshape(-1, 1)
                krigobj.standardize()
                krigobj.train(disp=False, pool=pool)
        else:
            raise NotImplementedError(
                f"{self.moboInfo['acquifunc']} is not a valid acquisition "
                f"function.")

        # Save data
        if self.savedata:
            I = I.astype(int)
            Xbest = self.Xall[I, :]
            sio.savemat(self.moboInfo["filename"], {
                "xbest": Xbest,
                "ybest": self.ypar
            })
Example #16
0
    def _run(self, disp=True, infeasible=None, pool=None):
        """
        Run multi objective unconstrained Bayesian optimization.

        Args:
            disp (bool, optional): Print progress. Defaults to True.
            infeasible (np.ndarray, optional): Indices of infeasible
                samples to delete. Defaults to None.
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool
                instance. Will be passed to functions for use, if specified.
                Defaults to None.

        Returns:
            xupdate (np.ndarray): Design variable values of the updates.
            yupdate (np.ndarray): Objective values of the updates.
            supdate (np.ndarray): Predicted objective uncertainty values of
                the updates.
            metricall (np.ndarray): Acquisition metric values of the updates.
        """
        self.nup = 0  # Number of current iteration
        self.Xall = self.kriglist[0].KrigInfo['X']
        n_samp = self.kriglist[0].KrigInfo["nsamp"]
        n_krig = len(self.kriglist)
        self.yall = np.zeros([n_samp, n_krig])
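        # Collect the initial objective samples column-wise: one column of
        # yall per objective surrogate in kriglist.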
        for ii in range(n_krig):
            self.yall[:, ii] = self.kriglist[ii].KrigInfo["y"][:, 0]

        if infeasible is not None:
            self.yall = np.delete(self.yall.copy(), infeasible, 0)
            self.Xall = np.delete(self.Xall.copy(), infeasible, 0)

        self.ypar, _ = searchpareto.paretopoint(self.yall)
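        # Initial non-dominated subset of the starting samples; this serves as
        # the current Pareto front approximation that the updates try to improve.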

        print("Begin multi-objective Bayesian optimization process.")
        if self.autoupdate and disp:
            print(f"Update no.: {self.nup + 1}, F-count: {n_samp}, "
                  f"Maximum no. updates: {self.moboInfo['nup'] + 1}")

        # If the optimizer is ParEGO, create a scalarized Kriging
        if self.moboInfo['acquifunc'].lower() == 'parego':
            self.KrigScalarizedInfo = deepcopy(self.kriglist[0].KrigInfo)
            self.KrigScalarizedInfo['y'] = paregopre(self.yall)
            self.scalkrig = Kriging(self.KrigScalarizedInfo,
                                    standardization=True,
                                    standtype='default',
                                    normy=False,
                                    trainvar=False)
            self.scalkrig.train(disp=False, pool=pool)
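            # ParEGO collapses the objectives into a single scalarized response
            # (via paregopre) and fits one Kriging model to it, so the update
            # step can reuse single-objective machinery.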

        # Perform update on design space
        if self.moboInfo['acquifunc'].lower().startswith('ehvi'):
            self.ehviupdate(disp, pool=pool)
        elif self.moboInfo['acquifunc'].lower() == 'parego':
            self.paregoupdate(disp, pool=pool)
        else:
            raise ValueError(f"{self.moboInfo['acquifunc']} is not a valid "
                             f"acquisition function.")

        # Finish optimization and return values
        if disp:
            print("Optimization finished, now creating the final outputs.")

        xupdate = self.Xall[(-self.moboInfo['nup'] * self.multiupdate):, :]
        yupdate = self.yall[(-self.moboInfo['nup'] * self.multiupdate):, :]
        supdate = self.spredall[(-self.moboInfo['nup'] * self.multiupdate):, :]
        metricall = self.metricall

        return xupdate, yupdate, supdate, metricall