Example no. 1
    def paregoupdate(self, disp=True, pool=None):
        """
        Update MOBO using ParEGO algorithm.

        Args:
            disp (bool, optional): Print progress. Defaults to True.
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool instance.
                Will be passed to functions for use, if specified.
                Defaults to None.

        Returns:
             None
        """
        while self.nup < self.moboInfo['nup']:
            # Perform update(s)
            if self.multiupdate < 0:
                raise ValueError(
                    "Number of multiple update must be greater or "
                    "equal to 0")
            elif self.multiupdate in (0, 1):
                x_n, met_n = run_single_opt(self.scalkrig,
                                            self.moboInfo,
                                            krigconstlist=self.krigconstlist,
                                            cheapconstlist=self.cheapconstlist,
                                            pool=pool)
                xnext = x_n
                metricnext = met_n
                yprednext = np.zeros(shape=[len(self.kriglist)])
                for ii, krigobj in enumerate(self.kriglist):
                    yprednext[ii] = krigobj.predict(xnext, ['pred'])
            else:
                xnext, yprednext, metricnext = self.simultpredparego(pool=pool)

            if self.nup == 0:
                self.metricall = metricnext.reshape(-1, 1)
            else:
                self.metricall = np.vstack((self.metricall, metricnext))

            # Break Loop if auto is false
            if not self.autoupdate:
                self.Xall = np.vstack((self.Xall, xnext))
                self.yall = np.vstack((self.yall, yprednext))
                break

            # Evaluate and enrich experimental design
            self.enrich(xnext, pool=pool)

            # Update number of iterations
            self.nup += 1

            # Show optimization progress
            if disp:
                print(f"Update no.: {self.nup+1}, "
                      f"F-count: {np.size(self.Xall, 0)}, "
                      f"Maximum no. updates: {self.moboInfo['nup']+1}")
Example no. 2
    def simultpredparego(self, pool=None):
        """
        Perform multi updates on ParEGO MOBO by varying the weighting function.

        Args:
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool instance.
                Will be passed to functions for use, if specified.
                Defaults to None.

        Returns:
             xallnext (nparray): Array of next design variable updates.
             yallnext (nparray): Array of predicted objective values for the updates.
             metricall (nparray): Array of acquisition metric values for the updates.
        """
        idxs = np.random.choice(11, self.multiupdate)
        scalinfotemp = deepcopy(self.KrigScalarizedInfo)
        xalltemp = self.Xall[:, :]
        yalltemp = self.yall[:, :]
        yprednext = np.zeros(shape=[len(self.kriglist)])

        for ii, idx in enumerate(idxs):
            print(f"update number {ii + 1}")
            scalinfotemp['X'] = xalltemp
            scalinfotemp['y'] = paregopre(yalltemp, idx)
            krigtemp = Kriging(scalinfotemp,
                               standardization=True,
                               standtype='default',
                               normy=False,
                               trainvar=False)
            krigtemp.train(disp=False, pool=pool)
            x_n, met_n = run_single_opt(krigtemp,
                                        self.moboInfo,
                                        krigconstlist=self.krigconstlist,
                                        cheapconstlist=self.cheapconstlist,
                                        pool=pool)
            xnext = x_n
            metricnext = met_n
            for jj, krigobj in enumerate(self.kriglist):
                yprednext[jj] = krigobj.predict(xnext, ['pred'])
            if ii == 0:
                xallnext = deepcopy(xnext)
                yallnext = deepcopy(yprednext)
                metricall = deepcopy(metricnext)
            else:
                xallnext = np.vstack((xallnext, xnext))
                yallnext = np.vstack((yallnext, yprednext))
                metricall = np.vstack((metricall, metricnext))

            # Append the newly predicted point so the next weighting is
            # trained on the enriched temporary dataset.
            yalltemp = np.vstack((yalltemp, yprednext))
            xalltemp = np.vstack((xalltemp, xnext))

        return xallnext, yallnext, metricall
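
`np.random.choice(11, self.multiupdate)` draws indices from {0, ..., 10}, and `paregopre(yalltemp, idx)` turns each index into a differently weighted scalarization of the objectives. For two objectives the conventional ParEGO choice is 11 evenly spaced weight vectors; the mapping below is a hypothetical sketch of that convention, not necessarily the exact one `paregopre` uses.

import numpy as np

def index_to_weights(idx, n_vectors=11):
    """Hypothetical mapping from a drawn index in {0, ..., 10} to a
    two-objective ParEGO weight vector on an evenly spaced grid."""
    w1 = idx / (n_vectors - 1)
    return np.array([w1, 1.0 - w1])

# The indices sampled by simultpredparego would then correspond to:
for idx in np.random.choice(11, size=3, replace=True):
    print(idx, index_to_weights(idx))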
Example no. 3
    def paregoupdate(self, disp):
        """
        Update MOBO using ParEGO algorithm.

        Args:
            disp (bool): Print progress or not.

        Returns:
             None
        """
        while self.nup < self.moboInfo['nup']:
            # Perform update(s)
            if self.multiupdate < 0:
                raise ValueError(
                    "Number of multiple update must be greater or equal to 0")
            elif self.multiupdate in (0, 1):
                xnext, metricnext = run_single_opt(self.scalkrig,
                                                   self.moboInfo,
                                                   self.krigconstlist,
                                                   self.cheapconstlist)
                yprednext = np.zeros(shape=[len(self.kriglist)])
                for ii, krigobj in enumerate(self.kriglist):
                    yprednext[ii] = krigobj.predict(xnext, ['pred'])
            else:
                xnext, yprednext, metricnext = self.simultpredparego()

            if self.nup == 0:
                self.metricall = metricnext
            else:
                self.metricall = np.vstack((self.metricall, metricnext))

            # Break Loop if auto is false
            if not self.autoupdate:
                self.Xall = np.vstack((self.Xall, xnext))
                self.yall = np.vstack((self.yall, yprednext))
                break

            # Evaluate and enrich experimental design
            self.enrich(xnext)

            # Update number of iterations
            self.nup += 1

            # Show optimization progress
            if disp:
                print(
                    f"Update no.: {self.nup+1}, F-count: {np.size(self.Xall, 0)}, "
                    f"Maximum no. updates: {self.moboInfo['nup']+1}")
Example no. 4
    def run(self, disp=True):
        """
        Run single-objective unconstrained Bayesian optimization.

        Args:
            disp (bool, optional): Print progress. Defaults to True.

        Returns:
            x_opt (nparray): Design variables of the best sample found.
            y_opt (nparray): Best objective value found.
        """
        self.nup = 0  # Number of current iteration
        self.Xall = self.krigobj.KrigInfo['X']
        self.yall = self.krigobj.KrigInfo['y']
        self.yhist = np.array([np.min(self.yall)])
        self.istall = 0

        print("Begin single-objective Bayesian optimization process.")
        while self.nup < self.soboInfo['nup']:

            if self.autoupdate and disp:
                print(f"Update no.: {self.nup + 1}, F-count: {np.size(self.Xall, 0)}, "
                      f"Best f(x): {self.yhist[self.nup]}, Stall counter: {self.istall}")

            # Find next suggested point
            self.xnext, self.metricnext = run_single_opt(
                self.krigobj, self.soboInfo,
                self.krigconstlist, self.cheapconstlist)

            # Break Loop if autoupdate is False
            if not self.autoupdate:
                break

            # Evaluate response for next decision variable
            if isinstance(self.krigobj.KrigInfo['problem'], str):
                self.ynext = evaluate(self.xnext, self.krigobj.KrigInfo['problem'])
            elif callable(self.krigobj.KrigInfo['problem']):
                self.ynext = self.krigobj.KrigInfo['problem'](self.xnext)

            # Treatment for failed solutions. Reference: Forrester, A. I., Sóbester, A., & Keane, A. J. (2006).
            # Optimization with missing data. Proceedings of the Royal Society A:
            # Mathematical, Physical and Engineering Sciences, 462(2067), 935-945.
            if np.isnan(self.ynext).any():
                SSqr, y_hat = self.krigobj.predict(self.xnext, ['SSqr', 'pred'])
                self.ynext = y_hat + SSqr

            # Enrich experimental design
            self.krigobj.KrigInfo['X'] = np.vstack((self.krigobj.KrigInfo['X'], self.xnext))
            self.krigobj.KrigInfo['y'] = np.vstack((self.krigobj.KrigInfo['y'], self.ynext))

            # Re-train Kriging model
            self.krigobj.standardize()
            self.krigobj.train(disp=False)

            if self.nup == 0:
                self.xupdate = deepcopy(self.xnext)
                self.yupdate = deepcopy(self.ynext)
            else:
                self.xupdate = np.vstack((self.xupdate, self.xnext))
                self.yupdate = np.vstack((self.yupdate, self.ynext))

            self.nup += 1
            self.yhist = np.vstack((self.yhist, np.min(self.krigobj.KrigInfo['y'])))

            # Check stall iteration
            if self.yhist[self.nup, 0] == self.yhist[self.nup - 1, 0]:
                self.istall += 1
                if self.istall == self.soboInfo['stalliteration']:
                    break
            else:
                self.istall = 0

        print("Optimization finished, now creating the final outputs.")
        y_opt = np.min(self.krigobj.KrigInfo['y'])
        min_pos = np.argmin(self.krigobj.KrigInfo['y'])
        x_opt = self.krigobj.KrigInfo['X'][min_pos, :]
        if self.autoupdate:
            return x_opt, y_opt
        else:
            return self.xnext, self.ynext
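
The NaN branch above implements the missing-data treatment of Forrester et al. (2006): a failed evaluation is replaced by the surrogate's prediction penalized by its predicted variance, so the Kriging model stays trainable while the optimizer is steered away from the failed region. A self-contained sketch of that imputation (the numbers are illustrative only):

import numpy as np

def impute_failed_response(y_hat, s_sqr):
    """Missing-data treatment after Forrester et al. (2006): replace a NaN
    response with the Kriging prediction plus its predicted variance."""
    return np.asarray(y_hat) + np.asarray(s_sqr)

# A failed run (NaN) is replaced by a pessimistic but finite value:
y_next = np.array([np.nan])
if np.isnan(y_next).any():
    y_next = impute_failed_response(y_hat=[1.8], s_sqr=[0.35])
print(y_next)  # [2.15]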
Example no. 5
    def _run(self, disp=True, pool=None):
        """Run multi objective unconstrained Bayesian optimization.

        Args:
            disp (bool, optional): Print progress. Defaults to True.
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool instance.
                Will be passed to functions for use, if specified.
                Defaults to None.

        Returns:
            x_opt (np.ndarray): Design variables of the best sample found
                after optimization.
            y_opt (np.ndarray): Best objective value found after
                optimization.
        """
        self.nup = 0  # Number of current iteration
        self.Xall = self.krigobj.KrigInfo["X"]
        self.yall = self.krigobj.KrigInfo["y"]
        self.yhist = np.array([np.min(self.yall)])
        self.istall = 0

        print("Begin single-objective Bayesian optimization process.")
        while self.nup < self.soboInfo["nup"]:

            if self.autoupdate and disp:
                print(
                    f"Update no.: {self.nup + 1}, F-count: {np.size(self.Xall, 0)}, "
                    f"Best f(x): {self.yhist[self.nup]}, Stall counter: {self.istall}"
                )

            # Find next suggested point
            x_n, metric_n = run_single_opt(self.krigobj,
                                           self.soboInfo,
                                           krigconstlist=self.krigconstlist,
                                           cheapconstlist=self.cheapconstlist,
                                           pool=pool)
            self.xnext = x_n
            self.metricnext = metric_n

            # Break Loop if autoupdate is False
            if not self.autoupdate:
                break

            # Evaluate response for next decision variable
            obj_krig_problem = self.krigobj.KrigInfo["problem"]
            if isinstance(obj_krig_problem, str):
                self.ynext = evaluate(self.xnext, obj_krig_problem)
            elif callable(obj_krig_problem):
                self.ynext = obj_krig_problem(self.xnext)
            else:
                raise ValueError('KrigInfo["problem"] is neither a string '
                                 'nor a callable function!')

            # Treatment for failed solutions. Reference: Forrester, A. I., Sóbester, A., & Keane, A. J. (2006).
            # Optimization with missing data. Proceedings of the Royal Society A:
            # Mathematical, Physical and Engineering Sciences, 462(2067), 935-945.
            if np.isnan(self.ynext).any():
                SSqr, y_hat = self.krigobj.predict(self.xnext,
                                                   ["SSqr", "pred"])
                self.ynext = y_hat + SSqr

            # Enrich experimental design
            self.krigobj.KrigInfo["X"] = np.vstack(
                (self.krigobj.KrigInfo["X"], self.xnext))
            self.krigobj.KrigInfo["y"] = np.vstack(
                (self.krigobj.KrigInfo["y"], self.ynext))

            # Re-train Kriging model
            self.krigobj.standardize()
            self.krigobj.train(disp=False, pool=pool)

            if self.nup == 0:
                self.xupdate = deepcopy(self.xnext)
                self.yupdate = deepcopy(self.ynext)
            else:
                self.xupdate = np.vstack((self.xupdate, self.xnext))
                self.yupdate = np.vstack((self.yupdate, self.ynext))

            self.nup += 1
            self.yhist = np.vstack(
                (self.yhist, np.min(self.krigobj.KrigInfo["y"])))

            # Check stall iteration
            if self.yhist[self.nup, 0] == self.yhist[self.nup - 1, 0]:
                self.istall += 1
                if self.istall == self.soboInfo["stalliteration"]:
                    break
            else:
                self.istall = 0

        print("Optimization finished, now creating the final outputs.")
        y_opt = np.min(self.krigobj.KrigInfo["y"])
        min_pos = np.argmin(self.krigobj.KrigInfo["y"])
        x_opt = self.krigobj.KrigInfo["X"][min_pos, :]
        if self.autoupdate:
            return x_opt, y_opt
        else:
            return self.xnext, self.ynext
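
The stopping logic above keeps a stall counter: whenever an update fails to improve the best observed objective, `istall` is incremented, and the loop stops once it reaches `soboInfo['stalliteration']`. A minimal standalone sketch of that criterion (the history values below are made up for illustration):

def stalled(yhist, stall_limit):
    """Sketch of the stall criterion used in _run: count consecutive
    updates that leave the best observed objective unchanged and report
    whether the count has reached the configured limit."""
    istall = 0
    for prev, curr in zip(yhist[:-1], yhist[1:]):
        istall = istall + 1 if curr == prev else 0
        if istall >= stall_limit:
            return True
    return False

print(stalled([3.0, 2.1, 2.1, 2.1], stall_limit=2))  # True
print(stalled([3.0, 2.1, 2.0, 1.9], stall_limit=2))  # False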