Ejemplo n.º 1
0
    def __init__(self,
                 nvar,
                 krigobj=None,
                 problem=None,
                 ub=None,
                 lb=None,
                 nMC=2e5):
        """
        Initialize the model with a Sobol Monte Carlo design.

        Args:
            nvar (int): Number of variables.
            krigobj (object): Kriging object, default to None.
            problem (str/callable/None): problem type, default to None.
            ub (array-like, optional): Upper bounds for real-valued sampling.
            lb (array-like, optional): Lower bounds for real-valued sampling.
            nMC (int/float): Number of Monte Carlo samples, default to 2e5.
        """
        self.nvar = nvar
        self.krigobj = krigobj
        self.problem = problem
        self.n = int(nMC)
        # Draw a 2*nvar-dimensional Sobol design, then split it into the
        # A and B sample matrices (nvar columns each).
        if ub is None or lb is None:
            # No bounds supplied: keep the normalized (unit-cube) design.
            design, _ = sampling("sobolnew", self.nvar * 2, self.n)
        else:
            # Bounds supplied: keep the real-scaled design.
            _, design = sampling("sobolnew",
                                 self.nvar * 2,
                                 self.n,
                                 result="real",
                                 upbound=ub,
                                 lobound=lb)
        self.A = design[:, :self.nvar]
        self.B = design[:, self.nvar:]
        del design
        # Responses and Sobol-index intermediates, filled in later.
        self.ya = None
        self.yb = None
        self.fo_2 = None
        self.denom = None
Ejemplo n.º 2
0
def generate_kriging(n_cpu):
    """
    Build and train a Kriging model of the Styblinski function.

    Args:
        n_cpu (int): Number of CPUs used for parallel hyperparameter
            training.

    Returns:
        Kriging: The trained Kriging object.
    """
    # Sampling: 40 Halton points in [-5, 5]^2.
    nsample = 40
    nvar = 2
    ub = np.array([5, 5])
    lb = np.array([-5, -5])
    sampoption = "halton"
    _, X = sampling(sampoption,
                    nvar,
                    nsample,
                    result="real",
                    upbound=ub,
                    lobound=lb)
    # Evaluate sample
    y1 = evaluate(X, "styblinski")

    # Initialize KrigInfo from the library defaults, then configure it.
    # (The previous dead `KrigInfo = dict()` pre-assignment was removed.)
    KrigInfo = initkriginfo()
    KrigInfo["X"] = X
    KrigInfo["y"] = y1
    KrigInfo["nvar"] = nvar
    KrigInfo["problem"] = "styblinski"
    KrigInfo["nsamp"] = nsample
    KrigInfo["nrestart"] = 7
    KrigInfo["ub"] = ub
    KrigInfo["lb"] = lb
    # Single source of truth for the kernel list (was assigned twice).
    KrigInfo["kernel"] = ["gaussian"]
    KrigInfo["nkernel"] = len(KrigInfo["kernel"])
    KrigInfo["TrendOrder"] = 0
    KrigInfo["nugget"] = -6
    # KrigInfo["n_princomp"] = 1
    KrigInfo["optimizer"] = "lbfgsb"

    # Run Kriging, timing the training and reporting the LOOCV error.
    t = time.time()
    krigobj = Kriging(KrigInfo,
                      standardization=True,
                      standtype="default",
                      normy=False,
                      trainvar=False)
    krigobj.train(n_cpu=n_cpu)
    loocverr, _ = krigobj.loocvcalc()
    elapsed = time.time() - t
    print("elapsed time for train Kriging model: ", elapsed, "s")
    print("LOOCV error of Kriging model: ", loocverr, "%")

    return krigobj
Ejemplo n.º 3
0
def generate_kriging(n_cpu):
    """
    Train two single-objective Kriging models on `myproblem`.

    Args:
        n_cpu (int): Number of CPUs used for parallel hyperparameter
            training.

    Returns:
        tuple: Two trained Kriging objects, one per objective column of y.
    """
    # Design of experiments: 20 Halton points in [-1, 1]^2.
    nsample = 20
    nvar = 2
    lb = -np.ones(shape=[nvar])
    ub = np.ones(shape=[nvar])
    _, X = sampling("halton",
                    nvar,
                    nsample,
                    result="real",
                    upbound=ub,
                    lobound=lb)
    # Evaluate sample; y stays module-global as in the original script.
    global y
    y = myproblem(X)

    # Configure the first objective, then copy the config for the second.
    first_info = initkriginfo()
    first_info["X"] = X
    first_info["y"] = y[:, 0].reshape(-1, 1)
    first_info["problem"] = myproblem
    first_info["nrestart"] = 5
    first_info["ub"] = ub
    first_info["lb"] = lb
    first_info["optimizer"] = "lbfgsb"

    second_info = deepcopy(first_info)
    second_info['y'] = y[:, 1].reshape(-1, 1)

    # Train one Kriging model per objective, reporting LOOCV errors.
    models = []
    for info in (first_info, second_info):
        model = Kriging(info,
                        standardization=True,
                        standtype='default',
                        normy=False,
                        trainvar=False)
        model.train(n_cpu=n_cpu)
        loocverr, _ = model.loocvcalc()
        print("LOOCV error of Kriging model: ", loocverr, "%")
        models.append(model)

    return models[0], models[1]
Ejemplo n.º 4
0
def predictkrig(krigobj):
    """
    Evaluate a trained Kriging model on a 100x100 grid over [-5, 5]^2,
    report error metrics against the true Styblinski function, and plot
    the predicted surface.

    Args:
        krigobj (Kriging): Trained Kriging object exposing `.predict`.

    Returns:
        None
    """
    # Build the flattened (neval, 2) evaluation grid from a meshgrid.
    # (The previous Halton `sampling` call was dead code: its output was
    # discarded in favor of this grid, so it has been removed.)
    neval = 10000
    xx = np.linspace(-5, 5, 100)
    yy = np.linspace(-5, 5, 100)
    Xevalx, Xevaly = np.meshgrid(xx, yy)
    Xeval = np.zeros(shape=[neval, 2])
    Xeval[:, 0] = np.reshape(Xevalx, (neval))
    Xeval[:, 1] = np.reshape(Xevaly, (neval))

    # Predicted vs. true responses. predict/evaluate return fresh arrays,
    # so no preallocation is needed.
    yeval, ssqr = krigobj.predict(Xeval, ["pred", "ssqr"])
    yact = evaluate(Xeval, "styblinski")

    # Error metrics: RMSE (absolute), RMSRE and MAPE (relative).
    subs = np.transpose((yact - yeval))
    subs1 = np.transpose((yact - yeval) / yact)
    RMSE = np.sqrt(np.sum(subs**2) / neval)
    RMSRE = np.sqrt(np.sum(subs1**2) / neval)
    MAPE = 100 * np.sum(abs(subs1)) / neval
    print("RMSE = ", RMSE)
    print("RMSRE = ", RMSRE)
    print("MAPE = ", MAPE, "%")

    # 3D surface plot of the prediction.
    yeval1 = np.reshape(yeval, (100, 100))
    x1eval = np.reshape(Xeval[:, 0], (100, 100))
    x2eval = np.reshape(Xeval[:, 1], (100, 100))
    fig = plt.figure()
    # Figure.gca(projection=...) was deprecated in Matplotlib 3.4 and
    # removed in 3.6; add_subplot is the supported replacement.
    ax = fig.add_subplot(projection="3d")
    surf = ax.plot_surface(x1eval,
                           x2eval,
                           yeval1,
                           cmap=cm.coolwarm,
                           linewidth=0,
                           antialiased=False)
    plt.show()
Ejemplo n.º 5
0
def generate_kriging(n_cpu):
    """
    Train a Kriging model of the Styblinski function on 10 Halton samples.

    Args:
        n_cpu (int): Number of CPUs used for parallel hyperparameter
            training.

    Returns:
        Kriging: The trained Kriging object.
    """
    # Design of experiments: 10 Halton points in [-5, 5]^2.
    nsample = 10
    nvar = 2
    lb = np.array([-5, -5])
    ub = np.array([5, 5])
    _, X = sampling("halton",
                    nvar,
                    nsample,
                    result="real",
                    upbound=ub,
                    lobound=lb)

    # True responses at the sampled points.
    y = evaluate(X, "styblinski")

    # Configure the Kriging run on top of the library defaults.
    KrigInfo = initkriginfo()
    KrigInfo["X"] = X
    KrigInfo["y"] = y
    KrigInfo["problem"] = "styblinski"
    KrigInfo["nrestart"] = 5
    KrigInfo["ub"] = ub
    KrigInfo["lb"] = lb
    KrigInfo["optimizer"] = "lbfgsb"

    # Train and report the leave-one-out cross-validation error.
    krigobj = Kriging(KrigInfo,
                      standardization=True,
                      standtype='default',
                      normy=False,
                      trainvar=False)
    krigobj.train(n_cpu=n_cpu)
    loocverr, _ = krigobj.loocvcalc()
    print("LOOCV error of Kriging model: ", loocverr, "%")

    return krigobj
Ejemplo n.º 6
0
    def train(self, parallel=False, disp=True, KPCAkernel="poly"):
        """
        Train the KPCA-Kriging model by optimizing the KPCA kernel
        weights with L-BFGS-B.

        Args:
            parallel (bool): Unused; kept for interface compatibility.
                Default to False.
            disp (bool): Unused; kept for interface compatibility.
                Default to True.
            KPCAkernel (str): KPCA kernel name; one of 'poly', 'sigmoid',
                'rbf', 'gaussian' or 'linear'. Default to "poly".

        Returns:
            tuple: (drm, loocverr) — the dimension-reduction model and the
            leave-one-out cross-validation error at the optimum weights.

        Raises:
            ValueError: If `KPCAkernel` is not a recognized kernel name.
        """
        # Per-kernel starting point and bounds for the KPCA weights.
        if KPCAkernel.lower() == 'poly':
            upwstart = np.array([2.5, 2.5, 7])
            lowwstart = np.array([-1, -1, 1])
            # Sobol-sampled starting point inside the bounds.
            _, wstart = sampling('sobol',
                                 len(upwstart),
                                 1,
                                 result="real",
                                 upbound=upwstart,
                                 lobound=lowwstart)
        elif KPCAkernel.lower() == 'sigmoid':
            upwstart = np.array([2.5, 2.5])
            lowwstart = np.array([-2, -2])
            wstart = np.array([0, 0])
        elif KPCAkernel.lower() == 'rbf':
            upwstart = np.array([1.5])
            lowwstart = np.array([-0.5])
            wstart = np.array([0])
        elif KPCAkernel.lower() == 'gaussian':
            # One weight per input variable.
            upwstart = np.array([1.5] * self.KrigInfo["nvar"])
            lowwstart = np.array([-0.5] * self.KrigInfo["nvar"])
            wstart = np.array([0] * self.KrigInfo["nvar"])
        elif KPCAkernel.lower() == 'linear':
            upwstart = np.array([2.5])
            lowwstart = np.array([-2])
            wstart = np.array([0])
        else:
            raise ValueError(KPCAkernel.lower() +
                             " kernel option is not a valid kernel")

        # Hyperparameter bounds as (lower, upper) rows for the optimizer.
        optimbound = np.transpose(np.vstack((lowwstart, upwstart)))

        # Optimize on the normalized inputs when standardization is on;
        # the single orig_X assignment replaces the duplicated one.
        print("Optimize Hyperparams")
        if self.standardization is True:
            original_X = self.KrigInfo['X_norm']
        else:
            original_X = self.KrigInfo['X']
        self.KrigInfo['orig_X'] = original_X
        res = minimize(self.kpcaopt,
                       wstart,
                       method='L-BFGS-B',
                       options={
                           'maxfun': 50,
                           'eps': 1e-4
                       },
                       bounds=optimbound,
                       args=(KPCAkernel, original_X))
        wopt = res.x

        # Rebuild the model at the optimum weights and store them.
        drm, loocverr = self.kpcaopt(wopt, KPCAkernel, original_X, out='all')
        self.KrigInfo['kpcaw'] = wopt
        return drm, loocverr
Ejemplo n.º 7
0
    def train(self, n_cpu=1, disp=True, pre_theta=None, pool=None):
        """
        Train the Kriging model by optimizing its hyperparameters.

        Args:
            n_cpu (int): If > 1, uses parallel processing. Defaults
                to 1.
            disp (bool, optional): Print progress updates. Defaults to True.
            pre_theta (array-like, optional): Pre-trained hyperparameters;
                if given, restart points are random perturbations of it
                instead of the Sobol design. Defaults to None.
            pool (multiprocessing.Pool, optional): A multiprocessing.Pool
                instance. Will be passed to functions for use, if
                specified. Defaults to None.
        Returns:
            None: self.KrigInfo is updated in place via `likelihood`.
        """
        if disp:
            print("Begin train hyperparam.")

        # Isotropic gaussian kernel: one shared length-scale hyperparameter.
        if self.KrigInfo["kernel"] == ["iso_gaussian"
                                       ]:  # TODO: Should this be in a list?
            self.nbhyp = 1
        elif len(self.KrigInfo["kernel"]
                 ) != 1 and "iso_gaussian" in self.KrigInfo["kernel"]:
            raise NotImplementedError(
                "Isotropic Gaussian kernel is not available for composite kernel"
            )
        else:
            # Keep nbhyp consistent with the hyperparameter bound vectors.
            if len(self.KrigInfo["ubhyp"]) != self.nbhyp:
                self.nbhyp = len(self.KrigInfo["ubhyp"])

        # Create multiple starting points (one per restart) via Sobol
        # sampling between the hyperparameter bounds.
        if self.KrigInfo['nrestart'] < 1:
            xhyp = self.nbhyp * [0]
        else:
            if self.nbhyp <= 40:
                _, xhyp = sampling('sobol',
                                   len(self.KrigInfo["ubhyp"]),
                                   self.KrigInfo['nrestart'],
                                   result="real",
                                   upbound=self.KrigInfo["ubhyp"],
                                   lobound=self.KrigInfo["lbhyp"])
            else:
                # Higher-dimensional case uses the 'sobolnew' variant.
                _, xhyp = sampling('sobolnew',
                                   len(self.KrigInfo["ubhyp"]),
                                   self.KrigInfo['nrestart'],
                                   result="real",
                                   upbound=self.KrigInfo["ubhyp"],
                                   lobound=self.KrigInfo["lbhyp"])

        # multiple starting from pre-trained theta: pre_theta itself plus
        # nrestart-1 random perturbations of it replace the Sobol points.
        if pre_theta is not None:
            xhyp = np.random.rand(self.KrigInfo['nrestart'] - 1,
                                  len(self.KrigInfo["ubhyp"])) + pre_theta
            xhyp = np.vstack((pre_theta, xhyp))

        # Optimize hyperparam if number of hyperparameter is 1 using golden section method
        if self.nbhyp == 1:
            if self.KrigInfo["optimizer"] != "ga":
                # 1-D problem: scalar golden-section search on neg-ln-likelihood.
                res = minimize_scalar(likelihood,
                                      bounds=(self.lb, self.ub),
                                      method='golden',
                                      args=(self.KrigInfo, 'default',
                                            self.trainvar))
                if self.KrigInfo["kernel"] == ["iso_gaussian"]:
                    best_x = res.x
                else:
                    # Non-isotropic path expects an array of hyperparams.
                    best_x = np.array([res.x])
                neglnlikecand = likelihood(best_x,
                                           self.KrigInfo,
                                           trainvar=self.trainvar)
            else:
                # Genetic-algorithm fallback for the 1-D case.
                best_x, neglnlikecand, _ = uncGA(likelihood,
                                                 lb=self.lb,
                                                 ub=self.ub,
                                                 npop=100,
                                                 maxg=100,
                                                 args=(self.KrigInfo,
                                                       'default',
                                                       self.trainvar))
            if disp:
                print(f"Best hyperparameter is {best_x}")
                print(f"With NegLnLikelihood of {neglnlikecand}")
        else:
            # Set Bounds and Constraints for Optimizer
            # Set Bounds for LBSGSB or SLSQP if one is used.
            if self.KrigInfo["optimizer"] == "lbfgsb" or self.KrigInfo[
                    "optimizer"] == "slsqp":
                optimbound = np.transpose(
                    np.vstack(
                        (self.KrigInfo["lbhyp"], self.KrigInfo["ubhyp"])))
            # Set Constraints for Cobyla if used
            elif self.KrigInfo["optimizer"] == "cobyla":
                # COBYLA takes inequality constraints rather than bounds:
                # two per hyperparameter, x[i] >= lb[i] and ub[i] >= x[i].
                # itemp=i binds the loop index early (late-binding closure).
                optimbound = []
                for i in range(len(self.KrigInfo["ubhyp"])):
                    # params aa and bb are not used, just to avoid error in Cobyla optimizer
                    optimbound.append(lambda x, Kriginfo, aa, bb, itemp=i: x[
                        itemp] - self.KrigInfo["lbhyp"][itemp])
                    optimbound.append(lambda x, Kriginfo, aa, bb, itemp=i: self
                                      .KrigInfo["ubhyp"][itemp] - x[itemp])
            else:
                optimbound = None

            if disp:
                print(
                    f"Training {self.KrigInfo['nrestart']} hyperparameter(s)")

            # Train hyperparams: one local optimization per starting point.
            bestxcand, neglnlikecand = self.parallelopt(xhyp,
                                                        n_cpu,
                                                        optimbound,
                                                        disp=disp,
                                                        pool=pool)

            # Search best hyperparams among the candidates
            I = np.argmin(neglnlikecand)
            best_x = bestxcand[I, :]

            if disp:
                print("Single Objective, train hyperparam, end.")
                print(f"Best hyperparameter is {best_x}")
                print(f"With NegLnLikelihood of {neglnlikecand[I]}")

            # Calculate Kriging model based on the best hyperparam.

        self.KrigInfo = likelihood(best_x,
                                   self.KrigInfo,
                                   mode='all',
                                   trainvar=self.trainvar)
Ejemplo n.º 8
0
                multiupdate=5,
                expconst=expconstlist,
                chpconst=cheapconstlist)
    xupdate, yupdate, supdate, metricall = mobo.run(disp=True)
    return xupdate, yupdate, supdate, metricall


if __name__ == '__main__':
    # Experiment setup: 20 Halton samples of a 2-variable problem over
    # the box [0, 5] x [0, 3], trained with 12 CPUs.
    nsample = 20
    nvar = 2
    lb = np.array([0, 0])
    ub = np.array([5, 3])
    sampoption = "halton"
    n_cpu = 12
    # sampling returns (normalized, real-scaled) designs; only the
    # real-scaled one is used below.
    samplenorm, sample = sampling(sampoption,
                                  nvar,
                                  nsample,
                                  result="real",
                                  upbound=ub,
                                  lobound=lb)
    X = sample

    # Evaluate the objective and the expensive constraints at the samples.
    y = cust_func(X)
    g = exp_const_eval(X)

    # Create Kriging surrogates for objectives and constraints.
    kriglist, expconstlist = construct_krig(X, y, g, lb, ub, n_cpu)

    # Run the optimization driven by the surrogates.
    xupdate, yupdate, supdate, metricall = optimize(kriglist, expconstlist)