Example #1
    def _compute_pls(self, X, y):
        _pls = pls(self.options["n_comp"])
        # As of sklearn 0.24.1, PLS with zeroed outputs raises an exception,
        # while sklearn 0.23 returns zeroed x_rotations_. For now the
        # try/except below is a workaround to restore the 0.23 behaviour.
        try:
            self.coeff_pls = _pls.fit(X.copy(), y.copy()).x_rotations_
        except StopIteration:
            self.coeff_pls = np.zeros((X.shape[1], self.options["n_comp"]))
        return X, y
Example #2
    def _new_train_init(self):
        if self.name in ["MFKPLS", "MFKPLSK"]:
            _pls = pls(self.options["n_comp"])

            # As of sklearn 0.24.1, PLS with zeroed outputs raises an exception,
            # while sklearn 0.23 returns zeroed x_rotations_. For now the
            # try/except below is a workaround to restore the 0.23 behaviour.
            try:
                # PLS is done on the highest fidelity identified by the key None
                self.m_pls = _pls.fit(
                    self.training_points[None][0][0].copy(),
                    self.training_points[None][0][1].copy(),
                )
                self.coeff_pls = self.m_pls.x_rotations_
            except StopIteration:
                self.coeff_pls = np.zeros(
                    (self.training_points[None][0][0].shape[1],
                     self.options["n_comp"])
                )

        xt = []
        yt = []
        i = 0
        while self.training_points.get(i, None) is not None:
            xt.append(self.training_points[i][0][0])
            yt.append(self.training_points[i][0][1])
            i = i + 1
        xt.append(self.training_points[None][0][0])
        yt.append(self.training_points[None][0][1])

        self._check_list_structure(xt, yt)
        self._check_param()
        X = self.X
        y = self.y

        _, _, self.X_offset, self.y_mean, self.X_scale, self.y_std = standardization(
            np.concatenate(xt, axis=0), np.concatenate(yt, axis=0))

        nlevel = self.nlvl

        # initialize lists
        self.optimal_noise_all = nlevel * [0]
        self.D_all = nlevel * [0]
        self.F_all = nlevel * [0]
        self.p_all = nlevel * [0]
        self.q_all = nlevel * [0]
        self.optimal_rlf_value = nlevel * [0]
        self.optimal_par = nlevel * [{}]
        self.optimal_theta = nlevel * [0]
        self.X_norma_all = [(x - self.X_offset) / self.X_scale for x in X]
        self.y_norma_all = [(f - self.y_mean) / self.y_std for f in y]
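Examples #2, #4, and #5 walk a training_points dict in which integer keys hold the lower-fidelity sets and the key None holds the highest fidelity; each value is a list whose first entry is an (X, y) pair. A hypothetical illustration of that layout and the harvesting loop:

import numpy as np

# hypothetical data: key 0 is a low-fidelity set, None the highest fidelity
training_points = {
    0: [(np.random.rand(20, 2), np.random.rand(20, 1))],
    None: [(np.random.rand(5, 2), np.random.rand(5, 1))],
}

xt, yt, i = [], [], 0
while training_points.get(i, None) is not None:
    xt.append(training_points[i][0][0])  # X at fidelity level i
    yt.append(training_points[i][0][1])  # y at fidelity level i
    i += 1
xt.append(training_points[None][0][0])   # highest fidelity goes last
yt.append(training_points[None][0][1])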
Example #3
    def standardize(self):
        """
        Standardize Kriging samples and create regression matrix.

        Returns:
            None
        """
        Kriging.standardize(self)

        # Calculate PLS coefficients
        _pls = pls(self.n_princomp)
        if self.standardization:
            coeff_pls = _pls.fit(
                self.KrigInfo["X_norm"].copy(), self.KrigInfo["y"].copy()
            ).x_rotations_
        else:
            coeff_pls = _pls.fit(
                self.KrigInfo["X"].copy(), self.KrigInfo["y"].copy()
            ).x_rotations_
        self.KrigInfo["plscoeff"] = coeff_pls
Example #4
    def _new_train_init(self):
        xt = []
        yt = []
        i = 0
        _pls = pls(self.options["n_comp"])
        # PLS done on the highest fidelity identified by the key None
        self.m_pls = _pls.fit(
            self.training_points[None][0][0].copy(),
            self.training_points[None][0][1].copy(),
        )
        self.coeff_pls = self.m_pls.x_rotations_

        while self.training_points.get(i, None) is not None:
            xt.append(self.training_points[i][0][0])
            yt.append(self.training_points[i][0][1])
            i = i + 1
        xt.append(self.training_points[None][0][0])
        yt.append(self.training_points[None][0][1])

        self._check_list_structure(xt, yt)
        self._check_param()
        X = self.X
        y = self.y

        _, _, self.X_mean, self.y_mean, self.X_std, self.y_std = standardization(
            np.concatenate(xt, axis=0), np.concatenate(yt, axis=0))

        nlevel = self.nlvl

        # initialize lists
        self.noise = nlevel * [0]
        self.D_all = nlevel * [0]
        self.F_all = nlevel * [0]
        self.p_all = nlevel * [0]
        self.q_all = nlevel * [0]
        self.optimal_rlf_value = nlevel * [0]
        self.optimal_par = nlevel * [{}]
        self.optimal_theta = nlevel * [0]
        self.X_norma_all = [(x - self.X_mean) / self.X_std for x in X]
        self.y_norma_all = [(f - self.y_mean) / self.y_std for f in y]
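Examples #2, #4, and #5 unpack six values from a standardization helper: the normalized data (discarded here) plus offsets/means and scales/stds for X and y, under slightly different attribute names. A minimal sketch of what such a helper could look like; this is an assumption, not SMT's exact implementation:

import numpy as np

def standardization(X, y):
    # center and scale both inputs and outputs, returning the statistics
    X_offset, y_mean = X.mean(axis=0), y.mean(axis=0)
    X_scale, y_std = X.std(axis=0), y.std(axis=0)
    X_scale[X_scale == 0.0] = 1.0  # guard against constant columns
    y_std[y_std == 0.0] = 1.0
    X_norma = (X - X_offset) / X_scale
    y_norma = (y - y_mean) / y_std
    return X_norma, y_norma, X_offset, y_mean, X_scale, y_std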
Example #5
    def _new_train(self):
        """
        Overrides the KrgBased implementation.
        Trains the multi-fidelity model.
        """

        xt = []
        yt = []
        i = 0
        _pls = pls(self.options['n_comp'])
        self.m_pls = _pls.fit(self.training_points[None][0][0].copy(),
                              self.training_points[None][0][1].copy())
        self.coeff_pls = self.m_pls.x_rotations_

        while self.training_points.get(i, None) is not None:
            xt.append(self.training_points[i][0][0])
            yt.append(self.training_points[i][0][1])
            i = i + 1
        xt.append(self.training_points[None][0][0])
        yt.append(self.training_points[None][0][1])

        self._check_list_structure(xt, yt)
        self._check_param()
        X = self.X
        y = self.y

        _, _, self.X_mean, self.y_mean, self.X_std, self.y_std = standardization(
            np.concatenate(xt, axis=0), np.concatenate(yt, axis=0)
        )

        nlevel = self.nlvl
        n_samples = self.nt_all

        # initialize lists
        self.noise = nlevel * [0]
        self.D_all = nlevel * [0]
        self.F_all = nlevel * [0]
        self.p_all = nlevel * [0]
        self.q_all = nlevel * [0]
        self.optimal_rlf_value = nlevel * [0]
        self.optimal_par = nlevel * [{}]
        self.optimal_theta = nlevel * [0]
        self.X_norma_all = [(x - self.X_mean) / self.X_std for x in X]
        self.y_norma_all = [(f - self.y_mean) / self.y_std for f in y]

        for lvl in range(nlevel):
            self.X_norma = self.X_norma_all[lvl]
            self.y_norma = self.y_norma_all[lvl]
            # Calculate matrix of distances D between samples
            self.D_all[lvl] = l1_cross_distances(self.X_norma)

            # Regression matrix and parameters
            self.F_all[lvl] = self.options['poly'](self.X_norma)
            self.p_all[lvl] = self.F_all[lvl].shape[1]

            # Concatenate the autoregressive part for levels > 0
            if lvl > 0:
                F_rho = self.options['rho_regr'](self.X_norma)
                self.q_all[lvl] = F_rho.shape[1]
                self.F_all[lvl] = np.hstack((F_rho * np.dot(
                    self._predict_intermediate_values(
                        self.X_norma, lvl, descale=False),
                    np.ones((1, self.q_all[lvl]))), self.F_all[lvl]))
            else:
                self.q_all[lvl] = 0

            n_samples_F_i = self.F_all[lvl].shape[0]

            if n_samples_F_i != n_samples[lvl]:
                raise Exception("Number of rows in F and X do not match. Most "
                                "likely something is going wrong with the "
                                "regression model.")

            if int(self.p_all[lvl] + self.q_all[lvl]) >= n_samples_F_i:
                raise Exception(
                    ("Ordinary least squares problem is underdetermined: "
                     "n_samples=%d must be greater than the regression"
                     " model size p+q=%d.") %
                    (n_samples[lvl], self.p_all[lvl] + self.q_all[lvl]))

            # Determine Gaussian Process model parameters
            self.F = self.F_all[lvl]
            D, self.ij = self.D_all[lvl]
            self._lvl = lvl
            self.nt = self.nt_all[lvl]
            self.q = self.q_all[lvl]
            self.p = self.p_all[lvl]
            self.optimal_rlf_value[lvl], self.optimal_par[lvl], self.optimal_theta[lvl] = \
                self._optimize_hyperparam(D)
            if self.options['eval_noise']:
                tmp_list = self.optimal_theta[lvl]
                self.optimal_theta[lvl] = tmp_list[:-1]
                self.noise[lvl] = tmp_list[-1]
            del self.y_norma, self.D

        if self.options['eval_noise'] and self.options['optim_var']:
            for lvl in range(self.nlvl - 1):
                self.set_training_values(X[lvl],
                                         self._predict_intermediate_values(
                                             X[lvl], lvl + 1),
                                         name=lvl)
            self.set_training_values(
                X[-1], self._predict_intermediate_values(X[-1], self.nlvl))
            self.options['eval_noise'] = False
            self._new_train()
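The per-level loop in Example #5 unpacks D, self.ij from l1_cross_distances(self.X_norma). A plausible sketch of that helper, inferred from how its results are used (the real implementation may differ in sign convention or pair ordering):

import numpy as np

def l1_cross_distances(X):
    # componentwise differences for every sample pair, plus the pair indices
    n = X.shape[0]
    ij = np.array([(k, l) for k in range(n) for l in range(k + 1, n)])
    D = X[ij[:, 0]] - X[ij[:, 1]]  # shape (n*(n-1)/2, dim)
    return D, ij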
Example #6
    def _compute_pls(self, X, y):
        _pls = pls(self.options['n_comp'])
        self.coeff_pls = _pls.fit(X.copy(), y.copy()).x_rotations_

        return X, y
Example #7
import numpy as np
from pyDOE2 import bbdesign
from sklearn.cross_decomposition import PLSRegression as pls


def ge_compute_pls(X, y, n_comp, pts, delta_x, xlimits, extra_points):
    """
    Gradient-enhanced PLS-coefficients.

    Parameters
    ----------
    X: np.ndarray [n_obs, dim]
            - The input variables.

    y: np.ndarray [n_obs, 1]
            - The output variable.

    n_comp: int
            - Number of principal components used.

    pts: dict()
            - The gradient values.

    delta_x: real
            - The step used in the FOTA (first-order Taylor approximation).

    xlimits: np.ndarray [dim, 2]
            - The lower and upper variable bounds.

    extra_points: int
            - The number of extra points per training point.

    Returns
    -------
    Coeff_pls: np.ndarray [dim, n_comp]
            - The PLS-coefficients.

    XX: np.ndarray [extra_points*nt, dim]
            - Extra points added (when extra_points > 0).

    yy: np.ndarray [extra_points*nt, 1]
            - Extra points added (when extra_points > 0).
    """
    nt, dim = X.shape
    XX = np.empty(shape=(0, dim))
    yy = np.empty(shape=(0, 1))
    _pls = pls(n_comp)

    coeff_pls = np.zeros((nt, dim, n_comp))
    for i in range(nt):
        if dim >= 3:
            # Box-Behnken design of perturbations around the i-th point
            sign = np.roll(bbdesign(int(dim), center=1), 1, axis=0)
            sign = sign * delta_x * (xlimits[:, 1] - xlimits[:, 0])
            _X = X[i, :] + sign
            # scale each step column by the matching gradient component
            for j in range(1, dim + 1):
                sign[:, j - 1] = sign[:, j - 1] * pts[None][j][1][i, 0]
            _y = y[i, :] + np.sum(sign, axis=1).reshape((sign.shape[0], 1))
        else:
            _X = np.zeros((9, dim))
            _y = np.zeros((9, 1))
            # center
            _X[:, :] = X[i, :].copy()
            _y[0, 0] = y[i, 0].copy()
            # right
            _X[1, 0] += delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _y[1, 0] = _y[0, 0].copy() + pts[None][1][1][i, 0] * delta_x * (
                xlimits[0, 1] - xlimits[0, 0]
            )
            # up
            _X[2, 1] += delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[2, 0] = _y[0, 0].copy() + pts[None][2][1][i, 0] * delta_x * (
                xlimits[1, 1] - xlimits[1, 0]
            )
            # left
            _X[3, 0] -= delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _y[3, 0] = _y[0, 0].copy() - pts[None][1][1][i, 0] * delta_x * (
                xlimits[0, 1] - xlimits[0, 0]
            )
            # down
            _X[4, 1] -= delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[4, 0] = _y[0, 0].copy() - pts[None][2][1][i, 0] * delta_x * (
                xlimits[1, 1] - xlimits[1, 0]
            )
            # right up
            _X[5, 0] += delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _X[5, 1] += delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[5, 0] = (
                _y[0, 0].copy()
                + pts[None][1][1][i, 0] * delta_x * (xlimits[0, 1] - xlimits[0, 0])
                + pts[None][2][1][i, 0] * delta_x * (xlimits[1, 1] - xlimits[1, 0])
            )
            # left up
            _X[6, 0] -= delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _X[6, 1] += delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[6, 0] = (
                _y[0, 0].copy()
                - pts[None][1][1][i, 0] * delta_x * (xlimits[0, 1] - xlimits[0, 0])
                + pts[None][2][1][i, 0] * delta_x * (xlimits[1, 1] - xlimits[1, 0])
            )
            # left down
            _X[7, 0] -= delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _X[7, 1] -= delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[7, 0] = (
                _y[0, 0].copy()
                - pts[None][1][1][i, 0] * delta_x * (xlimits[0, 1] - xlimits[0, 0])
                - pts[None][2][1][i, 0] * delta_x * (xlimits[1, 1] - xlimits[1, 0])
            )
            # right down
            _X[8, 0] += delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _X[8, 1] -= delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[8, 0] = (
                _y[0, 0].copy()
                + pts[None][1][1][i, 0] * delta_x * (xlimits[0, 1] - xlimits[0, 0])
                - pts[None][2][1][i, 0] * delta_x * (xlimits[1, 1] - xlimits[1, 0])
            )

        # As of sklearn 0.24.1, a zeroed _y raises an exception, while sklearn
        # 0.23 returns zeroed x_rotations_. For now the try/except below is a
        # workaround to restore the 0.23 behaviour.
        try:
            _pls.fit(_X.copy(), _y.copy())
            coeff_pls[i, :, :] = _pls.x_rotations_
        except StopIteration:
            coeff_pls[i, :, :] = 0

        # Add additional points
        if extra_points != 0:
            max_coeff = np.argsort(np.abs(coeff_pls[i, :, 0]))[-extra_points:]
            for ii in max_coeff:
                XX = np.vstack((XX, X[i, :]))
                XX[-1, ii] += delta_x * (xlimits[ii, 1] - xlimits[ii, 0])
                yy = np.vstack((yy, y[i, 0]))
                yy[-1, 0] += (
                    pts[None][1 + ii][1][i, 0]
                    * delta_x
                    * (xlimits[ii, 1] - xlimits[ii, 0])
                )
    return np.abs(coeff_pls).mean(axis=0), XX, yy
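The extra-points loop at the end relies on the same first-order Taylor step used throughout Example #7: move one coordinate by delta_x scaled to the variable range and extrapolate the response with the stored gradient component. A minimal standalone illustration with hypothetical values:

import numpy as np

x = np.array([0.3, 0.7])                      # one training point
y0 = 1.2                                      # response at x
grad = np.array([2.0, -1.0])                  # gradient dy/dx at x
xlimits = np.array([[0.0, 1.0], [0.0, 1.0]])  # variable bounds
delta_x = 1e-4

step = delta_x * (xlimits[:, 1] - xlimits[:, 0])  # range-scaled step
x_extra = x.copy()
x_extra[0] += step[0]                 # perturb the first coordinate
y_extra = y0 + grad[0] * step[0]      # first-order Taylor extrapolation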