Example 1
    def _predict_values(self, x):
        """
        Evaluates the model at a set of points.

        Parameters
        ----------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values

        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        """
        # Initialization
        n_eval, n_features_x = x.shape
        x = (x - self.X_offset) / self.X_scale
        # Get pairwise componentwise L1-distances to the input training set
        dx = differences(x, Y=self.X_norma.copy())
        d = self._componentwise_distance(dx)
        # Compute the correlation function
        r = self._correlation_types[self.options["corr"]](self.optimal_theta,
                                                          d).reshape(
                                                              n_eval, self.nt)
        # Compute the regression function
        f = self._regression_types[self.options["poly"]](x)
        # Scaled predictor
        y_ = np.dot(f, self.optimal_par["beta"]) + np.dot(
            r, self.optimal_par["gamma"])
        # Predictor
        y = (self.y_mean + self.y_std * y_).ravel()

        return y
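
For context, a minimal usage sketch. It assumes (my inference, not stated in the snippet) that this comes from SMT's kriging surrogate, smt.surrogate_models.KRG, whose public predict_values() delegates to the private _predict_values() above; the training data are placeholders.

import numpy as np
from smt.surrogate_models import KRG

xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])  # training inputs
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])            # training outputs

sm = KRG(theta0=[1e-2])
sm.set_training_values(xt, yt)
sm.train()

# Internally: normalize x, build the correlation vector r and the regression
# matrix f, then de-normalize the scaled predictor, as in the method above.
y = sm.predict_values(np.array([[2.5]]))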
Example 2
    def _predict_derivatives(self, x, kx):
        """
        Evaluates the derivatives at a set of points.

        Parameters
        ----------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        y : np.ndarray
            Derivative values.
        """
        # Initialization
        n_eval, n_features_x = x.shape
        if self.options["corr"] == "gower":
            r = np.exp(-gower_matrix(
                x, data_y=self.X_train, weight=np.asarray(self.optimal_theta)))
        else:
            x = (x - self.X_offset) / self.X_scale
            # Get pairwise componentwise L1-distances to the input training set
            dx = differences(x, Y=self.X_norma.copy())
            d = self._componentwise_distance(dx)
            # Compute the correlation function
            r = self._correlation_types[self.options["corr"]](
                self.optimal_theta, d).reshape(n_eval, self.nt)

        if self.options["corr"] != "squar_exp":
            raise ValueError(
                "The derivative is only available for squared exponential kernel"
            )
        if self.options["poly"] == "constant":
            df = np.zeros((1, self.nx))
        elif self.options["poly"] == "linear":
            df = np.zeros((self.nx + 1, self.nx))
            df[1:, :] = np.eye(self.nx)
        else:
            raise ValueError(
                "The derivative is only available for ordinary kriging or " +
                "universal kriging using a linear trend")

        # Beta and gamma = R^-1(y-FBeta)
        beta = self.optimal_par["beta"]
        gamma = self.optimal_par["gamma"]
        df_dx = np.dot(df.T, beta)
        d_dx = x[:, kx].reshape((n_eval, 1)) - self.X_norma[:, kx].reshape(
            (1, self.nt))
        if self.name != "Kriging" and "KPLSK" not in self.name:
            theta = np.sum(self.optimal_theta * self.coeff_pls**2, axis=1)
        else:
            theta = self.optimal_theta
        y = ((df_dx[kx] - 2 * theta[kx] * np.dot(d_dx * r, gamma)) *
             self.y_std / self.X_scale[kx])
        return y
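
A hedged sketch of the corresponding public call, under the same KRG assumption as above. Per the checks in the method, the kernel must be "squar_exp" and the trend constant or linear.

import numpy as np
from smt.surrogate_models import KRG

xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])

sm = KRG(corr="squar_exp", theta0=[1e-2])  # derivatives need squar_exp
sm.set_training_values(xt, yt)
sm.train()

# dy/dx_0 at x = 2.5; kx is the 0-based index of the input variable
dy_dx0 = sm.predict_derivatives(np.array([[2.5]]), kx=0)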
Example 3
    def _predict_values(self, x):
        """
        Evaluates the model at a set of points.

        Parameters
        ----------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values

        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        """
        # Initialization
        n_eval, n_features_x = x.shape
        if self.options["corr"] == "gower":
            # Compute the correlation function
            r = np.exp(-gower_matrix(
                x, data_y=self.X_train, weight=np.asarray(self.optimal_theta)))
            if not isinstance(x, np.ndarray):
                is_number = np.vectorize(
                    lambda x: not np.issubdtype(x, np.number))
                cat_features = is_number(x.dtypes)
            else:
                cat_features = np.zeros(n_features_x, dtype=bool)
                for col in range(n_features_x):
                    if not np.issubdtype(type(x[0, col]), np.number):
                        cat_features[col] = True
            X_cont = x[:, np.logical_not(cat_features)].astype(float)
            X_cont = (X_cont - self.X_offset) / self.X_scale
            # Compute the regression function
            f = self._regression_types[self.options["poly"]](X_cont)
            # Scaled predictor
            y_ = np.dot(f, self.optimal_par["beta"]) + np.dot(
                r, self.optimal_par["gamma"])
            # Predictor
            y = (self.y_mean + self.y_std * y_).ravel()
        else:
            x = (x - self.X_offset) / self.X_scale
            # Get pairwise componentwise L1-distances to the input training set
            dx = differences(x, Y=self.X_norma.copy())
            d = self._componentwise_distance(dx)
            # Compute the correlation function
            r = self._correlation_types[self.options["corr"]](
                self.optimal_theta, d).reshape(n_eval, self.nt)
            # Compute the regression function
            f = self._regression_types[self.options["poly"]](x)
            # Scaled predictor
            y_ = np.dot(f, self.optimal_par["beta"]) + np.dot(
                r, self.optimal_par["gamma"])
            # Predictor
            y = (self.y_mean + self.y_std * y_).ravel()
        return y
Example 4
    def _predict_variances(self, x):
        """
        Provide uncertainty of the model at a set of points

        Parameters
        ----------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values

        Returns
        -------
        MSE : np.ndarray
            Evaluation point output variable MSE
        """
        # Initialization
        n_eval, n_features_x = x.shape
        x = (x - self.X_mean) / self.X_std
        # Get pairwise componentwise L1-distances to the input training set
        dx = differences(x, Y=self.X_norma.copy())
        d = self._componentwise_distance(dx)

        # Compute the correlation function
        r = self._correlation_types[self.options["corr"]](self.optimal_theta,
                                                          d).reshape(
                                                              n_eval, self.nt)

        C = self.optimal_par["C"]
        rt = linalg.solve_triangular(C, r.T, lower=True)

        u = linalg.solve_triangular(
            self.optimal_par["G"].T,
            np.dot(self.optimal_par["Ft"].T, rt) -
            self._regression_types[self.options["poly"]](x).T,
        )

        A = self.optimal_par["sigma2"]
        B = 1.0 - (rt**2.0).sum(axis=0) + (u**2.0).sum(axis=0)
        MSE = np.einsum("i,j -> ji", A, B)

        # Mean Squared Error might be slightly negative depending on
        # machine precision: force to zero!
        MSE[MSE < 0.0] = 0.0
        return MSE
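
A sketch of the public counterpart, again assuming SMT's KRG with placeholder data; predict_variances() exposes the MSE computation above.

import numpy as np
from smt.surrogate_models import KRG

xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])

sm = KRG(theta0=[1e-2])
sm.set_training_values(xt, yt)
sm.train()

# The MSE collapses to ~0 at the training points and grows in between;
# negative round-off values are clipped to zero, as in the method above.
var = sm.predict_variances(np.array([[2.5]]))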
Example 5
    def _predict_derivatives(self, x, kx):
        """
        Evaluates the derivatives at a set of points.

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        y : np.ndarray
            Derivative values.
        """

        lvl = self.nlvl
        # Initialization

        n_eval, n_features_x = x.shape
        x = (x - self.X_offset) / self.X_scale

        dy_dx = np.zeros((n_eval, lvl))

        if self.options["corr"] != "squar_exp":
            raise ValueError(
                "The derivative is only available for square exponential kernel"
            )
        if self.options["poly"] == "constant":
            df = np.zeros([n_eval, 1])
        elif self.options["poly"] == "linear":
            df = np.zeros((n_eval, self.nx + 1))
            df[:, 1:] = 1
        else:
            raise ValueError(
                "The derivative is only available for ordinary kriging or " +
                "universal kriging using a linear trend")
        df0 = copy.deepcopy(df)
        if self.options["rho_regr"] != "constant":
            raise ValueError(
                "The derivative is only available for regression rho constant")
        # Get pairwise componentwise L1-distances to the input training set
        dx = differences(x, Y=self.X_norma_all[0])
        d = self._componentwise_distance(dx)
        # Compute the correlation function
        r_ = self._correlation_types[self.options["corr"]](
            self.optimal_theta[0], d).reshape(n_eval, self.nt_all[0])

        # Beta and gamma = R^-1(y-FBeta)
        beta = self.optimal_par[0]["beta"]
        gamma = self.optimal_par[0]["gamma"]

        df_dx = np.dot(df, beta)
        d_dx = x[:, kx].reshape(
            (n_eval, 1)) - self.X_norma_all[0][:, kx].reshape(
                (1, self.nt_all[0]))
        theta = self.optimal_theta[0]

        dy_dx[:, 0] = np.ravel(
            (df_dx - 2 * theta[kx] * np.dot(d_dx * r_, gamma)))

        # Calculate recursively derivative at level i
        for i in range(1, lvl):
            F = self.F_all[i]
            C = self.optimal_par[i]["C"]
            g = self._regression_types[self.options["rho_regr"]](x)
            dx = differences(x, Y=self.X_norma_all[i])
            d = self._componentwise_distance(dx)
            r_ = self._correlation_types[self.options["corr"]](
                self.optimal_theta[i], d).reshape(n_eval, self.nt_all[i])
            df = np.vstack((g.T * dy_dx[:, i - 1], df0.T))

            Ft = solve_triangular(C, F, lower=True)
            yt = solve_triangular(C, self.y_norma_all[i], lower=True)
            beta = self.optimal_par[i]["beta"]
            gamma = self.optimal_par[i]["gamma"]

            df_dx = np.dot(df.T, beta)
            d_dx = x[:, kx].reshape(
                (n_eval, 1)) - self.X_norma_all[i][:, kx].reshape(
                    (1, self.nt_all[i]))
            theta = self.optimal_theta[i]
            # scaled predictor
            dy_dx[:, i] = np.ravel(df_dx -
                                   2 * theta[kx] * np.dot(d_dx * r_, gamma))

        return dy_dx[:, -1] * self.y_std / self.X_scale[kx]
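
The recursive multi-level structure suggests a multi-fidelity kriging class. A hedged sketch assuming SMT's MFK application; the name=0 convention for the lowest-fidelity training set and the placeholder Forrester-style data are assumptions.

import numpy as np
from smt.applications import MFK

# low-fidelity (cheap) and high-fidelity (expensive) training data
xt_c = np.linspace(0.0, 1.0, 11).reshape(-1, 1)
yt_c = 0.5 * (6 * xt_c - 2) ** 2 * np.sin(12 * xt_c - 4) + 10 * (xt_c - 0.5) - 5
xt_e = np.array([[0.0], [0.4], [0.6], [1.0]])
yt_e = (6 * xt_e - 2) ** 2 * np.sin(12 * xt_e - 4)

sm = MFK(theta0=[1e-2])
sm.set_training_values(xt_c, yt_c, name=0)  # fidelity level 0 (lowest)
sm.set_training_values(xt_e, yt_e)          # highest fidelity
sm.train()

# Derivative at the highest level, assembled recursively as in the method above
dy_dx0 = sm.predict_derivatives(np.array([[0.3]]), kx=0)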
Example 6
    def predict_variances_all_levels(self, X):
        """
        Evaluates the model at a set of points.

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values

        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        """
        # Initialization
        nlevel = self.nlvl
        sigma2_rhos = []
        n_eval, n_features_X = X.shape
        X = (X - self.X_offset) / self.X_scale
        # Calculate kriging mean and variance at level 0
        mu = np.zeros((n_eval, nlevel))
        f = self._regression_types[self.options["poly"]](X)
        f0 = self._regression_types[self.options["poly"]](X)
        dx = differences(X, Y=self.X_norma_all[0])
        d = self._componentwise_distance(dx)
        # Get regression function and correlation
        F = self.F_all[0]
        C = self.optimal_par[0]["C"]

        beta = self.optimal_par[0]["beta"]
        Ft = solve_triangular(C, F, lower=True)
        yt = solve_triangular(C, self.y_norma_all[0], lower=True)
        r_ = self._correlation_types[self.options["corr"]](
            self.optimal_theta[0], d).reshape(n_eval, self.nt_all[0])
        gamma = self.optimal_par[0]["gamma"]

        # Scaled predictor
        mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()

        self.sigma2_rho = nlevel * [None]
        MSE = np.zeros((n_eval, nlevel))
        r_t = solve_triangular(C, r_.T, lower=True)
        G = self.optimal_par[0]["G"]

        u_ = solve_triangular(G.T, f.T - np.dot(Ft.T, r_t), lower=True)
        sigma2 = self.optimal_par[0]["sigma2"] / self.y_std**2
        MSE[:, 0] = sigma2 * (1 - (r_t**2).sum(axis=0) + (u_**2).sum(axis=0))

        # Calculate recursively kriging variance at level i
        for i in range(1, nlevel):
            F = self.F_all[i]
            C = self.optimal_par[i]["C"]
            g = self._regression_types[self.options["rho_regr"]](X)
            dx = differences(X, Y=self.X_norma_all[i])
            d = self._componentwise_distance(dx)
            r_ = self._correlation_types[self.options["corr"]](
                self.optimal_theta[i], d).reshape(n_eval, self.nt_all[i])
            f = np.vstack((g.T * mu[:, i - 1], f0.T))

            Ft = solve_triangular(C, F, lower=True)
            yt = solve_triangular(C, self.y_norma_all[i], lower=True)
            r_t = solve_triangular(C, r_.T, lower=True)
            G = self.optimal_par[i]["G"]
            beta = self.optimal_par[i]["beta"]

            # scaled predictor
            sigma2 = self.optimal_par[i]["sigma2"] / self.y_std**2
            q = self.q_all[i]
            u_ = solve_triangular(G.T, f - np.dot(Ft.T, r_t), lower=True)
            sigma2_rho = np.dot(
                g,
                sigma2 * linalg.inv(np.dot(G.T, G))[:q, :q] +
                np.dot(beta[:q], beta[:q].T),
            )
            sigma2_rho = (sigma2_rho * g).sum(axis=1)
            sigma2_rhos.append(sigma2_rho)

            MSE[:, i] = sigma2_rho * MSE[:, i - 1] + sigma2 * (
                1 - (r_t**2).sum(axis=0) + (u_**2).sum(axis=0))

        # scaled predictor
        MSE *= self.y_std**2

        return MSE, sigma2_rhos
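
Since predict_variances_all_levels() has no leading underscore it can be called directly; a sketch continuing the same hedged MFK setup as above.

import numpy as np
from smt.applications import MFK

xt_c = np.linspace(0.0, 1.0, 11).reshape(-1, 1)
yt_c = 0.5 * (6 * xt_c - 2) ** 2 * np.sin(12 * xt_c - 4) + 10 * (xt_c - 0.5) - 5
xt_e = np.array([[0.0], [0.4], [0.6], [1.0]])
yt_e = (6 * xt_e - 2) ** 2 * np.sin(12 * xt_e - 4)

sm = MFK(theta0=[1e-2])
sm.set_training_values(xt_c, yt_c, name=0)
sm.set_training_values(xt_e, yt_e)
sm.train()

X_new = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
MSE, sigma2_rhos = sm.predict_variances_all_levels(X_new)
# MSE has one column per fidelity level; the last column is the variance
# of the highest-fidelity prediction.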
Example 7
    def _predict_intermediate_values(self, X, lvl, descale=True):
        """
        Evaluates the model at a set of points.
        Used for training the model at level lvl.
        Allows to relax the order problem.

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        lvl : level at which the prediction is made

        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        """
        n_eval, _ = X.shape

        # Calculate kriging mean and variance at level 0
        mu = np.zeros((n_eval, lvl))
        if descale:
            X = (X - self.X_offset) / self.X_scale
        f = self._regression_types[self.options["poly"]](X)
        f0 = self._regression_types[self.options["poly"]](X)
        dx = differences(X, Y=self.X_norma_all[0])
        d = self._componentwise_distance(dx)
        # Get regression function and correlation
        F = self.F_all[0]
        C = self.optimal_par[0]["C"]

        beta = self.optimal_par[0]["beta"]
        Ft = solve_triangular(C, F, lower=True)
        yt = solve_triangular(C, self.y_norma_all[0], lower=True)
        r_ = self._correlation_types[self.options["corr"]](
            self.optimal_theta[0], d).reshape(n_eval, self.nt_all[0])
        gamma = self.optimal_par[0]["gamma"]

        # Scaled predictor
        mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()

        # Calculate recursively kriging mean and variance at level i
        for i in range(1, lvl):
            F = self.F_all[i]
            C = self.optimal_par[i]["C"]
            g = self._regression_types[self.options["rho_regr"]](X)
            dx = differences(X, Y=self.X_norma_all[i])
            d = self._componentwise_distance(dx)
            r_ = self._correlation_types[self.options["corr"]](
                self.optimal_theta[i], d).reshape(n_eval, self.nt_all[i])
            f = np.vstack((g.T * mu[:, i - 1], f0.T))
            Ft = solve_triangular(C, F, lower=True)
            yt = solve_triangular(C, self.y_norma_all[i], lower=True)
            beta = self.optimal_par[i]["beta"]
            gamma = self.optimal_par[i]["gamma"]
            # scaled predictor
            mu[:, i] = (np.dot(f.T, beta) + np.dot(r_, gamma)).ravel()

        # scaled predictor
        if descale:
            mu = mu * self.y_std + self.y_mean

        return mu[:, -1].reshape((n_eval, 1))
Example 8
    def _differences(self, X, Y):
        """
        Compute the componentwise differences between the rows of X and Y.
        """
        return differences(X, Y)
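
For reference, a small numpy sketch of what the differences helper computes, as I read it: the pairwise, componentwise differences between the rows of X and Y, stacked into an (n_x * n_y, dim) array (the componentwise L1 distances are taken later by _componentwise_distance).

import numpy as np

def differences_sketch(X, Y):
    # pairwise row differences X_i - Y_j, flattened over the pair axis
    return (X[:, None, :] - Y[None, :, :]).reshape(-1, X.shape[1])

X = np.array([[0.0, 0.0], [1.0, 2.0]])
Y = np.array([[1.0, 1.0]])
print(differences_sketch(X, Y))  # [[-1. -1.]
                                 #  [ 0.  1.]]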
Example 9
    def _predict_variance_derivatives(self, x):
        """
        Provide the derivative of the variance of the model at a set of points
        Parameters
        -----------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        Returns
        -------
         derived_variance:  np.ndarray
             The jacobian of the variance of the kriging model
        """

        # Initialization
        n_eval, n_features_x = x.shape
        x = (x - self.X_offset) / self.X_scale
        theta = self.optimal_theta
        # Get pairwise componentwise L1-distances to the input training set
        dx = differences(x, Y=self.X_norma.copy())
        d = self._componentwise_distance(dx)
        dd = self._componentwise_distance(dx,
                                          theta=self.optimal_theta,
                                          return_derivative=True)
        sigma2 = self.optimal_par["sigma2"]

        cholesky_k = self.optimal_par["C"]

        derivative_dic = {"dx": dx, "dd": dd}

        r, dr = self._correlation_types[self.options["corr"]](
            theta, d, derivative_params=derivative_dic)
        rho1 = linalg.solve_triangular(cholesky_k, r, lower=True)
        invKr = linalg.solve_triangular(cholesky_k.T, rho1)

        p1 = np.dot(dr.T, invKr).T

        p2 = np.dot(invKr.T, dr)

        f_x = self._regression_types[self.options["poly"]](x).T
        F = self.F

        rho2 = linalg.solve_triangular(cholesky_k, F, lower=True)
        invKF = linalg.solve_triangular(cholesky_k.T, rho2)

        A = f_x.T - np.dot(r.T, invKF)

        B = np.dot(F.T, invKF)

        rho3 = linalg.cholesky(B, lower=True)
        invBAt = linalg.solve_triangular(rho3, A.T, lower=True)
        D = linalg.solve_triangular(rho3.T, invBAt)

        if self.options["poly"] == "constant":
            df = np.zeros((1, self.nx))
        elif self.options["poly"] == "linear":
            df = np.zeros((self.nx + 1, self.nx))
            df[1:, :] = np.eye(self.nx)
        else:
            raise ValueError(
                "The derivative is only available for ordinary kriging or " +
                "universal kriging using a linear trend")

        dA = df.T - np.dot(dr.T, invKF)
        p3 = np.dot(dA, D).T
        p4 = np.dot(D.T, dA.T)
        prime = -p1 - p2 + p3 + p4

        derived_variance = []
        x_std = np.resize(self.X_scale, self.nx)

        for i in range(len(x_std)):
            derived_variance.append(sigma2 * prime.T[i] / x_std[i])

        return np.array(derived_variance).T
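
A hedged numerical cross-check of this variance Jacobian, using only the public API (assuming SMT's KRG with placeholder data) and central finite differences.

import numpy as np
from smt.surrogate_models import KRG

xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])

sm = KRG(theta0=[1e-2])
sm.set_training_values(xt, yt)
sm.train()

x0, h = np.array([[2.5]]), 1e-6
# dVar/dx_0 at x0, to compare against the analytic Jacobian computed above
dvar_fd = (sm.predict_variances(x0 + h) - sm.predict_variances(x0 - h)) / (2 * h)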
Example 10
    def _predict_variances(self, x):
        """
        Provide uncertainty of the model at a set of points
        Parameters
        ----------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        Returns
        -------
        MSE : np.ndarray
            Evaluation point output variable MSE
        """
        # Initialization
        n_eval, n_features_x = x.shape
        if self.options["corr"] == "gower":
            # Compute the correlation function

            r = np.exp(-gower_matrix(
                x, data_y=self.X_train, weight=np.asarray(self.optimal_theta)))

            if not isinstance(x, np.ndarray):
                is_number = np.vectorize(
                    lambda x: not np.issubdtype(x, np.number))
                cat_features = is_number(x.dtypes)
            else:
                cat_features = np.zeros(n_features_x, dtype=bool)
                for col in range(n_features_x):
                    if not np.issubdtype(type(x[0, col]), np.number):
                        cat_features[col] = True

            X_cont = x[:, np.logical_not(cat_features)].astype(float)
            C = self.optimal_par["C"]
            rt = linalg.solve_triangular(C, r.T, lower=True)

            u = linalg.solve_triangular(
                self.optimal_par["G"].T,
                np.dot(self.optimal_par["Ft"].T, rt) -
                self._regression_types[self.options["poly"]](X_cont).T,
            )
        else:
            x = (x - self.X_offset) / self.X_scale
            # Get pairwise componentwise L1-distances to the input training set
            dx = differences(x, Y=self.X_norma.copy())
            d = self._componentwise_distance(dx)
            # Compute the correlation function
            r = self._correlation_types[self.options["corr"]](
                self.optimal_theta, d).reshape(n_eval, self.nt)

            C = self.optimal_par["C"]
            rt = linalg.solve_triangular(C, r.T, lower=True)

            u = linalg.solve_triangular(
                self.optimal_par["G"].T,
                np.dot(self.optimal_par["Ft"].T, rt) -
                self._regression_types[self.options["poly"]](x).T,
            )

        A = self.optimal_par["sigma2"]
        B = 1.0 - (rt**2.0).sum(axis=0) + (u**2.0).sum(axis=0)
        MSE = np.einsum("i,j -> ji", A, B)

        # Mean Squared Error might be slightly negative depending on
        # machine precision: force to zero!
        MSE[MSE < 0.0] = 0.0
        return MSE
Example 11
    def _predict_variance_derivatives_hyper(self, x, u=None):
        """
        Compute the derivatives of the variance of the GP with respect to the hyperparameters

        Parameters
        ----------
        x : numpy.ndarray
            Point to compute in initial dimension.
        u : numpy.ndarray, optional
            Point to compute in small dimension. The default is None.

        Returns
        -------
        dMSE : numpy.ndarray
            Derivatives of the variance of the GP with respect to the hyperparameters.
        MSE : numpy.ndarray
            Variance of the GP.

        """
        # Initialization
        n_eval, n_features_x = x.shape

        # Get pairwise componentwise L1-distances to the input training set
        dx = differences(x, Y=self.X_norma.copy())
        d_x = self._componentwise_distance(dx)
        if u is not None:
            theta = np.eye(self.options["n_comp"]).reshape(
                (self.options["n_comp"] ** 2,)
            )
            # Get pairwise componentwise L1-distances to the input training set
            du = differences(u, Y=self.U_norma.copy())
            d = self._componentwise_distance(du, small=True)
        else:
            theta = self.optimal_theta
            # Get pairwise componentwise L1-distances to the input training set
            d = d_x
            d_x = None

        # Compute the correlation function
        r = (
            self._correlation_types[self.options["corr"]](theta, d, d_x=d_x)
            .reshape(n_eval, self.nt)
            .T
        )
        f = self._regression_types[self.options["poly"]](x).T

        C = self.optimal_par["C"]
        G = self.optimal_par["G"]
        Ft = self.optimal_par["Ft"]
        sigma2 = self.optimal_par["sigma2"]

        rt = linalg.solve_triangular(C, r, lower=True)

        F_Rinv_r = np.dot(Ft.T, rt)

        u_ = linalg.solve_triangular(G.T, f - F_Rinv_r)

        MSE = self.optimal_par["sigma2"] * (
            1.0 - (rt ** 2.0).sum(axis=0) + (u_ ** 2.0).sum(axis=0)
        )
        # Mean Squared Error might be slightly negative depending on
        # machine precision: force to zero!
        MSE[MSE < 0.0] = 0.0

        Ginv_u = linalg.solve_triangular(G, u_, lower=False)
        Rinv_F = linalg.solve_triangular(C.T, Ft, lower=False)
        Rinv_r = linalg.solve_triangular(C.T, rt, lower=False)
        Rinv_F_Ginv_u = Rinv_F.dot(Ginv_u)

        dMSE = np.zeros((len(self.optimal_theta), n_eval))

        dr_all = self.optimal_par["dr"]
        dsigma = self.optimal_par["dsigma"]

        for omega in range(len(self.optimal_theta)):
            drdomega = (
                self._correlation_types[self.options["corr"]](
                    theta, d, grad_ind=omega, d_x=d_x
                )
                .reshape(n_eval, self.nt)
                .T
            )

            dRdomega = np.zeros((self.nt, self.nt))
            dRdomega[self.ij[:, 0], self.ij[:, 1]] = dr_all[omega][:, 0]
            dRdomega[self.ij[:, 1], self.ij[:, 0]] = dr_all[omega][:, 0]

            # Compute du2dtheta

            dRdomega_Rinv_F_Ginv_u = dRdomega.dot(Rinv_F_Ginv_u)
            r_Rinv_dRdomega_Rinv_F_Ginv_u = np.einsum(
                "ij,ij->i", Rinv_r.T, dRdomega_Rinv_F_Ginv_u.T
            )
            drdomega_Rinv_F_Ginv_u = np.einsum("ij,ij->i", drdomega.T, Rinv_F_Ginv_u.T)
            u_Ginv_F_Rinv_dRdomega_Rinv_F_Ginv_u = np.einsum(
                "ij,ij->i", Rinv_F_Ginv_u.T, dRdomega_Rinv_F_Ginv_u.T
            )

            du2domega = (
                2.0 * r_Rinv_dRdomega_Rinv_F_Ginv_u
                - 2.0 * drdomega_Rinv_F_Ginv_u
                + u_Ginv_F_Rinv_dRdomega_Rinv_F_Ginv_u
            )
            du2domega = np.atleast_2d(du2domega)

            # Compute drt2dtheta
            drdomega_Rinv_r = np.einsum("ij,ij->i", drdomega.T, Rinv_r.T)
            r_Rinv_dRdomega_Rinv_r = np.einsum(
                "ij,ij->i", Rinv_r.T, dRdomega.dot(Rinv_r).T
            )

            drt2domega = 2.0 * drdomega_Rinv_r - r_Rinv_dRdomega_Rinv_r
            drt2domega = np.atleast_2d(drt2domega)

            dMSE[omega] = dsigma[omega] * MSE / sigma2 + sigma2 * (
                -drt2domega + du2domega
            )

        return dMSE, MSE
Example 12
    def _predict_value_derivatives_hyper(self, x, u=None):
        """
        Compute the derivatives of the mean of the GP with respect to the hyperparameters

        Parameters
        ----------
        x : numpy.ndarray
            Point to compute in initial dimension.
        u : numpy.ndarray, optional
            Point to compute in small dimension. The default is None.

        Returns
        -------
        dy : numpy.ndarray
            Derivatives of the mean of the GP with respect to the hyperparameters.

        """
        # Initialization
        n_eval, _ = x.shape

        # Get pairwise componentwise L1-distances to the input training set
        dx = differences(x, Y=self.X_norma.copy())
        d_x = self._componentwise_distance(dx)
        if u is not None:
            theta = np.eye(self.options["n_comp"]).reshape(
                (self.options["n_comp"] ** 2,)
            )

            # Get pairwise componentwise L1-distances to the input training set
            du = differences(u, Y=self.U_norma.copy())
            d = self._componentwise_distance(du, small=True)
        else:
            theta = self.optimal_theta

            # Get pairwise componentwise L1-distances to the input training set
            d = d_x
            d_x = None

        # Compute the correlation function
        r = self._correlation_types[self.options["corr"]](theta, d, d_x=d_x).reshape(
            n_eval, self.nt
        )
        # Compute the regression function
        f = self._regression_types[self.options["poly"]](x)

        dy = np.zeros((len(self.optimal_theta), n_eval))

        gamma = self.optimal_par["gamma"]
        Rinv_dR_gamma = self.optimal_par["Rinv_dR_gamma"]
        Rinv_dmu = self.optimal_par["Rinv_dmu"]

        for omega in range(len(self.optimal_theta)):
            drdomega = self._correlation_types[self.options["corr"]](
                theta, d, grad_ind=omega, d_x=d_x
            ).reshape(n_eval, self.nt)

            dbetadomega = self.optimal_par["dbeta_all"][omega]

            dy_omega = (
                f.dot(dbetadomega)
                + drdomega.dot(gamma)
                - r.dot(Rinv_dR_gamma[omega] + Rinv_dmu[omega])
            )

            dy[omega, :] = dy_omega[:, 0]

        return dy
Example 13
    def predict_values(self, x):
        """
        Predict the value of the MGP for a given point

        Parameters
        ----------
        x : numpy.ndarray
            Point to compute.

        Raises
        ------
        ValueError
            The number of dimensions does not match.

        Returns
        -------
        y : numpy.ndarray
            Value of the MGP at the given point x.

        """
        n_eval, n_features = x.shape
        if n_features < self.nx:
            if n_features != self.options["n_comp"]:
                raise ValueError(
                    "dim(u) should be equal to %i" % self.options["n_comp"]
                )
            theta = np.eye(self.options["n_comp"]).reshape(
                (self.options["n_comp"] ** 2,)
            )
            # Get pairwise componentwise L1-distances to the input training set
            u = x
            x = self.get_x_from_u(u)

            u = u * self.embedding["norm"] - self.U_mean
            du = differences(u, Y=self.U_norma.copy())
            d = self._componentwise_distance(du, small=True)

            # Get an approximation of x
            x = (x - self.X_mean) / self.X_std
            dx = differences(x, Y=self.X_norma.copy())
            d_x = self._componentwise_distance(dx)
        else:
            if n_features != self.nx:
                raise ValueError("dim(x) should be equal to %i" % self.X_std.shape[0])
            theta = self.optimal_theta

            # Get pairwise componentwise L1-distances to the input training set
            x = (x - self.X_mean) / self.X_std
            dx = differences(x, Y=self.X_norma.copy())
            d = self._componentwise_distance(dx)
            d_x = None

        # Compute the correlation function
        r = self._correlation_types[self.options["corr"]](theta, d, d_x=d_x).reshape(
            n_eval, self.nt
        )

        f = self._regression_types[self.options["poly"]](x)
        # Scaled predictor
        y_ = np.dot(f, self.optimal_par["beta"]) + np.dot(r, self.optimal_par["gamma"])
        # Predictor
        y = (self.y_mean + self.y_std * y_).ravel()
        return y
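
A hedged usage sketch, assuming this is SMT's MGP surrogate, which accepts either a full-dimension point x (dim == nx) or a reduced point u of dimension n_comp, as the branches above show. The data and options are placeholders.

import numpy as np
from smt.surrogate_models import MGP

rng = np.random.RandomState(0)
xt = rng.uniform(-1.0, 1.0, (30, 3))        # 3-D inputs
yt = np.sin(2 * xt[:, 0]) + 0.1 * xt[:, 1]  # roughly one active direction

sm = MGP(n_comp=1, theta0=[1e-2])
sm.set_training_values(xt, yt)
sm.train()

# dim(x) == nx, so this takes the full-space branch of predict_values()
y = sm.predict_values(np.array([[0.2, -0.3, 0.5]]))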