Example #1
    def set_training_derivatives(self, xt, dyt_dxt, kx, name=None):
        """
        Set training data (derivatives).

        Parameters
        ----------
        xt : np.ndarray[nt, nx] or np.ndarray[nt]
            The input values for the nt training points.
        dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]
            The derivative values for the nt training points.
        kx : int
            0-based index of the derivatives being set.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        check_support(self, "training_derivatives")

        xt = ensure_2d_array(xt, "xt")
        dyt_dxt = ensure_2d_array(dyt_dxt, "dyt_dxt")

        if xt.shape[0] != dyt_dxt.shape[0]:
            raise ValueError(
                "the first dimension of xt and dyt_dxt must have the same length"
            )

        if not isinstance(kx, int):
            raise ValueError("kx must be an int")

        self.training_points[name][kx + 1] = [np.array(xt), np.array(dyt_dxt)]
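A minimal usage sketch for the method above. It assumes SMT's RMTB surrogate (which, to my knowledge, declares supports["training_derivatives"]) and toy 1-D data:

import numpy as np
from smt.surrogate_models import RMTB  # assumption: a model that supports training derivatives

xt = np.linspace(0.0, 4.0, 10).reshape(-1, 1)   # nt=10, nx=1
yt = np.sin(xt)                                  # training values
dyt_dxt = np.cos(xt)                             # analytic derivatives w.r.t. input 0

sm = RMTB(xlimits=np.array([[0.0, 4.0]]), print_global=False)
sm.set_training_values(xt, yt)
sm.set_training_derivatives(xt, dyt_dxt, 0)      # kx=0: derivatives w.r.t. the first input
sm.train()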
Example #2
    def set_training_values(self, xt, yt, name=None):
        """
        Set training data (values).

        Parameters
        ----------
        xt : np.ndarray[nt, nx] or np.ndarray[nt]
            The input values for the nt training points.
        yt : np.ndarray[nt, ny] or np.ndarray[nt]
            The output values for the nt training points.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        xt = ensure_2d_array(xt, "xt")
        yt = ensure_2d_array(yt, "yt")

        if xt.shape[0] != yt.shape[0]:
            raise ValueError(
                "the first dimension of xt and yt must have the same length")

        self.nt = xt.shape[0]
        self.nx = xt.shape[1]
        self.ny = yt.shape[1]
        kx = 0
        self.training_points[name][kx] = [np.array(xt), np.array(yt)]
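For reference, a minimal end-to-end sketch that calls this method through SMT's public API; KRG is used here simply as a concrete surrogate:

import numpy as np
from smt.surrogate_models import KRG

xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])   # nt=5, nx=1
yt = np.array([[0.0], [1.0], [1.5], [0.9], [1.0]])   # nt=5, ny=1

sm = KRG(print_global=False)
sm.set_training_values(xt, yt)   # stored under training_points[None][0] = [xt, yt]
sm.train()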
Example #3
def cast_to_discrete_values(xtypes, x):
    """
    see MixedIntegerContext.cast_to_discrete_values
    """
    ret = ensure_2d_array(x, "x").copy()
    x_col = 0
    for xtyp in xtypes:
        if xtyp == FLOAT:
            x_col += 1
            continue

        elif xtyp == INT:
            ret[:, x_col] = np.round(ret[:, x_col])
            x_col += 1

        elif isinstance(xtyp, tuple) and xtyp[0] == ENUM:
            # Categorical: the biggest level is selected.
            xenum = ret[:, x_col:x_col + xtyp[1]]
            maxx = np.max(xenum, axis=1).reshape((-1, 1))
            mask = xenum < maxx
            xenum[mask] = 0
            xenum[~mask] = 1
            x_col = x_col + xtyp[1]
        else:
            _raise_value_error(xtyp)
    return ret
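To illustrate the casting rules above, here is a standalone numpy sketch that reproduces the behaviour for one FLOAT, one INT and one 3-level ENUM block (it deliberately does not import the SMT helpers, so the constants and column layout are illustrative only):

import numpy as np

# column layout (illustrative): [FLOAT, INT, ENUM level 0, ENUM level 1, ENUM level 2]
x = np.array([[0.37, 2.6, 0.1, 0.8, 0.3]])

ret = x.copy()
ret[:, 1] = np.round(ret[:, 1])                 # INT column: round to the nearest integer

xenum = ret[:, 2:5]                             # ENUM block: one column per level (a view on ret)
maxx = np.max(xenum, axis=1).reshape((-1, 1))
mask = xenum < maxx
xenum[mask] = 0                                 # levels below the maximum are dropped
xenum[~mask] = 1                                # the biggest level is selected

print(ret)   # 0.37, 3.0, 0.0, 1.0, 0.0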
Example #4
    def __call__(self, x, kx=None):
        """
        Evaluate the function.

        Parameters
        ----------
        x : ndarray[n, nx] or ndarray[n]
            Evaluation points where n is the number of evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[n, 1]
            Function values if kx is None, or derivative values if kx is an int.
        """
        x = ensure_2d_array(x, "x")

        if x.shape[1] != self.options["ndim"]:
            raise ValueError("The second dimension of x should be %i" %
                             self.options["ndim"])

        if kx is not None:
            if not isinstance(kx, int) or kx < 0:
                raise TypeError("kx should be None or a non-negative int.")

        y = self._evaluate(x, kx)

        if self.options["return_complex"]:
            return y
        else:
            return np.real(y)
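A short usage sketch of this call interface, assuming one of SMT's built-in benchmark problems (Sphere):

import numpy as np
from smt.problems import Sphere

prob = Sphere(ndim=2)                   # options["ndim"] = 2
x = np.array([[1.0, 2.0], [0.5, -0.5]])

y = prob(x)                             # function values, shape (2, 1)
dy_dx0 = prob(x, kx=0)                  # derivatives w.r.t. the first input, shape (2, 1)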
Example #5
def cast_to_discrete_values(xtypes, xlimits, categorical_kernel, x):
    """
    see MixedIntegerContext.cast_to_discrete_values
    """
    ret = ensure_2d_array(x, "x").copy()
    x_col = 0
    for i, xtyp in enumerate(xtypes):
        if xtyp == FLOAT:
            x_col += 1
            continue
        elif xtyp == ORD:
            if isinstance(xlimits[i][0], str):
                listint = list(map(float, xlimits[i]))
                ret[:, x_col] = take_closest_in_list(listint, ret[:, x_col])
            else:
                ret[:, x_col] = np.round(ret[:, x_col])
            x_col += 1
        elif isinstance(xtyp, tuple) and xtyp[0] == ENUM:
            if categorical_kernel is None:
                # Categorical: the biggest level is selected.
                xenum = ret[:, x_col:x_col + xtyp[1]]
                maxx = np.max(xenum, axis=1).reshape((-1, 1))
                mask = xenum < maxx
                xenum[mask] = 0
                xenum[~mask] = 1
                x_col = x_col + xtyp[1]
            else:
                ret[:, x_col] = np.round(ret[:, x_col])
                x_col += 1
        else:
            _raise_value_error(xtyp)
    return ret
Example #6
    def update_training_values(self, yt, name=None):
        """
        Update the training data (values) at the previously set input values.

        Parameters
        ----------
        yt : np.ndarray[nt, ny] or np.ndarray[nt]
            The output values for the nt training points.
        name : str or None, optional
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications). The default is None.

        Raises
        ------
        ValueError
            The training points must be set first with set_training_values before calling update_training_values.
            The number of training points does not agree with the earlier call of set_training_values.
        """
        yt = ensure_2d_array(yt, "yt")

        kx = 0

        if kx not in self.training_points[name]:
            raise ValueError(
                "The training points must be set first with set_training_values "
                + "before calling update_training_values.")

        xt = self.training_points[name][kx][0]
        if xt.shape[0] != yt.shape[0]:
            raise ValueError(
                "The number of training points does not agree with the earlier call of "
                + "set_training_values.")

        self.training_points[name][kx][1] = np.array(yt)
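A sketch of the intended call order (the values must be set before they can be updated); KRG is assumed as the concrete surrogate:

import numpy as np
from smt.surrogate_models import KRG

xt = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
yt = np.sin(xt)

sm = KRG(print_global=False)
sm.set_training_values(xt, yt)     # must come first, otherwise ValueError is raised
sm.train()

yt_new = np.cos(xt)                # new outputs at the same input locations
sm.update_training_values(yt_new)
sm.train()                         # retrain with the updated outputs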
Example #7
    def predict_variances(self, x):
        xp = ensure_2d_array(x, "xp")
        if self._input_in_folded_space:
            x2 = unfold_with_enum_mask(self._xtypes, xp)
        else:
            x2 = xp
        return self._surrogate.predict_variances(
            cast_to_discrete_values(self._xtypes, x2))
Example #8
    def set_training_values(self, xt, yt, name=None):
        xt = ensure_2d_array(xt, "xt")
        if self._input_in_folded_space:
            xt2 = unfold_with_enum_mask(self._xtypes, xt)
        else:
            xt2 = xt
        super().set_training_values(xt2, yt)
        self._surrogate.set_training_values(xt2, yt, name)
Example #9
    def set_training_values(self, xt, yt, name=None):
        xt = ensure_2d_array(xt, "xt")
        if self._input_in_folded_space:
            xt2 = unfold_with_enum_mask(self._xtypes, xt)
        else:
            xt2 = xt
        xt2 = cast_to_discrete_values(self._xtypes, self._xlimits,
                                      self._categorical_kernel, xt2)
        super().set_training_values(xt2, yt)
        self._surrogate.set_training_values(xt2, yt, name)
Example #10
    def set_training_values(self, xt, yt, name=None):
        xt = ensure_2d_array(xt, "xt")
        if self._use_gower_distance:
            super().set_training_values(xt, yt)
            self._surrogate.options["corr"] = "gower"
            self._surrogate.set_training_values(xt, yt, name)
        else:
            if self._input_in_folded_space:
                xt2 = unfold_with_enum_mask(self._xtypes, xt)
            else:
                xt2 = xt
            super().set_training_values(xt2, yt)
            self._surrogate.set_training_values(xt2, yt, name)
Example #11
    def predict_derivatives(self, x: np.ndarray, kx: int) -> np.ndarray:
        """
        Predict the dy_dx derivatives at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        dy_dx : np.ndarray[nt, ny]
            Derivatives.
        """
        check_support(self, "derivatives")
        x = ensure_2d_array(x, "x")
        check_nx(self.nx, x)
        n = x.shape[0]
        self.printer.active = (
            self.options["print_global"] and self.options["print_prediction"]
        )

        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Evaluation of the Mixture of experts")
        else:
            self.printer._title("Evaluation")
        self.printer("   %-12s : %i" % ("# eval points.", n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context("Predicting", key="prediction"):
            y = self._predict_derivatives(x, kx)

        time_pt = self.printer._time("prediction")[-1] / n
        self.printer()
        self.printer("Prediction time/pt. (sec) : %10.7f" % time_pt)
        self.printer()

        return y.reshape((n, self.ny))
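A usage sketch for predict_derivatives, assuming KRG (which supports derivative prediction):

import numpy as np
from smt.surrogate_models import KRG

xt = np.linspace(0.0, 4.0, 10).reshape(-1, 1)
yt = np.sin(xt)

sm = KRG(print_global=False)
sm.set_training_values(xt, yt)
sm.train()

x = np.array([[1.0], [2.5]])
dy_dx0 = sm.predict_derivatives(x, 0)   # shape (2, 1): dy/dx_0 at the two points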
Example #12
    def predict_variances(self, x):
        """
        Predict the variances at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.

        Returns
        -------
        s2 : np.ndarray[nt, ny]
            Variances.
        """
        check_support(self, "variances")
        x = ensure_2d_array(x, "x")
        check_nx(self.nx, x)
        n = x.shape[0]
        x2 = np.copy(x)
        s2 = self._predict_variances(x2)
        return s2.reshape((n, self.ny))
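A corresponding usage sketch for the prediction variance, again assuming a Kriging model:

import numpy as np
from smt.surrogate_models import KRG

xt = np.array([[0.0], [1.0], [2.0], [3.0]])
yt = np.array([[0.0], [1.0], [0.5], [1.5]])

sm = KRG(print_global=False)
sm.set_training_values(xt, yt)
sm.train()

s2 = sm.predict_variances(np.array([[0.5], [2.5]]))   # shape (2, 1), larger away from training points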
Example #13
    def predict_output_derivatives(self, x):
        """
        Predict the derivatives dy_dyt at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.

        Returns
        -------
        dy_dyt : dict of np.ndarray[nt, nt]
            Dictionary of output derivatives.
            Key is None for derivatives wrt yt and kx for derivatives wrt dyt_dxt.
        """
        check_support(self, "output_derivatives")
        x = ensure_2d_array(x, "x")
        check_nx(self.nx, x)

        dy_dyt = self._predict_output_derivatives(x)
        return dy_dyt
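Support for output derivatives varies between models, so a usage sketch should guard on the supports flag; RMTB is used here on the assumption that it declares this capability:

import numpy as np
from smt.surrogate_models import RMTB   # assumption: declares supports["output_derivatives"]

xt = np.linspace(0.0, 4.0, 10).reshape(-1, 1)
yt = np.sin(xt)

sm = RMTB(xlimits=np.array([[0.0, 4.0]]), print_global=False)
sm.set_training_values(xt, yt)
sm.train()

if sm.supports["output_derivatives"]:                # guard: not every model implements this
    dy_dyt = sm.predict_output_derivatives(np.array([[0.5], [2.5]]))
    # dy_dyt[None] has shape (n, nt): sensitivity of the predictions to the training outputs yt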
Example #14
    def predict_variance_derivatives(self, x):
        """
        Predict the derivatives of the variance at a set of points.

        Parameters
        ----------
        x : np.ndarray
            Input values for the prediction points.

        Returns
        -------
        derived_variance : np.ndarray
            The Jacobian of the variance.
        """
        x = ensure_2d_array(x, "x")
        check_support(self, "variance_derivatives")
        check_nx(self.nx, x)
        n = x.shape[0]
        self.printer.active = (
            self.options["print_global"] and self.options["print_prediction"]
        )

        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Evaluation of the Mixture of experts")
        else:
            self.printer._title("Evaluation")
        self.printer("   %-12s : %i" % ("# eval points.", n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context("Predicting", key="prediction"):
            y = self._predict_variance_derivatives(x)

        time_pt = self.printer._time("prediction")[-1] / n
        self.printer()
        self.printer("Prediction time/pt. (sec) : %10.7f" % time_pt)
        self.printer()

        return y
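A guarded usage sketch, assuming a Kriging model for which variance derivatives are available (they are used, for example, in gradient-based infill criteria):

import numpy as np
from smt.surrogate_models import KRG

xt = np.linspace(0.0, 3.0, 6).reshape(-1, 1)
yt = xt ** 2

sm = KRG(print_global=False)
sm.set_training_values(xt, yt)
sm.train()

if sm.supports["variance_derivatives"]:              # guard: capability varies by model
    dvar_dx = sm.predict_variance_derivatives(np.array([[1.3]]))   # Jacobian of the variance at x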
Example #15
    def update_training_derivatives(
        self, dyt_dxt: np.ndarray, kx: int, name: Optional[str] = None
    ) -> None:
        """
        Update the training data (derivatives) at the previously set input values.

        Parameters
        ----------
        dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]
            The derivative values for the nt training points.
        kx : int
            0-based index of the derivatives being set.
        name : str or None, optional
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).

        Raises
        ------
        ValueError
            The training points must be set first with set_training_values before calling update_training_values.
            The number of training points does not agree with the earlier call of set_training_values.
        """
        check_support(self, "training_derivatives")

        dyt_dxt = ensure_2d_array(dyt_dxt, "dyt_dxt")

        if kx not in self.training_points[name]:
            raise ValueError(
                "The training points must be set first with set_training_values "
                + "before calling update_training_values."
            )

        xt = self.training_points[name][kx][0]
        if xt.shape[0] != dyt_dxt.shape[0]:
            raise ValueError(
                "The number of training points does not agree with the earlier call of "
                + "set_training_values."
            )

        self.training_points[name][kx + 1][1] = np.array(dyt_dxt)
Example #16
    def predict_values(self, x: np.ndarray) -> np.ndarray:
        """
        Predict the output values at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.

        Returns
        -------
        y : np.ndarray[nt, ny]
            Output values at the prediction points.
        """
        x = ensure_2d_array(x, "x")
        check_nx(self.nx, x)
        n = x.shape[0]
        x2 = np.copy(x)
        self.printer.active = (
            self.options["print_global"] and self.options["print_prediction"]
        )

        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Evaluation of the Mixture of experts")
        else:
            self.printer._title("Evaluation")
        self.printer("   %-12s : %i" % ("# eval points.", n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context("Predicting", key="prediction"):
            y = self._predict_values(x2)
        time_pt = self.printer._time("prediction")[-1] / n
        self.printer()
        self.printer("Prediction time/pt. (sec) : %10.7f" % time_pt)
        self.printer()
        return y.reshape((n, self.ny))
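Finally, a compact end-to-end sketch tying the training and prediction calls together; LHS sampling and the Sphere problem are used purely for illustration:

import numpy as np
from smt.problems import Sphere
from smt.sampling_methods import LHS
from smt.surrogate_models import KRG

prob = Sphere(ndim=2)
sampling = LHS(xlimits=prob.xlimits, random_state=42)
xt = sampling(20)                      # 20 training points in 2-D
yt = prob(xt)

sm = KRG(print_global=False)
sm.set_training_values(xt, yt)
sm.train()

xe = sampling(5)                       # a few fresh points for prediction
ye = sm.predict_values(xe)             # shape (5, 1)
print(float(np.abs(ye - prob(xe)).max()))   # rough check of the fit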