Example #1
    def set_training_values(self, xt, yt, name=None):
        """
        Set training data (values).

        Parameters
        ----------
        xt : np.ndarray[nt, nx] or np.ndarray[nt]
            The input values for the nt training points.
        yt : np.ndarray[nt, ny] or np.ndarray[nt]
            The output values for the nt training points.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        xt = check_2d_array(xt, "xt")
        yt = check_2d_array(yt, "yt")

        if xt.shape[0] != yt.shape[0]:
            raise ValueError(
                "the first dimension of xt and yt must have the same length")

        self.nt = xt.shape[0]
        self.nx = xt.shape[1]
        self.ny = yt.shape[1]
        kx = 0
        self.training_points[name][kx] = [np.array(xt), np.array(yt)]
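Every example on this page funnels user input through check_2d_array before storing or using it. The helper itself is not shown here; judging from how it is called, a minimal sketch of its presumed behavior (the names and messages below are assumptions, not the library's exact implementation) is:

import numpy as np

def check_2d_array(arr, name):
    # Presumed behavior: accept a 1-D or 2-D NumPy array and always return
    # a 2-D array, promoting shape (nt,) to a column of shape (nt, 1).
    if not isinstance(arr, np.ndarray):
        raise ValueError("%s must be a NumPy array" % name)
    if arr.ndim == 1:
        return arr.reshape((-1, 1))
    if arr.ndim == 2:
        return arr
    raise ValueError("%s must have a rank of 1 or 2" % name)

This is why the docstring above accepts both np.ndarray[nt, nx] and np.ndarray[nt] for the same argument.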
Example #2
    def set_training_derivatives(self, xt, dyt_dxt, kx, name=None):
        """
        Set training data (derivatives).

        Parameters
        ----------
        xt : np.ndarray[nt, nx] or np.ndarray[nt]
            The input values for the nt training points.
        dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]
            The derivative values for the nt training points.
        kx : int
            0-based index of the derivatives being set.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        check_support(self, "training_derivatives")

        xt = check_2d_array(xt, "xt")
        dyt_dxt = check_2d_array(dyt_dxt, "dyt_dxt")

        if xt.shape[0] != dyt_dxt.shape[0]:
            raise ValueError(
                "the first dimension of xt and dyt_dxt must have the same length"
            )

        if not isinstance(kx, int):
            raise ValueError("kx must be an int")

        self.training_points[name][kx + 1] = [np.array(xt), np.array(dyt_dxt)]
Example #3
    def update_training_derivatives(self, dyt_dxt, kx, name=None):
        """
        Update the training data (derivatives) at the previously set input values.

        Parameters
        ----------
        dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]
            The derivative values for the nt training points.
        kx : int
            0-based index of the derivatives being set.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        check_support(self, "training_derivatives")

        dyt_dxt = check_2d_array(dyt_dxt, "dyt_dxt")

        if kx not in self.training_points[name]:
            raise ValueError(
                "The training points must be set first with set_training_values "
                + "before calling update_training_values.")

        xt = self.training_points[name][kx][0]
        if xt.shape[0] != dyt_dxt.shape[0]:
            raise ValueError(
                "The number of training points does not agree with the earlier call of "
                + "set_training_values.")

        self.training_points[name][kx + 1][1] = np.array(dyt_dxt)
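A usage sketch of the derivative-setting API from Examples #2 and #3, assuming a surrogate that supports training derivatives; RMTB and its constructor arguments below are illustrative assumptions, not part of the examples above:

import numpy as np
from smt.surrogate_models import RMTB

xt = np.linspace(0.0, 4.0, 10).reshape((-1, 1))
yt = np.sin(xt)
dyt_dxt = np.cos(xt)  # gradient information at the same training points

sm = RMTB(xlimits=np.array([[0.0, 4.0]]), print_global=False)
sm.set_training_values(xt, yt)
sm.set_training_derivatives(xt, dyt_dxt, 0)  # kx=0: derivative w.r.t. the first input
sm.train()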
Example #4
def cast_to_discrete_values(xtypes, x):
    """
    see MixedIntegerContext.cast_to_discrete_values
    """
    ret = check_2d_array(x, "x").copy()
    x_col = 0
    for xtyp in xtypes:
        if xtyp == FLOAT:
            x_col += 1
            continue

        elif xtyp == INT:
            ret[:, x_col] = np.round(ret[:, x_col])
            x_col += 1

        elif isinstance(xtyp, tuple) and xtyp[0] == ENUM:
            # Categorical: the column with the largest value is selected.
            xenum = ret[:, x_col:x_col + xtyp[1]]
            maxx = np.max(xenum, axis=1).reshape((-1, 1))
            mask = xenum < maxx
            xenum[mask] = 0
            xenum[~mask] = 1
            x_col = x_col + xtyp[1]
        else:
            _raise_value_error(xtyp)
    return ret
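A short usage sketch of cast_to_discrete_values as defined above. FLOAT, INT, and ENUM are the type markers referenced in the loop; the import path is an assumption and may differ between SMT versions:

import numpy as np
from smt.applications.mixed_integer import FLOAT, INT, ENUM

# One continuous variable, one integer variable, and a 3-level categorical
# variable occupying three one-hot columns.
xtypes = [FLOAT, INT, (ENUM, 3)]

x = np.array([[0.37, 2.6, 0.1, 0.8, 0.4]])
print(cast_to_discrete_values(xtypes, x))
# [[0.37 3.   0.   1.   0.  ]]
# The float column is untouched, the integer column is rounded, and only
# the largest of the three enum columns is kept (set to 1, the rest to 0).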
Example #5
    def update_training_values(self, yt, name=None):
        """
        Update the training data (values) at the previously set input values.

        Parameters
        ----------
        yt : np.ndarray[nt, ny] or np.ndarray[nt]
            The output values for the nt training points.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        yt = check_2d_array(yt, "yt")

        kx = 0

        if kx not in self.training_points[name]:
            raise ValueError(
                "The training points must be set first with set_training_values "
                + "before calling update_training_values.")

        xt = self.training_points[name][kx][0]
        if xt.shape[0] != yt.shape[0]:
            raise ValueError(
                "The number of training points does not agree with the earlier call of "
                + "set_training_values.")

        self.training_points[name][kx][1] = np.array(yt)
Example #6
    def __call__(self, x, kx=None):
        """
        Evaluate the function.

        Parameters
        ----------
        x : ndarray[n, nx] or ndarray[n]
            Evaluation points where n is the number of evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[n, 1]
            Function values if kx is None, or derivative values if kx is an int.
        """
        x = check_2d_array(x, "x")

        if x.shape[1] != self.options["ndim"]:
            raise ValueError("The second dimension of x should be %i" %
                             self.options["ndim"])

        if kx is not None:
            if not isinstance(kx, int) or kx < 0:
                raise TypeError("kx should be None or a non-negative int.")

        y = self._evaluate(x, kx)

        if self.options["return_complex"]:
            return y
        else:
            return np.real(y)
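The __call__ pattern above is how SMT benchmark problems are evaluated, returning either function values or a chosen partial derivative. A minimal usage sketch; the Sphere problem is used purely as an illustration:

import numpy as np
from smt.problems import Sphere

prob = Sphere(ndim=2)

x = np.array([[1.0, 2.0], [3.0, 4.0]])
y = prob(x)             # function values, shape (2, 1)
dy_dx0 = prob(x, kx=0)  # derivatives w.r.t. the first input, shape (2, 1)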
Example #7
    def predict_variances(self, x):
        xp = check_2d_array(x, "xp")
        if self._input_in_folded_space:
            x2 = unfold_with_enum_mask(self._xtypes, xp)
        else:
            x2 = xp
        return self._surrogate.predict_variances(
            cast_to_discrete_values(self._xtypes, x2))
Example #8
    def set_training_values(self, xt, yt, name=None):
        xt = check_2d_array(xt, "xt")
        if self._input_in_folded_space:
            xt2 = unfold_with_enum_mask(self._xtypes, xt)
        else:
            xt2 = xt
        super().set_training_values(xt2, yt)
        self._surrogate.set_training_values(xt2, yt, name)
Example #9
    def predict_derivatives(self, x, kx):
        """
        Predict the dy_dx derivatives at a set of points.

        Parameters
        ----------
        x : np.ndarray[n, nx] or np.ndarray[n]
            Input values for the prediction points.
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        dy_dx : np.ndarray[n, ny]
            Derivatives.
        """
        check_support(self, 'derivatives')

        x = check_2d_array(x, 'x')
        check_nx(self.nx, x)

        n = x.shape[0]

        self.printer.active = self.options['print_global'] and self.options[
            'print_prediction']

        if self.name == 'MixExp':
            # Mixture of experts model
            self.printer._title('Evaluation of the Mixture of experts')
        else:
            self.printer._title('Evaluation')
        self.printer('   %-12s : %i' % ('# eval points.', n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context('Predicting', key='prediction'):
            y = self._predict_derivatives(x, kx)

        time_pt = self.printer._time('prediction')[-1] / n
        self.printer()
        self.printer('Prediction time/pt. (sec) : %10.7f' % time_pt)
        self.printer()

        return y.reshape((n, self.ny))
Example #10
    def predict_derivatives(self, x, kx):
        """
        Predict the dy_dx derivatives at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        dy_dx : np.ndarray[nt, ny]
            Derivatives.
        """
        check_support(self, "derivatives")
        x = check_2d_array(x, "x")
        check_nx(self.nx, x)
        n = x.shape[0]
        self.printer.active = (self.options["print_global"]
                               and self.options["print_prediction"])

        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Evaluation of the Mixture of experts")
        else:
            self.printer._title("Evaluation")
        self.printer("   %-12s : %i" % ("# eval points.", n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context("Predicting", key="prediction"):
            y = self._predict_derivatives(x, kx)

        time_pt = self.printer._time("prediction")[-1] / n
        self.printer()
        self.printer("Prediction time/pt. (sec) : %10.7f" % time_pt)
        self.printer()

        return y.reshape((n, self.ny))
Example #11
    def predict_values(self, x):
        """
        Predict the output values at a set of points.

        Parameters
        ----------
        x : np.ndarray[n, nx] or np.ndarray[n]
            Input values for the prediction points.

        Returns
        -------
        y : np.ndarray[n, ny]
            Output values at the prediction points.
        """
        x = check_2d_array(x, 'x')
        check_nx(self.nx, x)

        n = x.shape[0]

        self.printer.active = self.options['print_global'] and self.options[
            'print_prediction']

        if self.name == 'MixExp':
            # Mixture of experts model
            self.printer._title('Evaluation of the Mixture of experts')
        else:
            self.printer._title('Evaluation')
        self.printer('   %-12s : %i' % ('# eval points.', n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context('Predicting', key='prediction'):
            y = self._predict_values(x)

        time_pt = self.printer._time('prediction')[-1] / n
        self.printer()
        self.printer('Prediction time/pt. (sec) : %10.7f' % time_pt)
        self.printer()

        return y.reshape((n, self.ny))
Example #12
    def predict_values(self, x):
        """
        Predict the output values at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.

        Returns
        -------
        y : np.ndarray[nt, ny]
            Output values at the prediction points.
        """
        x = check_2d_array(x, "x")
        check_nx(self.nx, x)
        n = x.shape[0]
        self.printer.active = (self.options["print_global"]
                               and self.options["print_prediction"])

        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Evaluation of the Mixture of experts")
        else:
            self.printer._title("Evaluation")
        self.printer("   %-12s : %i" % ("# eval points.", n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context("Predicting", key="prediction"):
            y = self._predict_values(x)

        time_pt = self.printer._time("prediction")[-1] / n
        self.printer()
        self.printer("Prediction time/pt. (sec) : %10.7f" % time_pt)
        self.printer()
        return y.reshape((n, self.ny))
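Taken together, these examples show the standard SMT workflow: check_2d_array validates and reshapes the training data inside set_training_values, and does the same for prediction inputs inside predict_values and predict_derivatives. A minimal end-to-end sketch, with KRG chosen only as an illustrative surrogate:

import numpy as np
from smt.surrogate_models import KRG

# 1-D arrays are accepted because check_2d_array promotes them to
# column vectors of shape (nt, 1).
xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])

sm = KRG(print_global=False)
sm.set_training_values(xt, yt)
sm.train()

xp = np.linspace(0.0, 4.0, 20).reshape((-1, 1))
y = sm.predict_values(xp)              # shape (20, 1)
dy_dx = sm.predict_derivatives(xp, 0)  # shape (20, 1)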