Example #1
class Sampling(object):
    def __init__(self, **kwargs):
        self.options = OptionsDictionary()
        self.options.declare('xlimits', types=np.ndarray)
        self._declare_options()
        self.options.update(kwargs)

    def _declare_options(self):
        pass

    def __call__(self, n):
        """
        Compute the requested number of sampling points.

        Arguments
        ---------
        n : int
            Number of points requested.

        Returns
        -------
        ndarray[n, nx]
            The sampling locations in the input space.
        """
        xlimits = self.options['xlimits']
        nx = xlimits.shape[0]

        x = self._compute(n)
        for kx in range(nx):
            x[:, kx] = xlimits[kx, 0] + x[:, kx] * (xlimits[kx, 1] - xlimits[kx, 0])

        return x
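
A minimal sketch of how this base class is meant to be subclassed: _compute returns points in the unit hypercube and __call__ rescales them to xlimits. The UnitRandomSampling name is illustrative, and it assumes numpy and the OptionsDictionary dependency of the Sampling class above are importable.

import numpy as np

class UnitRandomSampling(Sampling):
    def _compute(self, n):
        # Points in [0, 1]^nx; __call__ rescales them to the declared xlimits.
        nx = self.options['xlimits'].shape[0]
        return np.random.rand(n, nx)

# 100 points in [0, 10] x [-1, 1]
sampling = UnitRandomSampling(xlimits=np.array([[0.0, 10.0], [-1.0, 1.0]]))
x = sampling(100)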
Example #2
class LinearSolver(object):
    def __init__(self, **kwargs):
        self.mtx = None
        self.rhs = None

        self.options = OptionsDictionary()
        self.options.declare("print_init", True, types=bool)
        self.options.declare("print_solve", True, types=bool)
        self._initialize()
        self.options.update(kwargs)

    def _initialize(self):
        pass

    def _setup(self, mtx, printer, mg_matrices=[]):
        pass

    def _solve(self, rhs, sol=None, ind_y=0):
        pass

    def _clone(self):
        clone = self.__class__()
        clone.options.update(self.options._dict)
        return clone

    @contextlib.contextmanager
    def _active(self, active):
        orig_active = self.printer.active

        self.printer.active = self.printer.active and active
        try:
            yield self.printer
        finally:
            self.printer.active = orig_active
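
The _active helper is a save-and-restore context manager: it narrows the printer's active flag for the duration of a block and always restores it on exit. A standalone sketch of the same pattern (the names below are illustrative, not part of SMT):

import contextlib

class ToyPrinter:
    # Minimal stand-in for SMT's Printer with an `active` flag.
    def __init__(self):
        self.active = True

    def __call__(self, msg=""):
        if self.active:
            print(msg)

@contextlib.contextmanager
def temporarily_active(printer, active):
    orig_active = printer.active
    printer.active = printer.active and active
    try:
        yield printer
    finally:
        printer.active = orig_active   # restored even if the block raises

printer = ToyPrinter()
with temporarily_active(printer, False) as p:
    p("suppressed")      # not printed
printer("printed")       # flag restored on exit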
Example #3
class SurrogateBasedApplication:

    if compiled_available:
        _surrogate_type = {
            "KRG": KRG,
            "LS": LS,
            "QP": QP,
            "KPLS": KPLS,
            "KPLSK": KPLSK,
            "GEKPLS": GEKPLS,
            "RBF": RBF,
            "RMTC": RMTC,
            "RMTB": RMTB,
            "IDW": IDW,
            "MGP": MGP,
        }
    else:
        _surrogate_type = {
            "KRG": KRG,
            "LS": LS,
            "QP": QP,
            "KPLS": KPLS,
            "KPLSK": KPLSK,
            "GEKPLS": GEKPLS,
            "MGP": MGP,
        }

    def __init__(self, **kwargs):
        """
        Constructor where values of options can be passed in.

        For the list of options, see the documentation for the surrogate model being used.

        Parameters
        ----------
        **kwargs : named arguments
            Set of options that can be optionally set; each option must have been declared.

        Examples
        --------
        >>> from smt.applications import VFM
        >>> extension = VFM(type_bridge = 'Additive', name_model_LF = QP, name_model_bridge =
                           LS, X_LF = xLF, y_LF = yLF, X_HF = xHF, y_HF = yHF, options_LF =
                           dictOptionLFModel, options_bridge = dictOptionBridgeModel)
        """
        self.options = OptionsDictionary()

        self._initialize()
        self.options.update(kwargs)

    def _initialize(self):
        """
        Implemented by the application to declare options and declare what they support (optional).

        Examples
        --------
        self.options.declare('option_name', default_value, types=(bool, int), desc='description')
        """
        pass
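
A hedged sketch of how the _surrogate_type map above is typically used: look up a surrogate class by its string key and instantiate it with options. The make_surrogate helper is hypothetical; print_global is the option declared by the SurrogateModel base class shown in Example #6 below.

def make_surrogate(name, **options):
    # Resolve a surrogate model class by name using the map above.
    try:
        sm_class = SurrogateBasedApplication._surrogate_type[name]
    except KeyError:
        raise ValueError("Unknown surrogate type: %s" % name)
    return sm_class(**options)

sm = make_surrogate("KRG", print_global=False)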
Example #4
class Extensions(object):

    if compiled_available:
        _surrogate_type = {
            'KRG': KRG, 'LS': LS, 'QP': QP, 'KPLS': KPLS, 'KPLSK': KPLSK, 'GEKPLS': GEKPLS,
            'RBF': RBF, 'RMTC': RMTC, 'RMTB': RMTB, 'IDW': IDW}
    else:
        _surrogate_type = {
            'KRG': KRG,'LS': LS,'QP': QP,'KPLS':KPLS,'KPLSK':KPLSK,'GEKPLS':GEKPLS}

    def __init__(self, **kwargs):
        """
        Constructor where values of options can be passed in.

        For the list of options, see the documentation for the surrogate model being used.

        Parameters
        ----------
        **kwargs : named arguments
            Set of options that can be optionally set; each option must have been declared.

        Examples
        --------
        >>> from smt.extensions import VFM
        >>> extension = VFM(type_bridge = 'Additive', name_model_LF = QP, name_model_bridge =
                           LS, X_LF = xLF, y_LF = yLF, X_HF = xHF, y_HF = yHF, options_LF =
                           dictOptionLFModel, options_bridge = dictOptionBridgeModel)
        """
        self.options = OptionsDictionary()

        self._initialize()
        self.options.update(kwargs)

    def _initialize(self):
        """
        Implemented by the application to declare options and declare what they support (optional).

        Examples
        --------
        self.options.declare('option_name', default_value, types=(bool, int), desc='description')
        """
        pass

    def apply_method(self):
        """
        Run the complete algorithm of the SMT application; e.g.: VFM, ME, EGO...

        """
        self._apply()

    def analyse_results(self, **kwargs):
        """
        Get the final results; e.g., for VFM, two possible analyses are available:
        - kwargs = {x = x, operation = 'predict_values'}
        - kwargs = {x = x, operation = 'predict_derivatives', kx = i}

        """
        return self._analyse_results(**kwargs)
Example #5
class Problem(object):
    def __init__(self, **kwargs):
        self.options = OptionsDictionary()
        self.options.declare('ndim', 1, types=int)
        self._declare_options()
        self.options.update(kwargs)

        self.xlimits = np.zeros((self.options['ndim'], 2))

        self._initialize()

    def _declare_options(self):
        pass

    def _initialize(self):
        pass

    def __call__(self, x, kx=None):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Function values if kx=None or derivative values if kx is an int.
        """
        if not isinstance(x, np.ndarray) or len(x.shape) != 2:
            raise TypeError('x should be a rank-2 array.')
        elif x.shape[1] != self.options['ndim']:
            raise ValueError('The second dimension of x should be %i' %
                             self.options['ndim'])

        if kx is not None:
            if not isinstance(kx, int) or kx < 0:
                raise TypeError('kx should be None or a non-negative int.')

        return self._evaluate(x, kx)
Example #6
class SurrogateModel(object):
    """
    Base class for all surrogate models.

    Attributes
    ----------
    options : OptionsDictionary
        Dictionary of options. Options values can be set on this attribute directly
        or they can be passed in as keyword arguments during instantiation.
    supports : dict
        Dictionary containing information about what this surrogate model supports.

    Examples
    --------
    >>> from smt.surrogate_models import RBF
    >>> sm = RBF(print_training=False)
    >>> sm.options['print_prediction'] = False
    """
    def __init__(self, **kwargs):
        """
        Constructor where values of options can be passed in.

        For the list of options, see the documentation for the surrogate model being used.

        Parameters
        ----------
        **kwargs : named arguments
            Set of options that can be optionally set; each option must have been declared.

        Examples
        --------
        >>> from smt.surrogate_models import RBF
        >>> sm = RBF(print_global=False)
        """
        self.options = OptionsDictionary()

        self.supports = supports = {}
        supports["training_derivatives"] = False
        supports["derivatives"] = False
        supports["output_derivatives"] = False
        supports["adjoint_api"] = False
        supports["variances"] = False

        declare = self.options.declare

        declare(
            "print_global",
            True,
            types=bool,
            desc="Global print toggle. If False, all printing is suppressed",
        )
        declare(
            "print_training",
            True,
            types=bool,
            desc="Whether to print training information",
        )
        declare(
            "print_prediction",
            True,
            types=bool,
            desc="Whether to print prediction information",
        )
        declare(
            "print_problem",
            True,
            types=bool,
            desc="Whether to print problem information",
        )
        declare("print_solver",
                True,
                types=bool,
                desc="Whether to print solver information")

        self._initialize()
        self.options.update(kwargs)
        self.training_points = defaultdict(dict)
        self.printer = Printer()

    def set_training_values(self, xt, yt, name=None):
        """
        Set training data (values).

        Parameters
        ----------
        xt : np.ndarray[nt, nx] or np.ndarray[nt]
            The input values for the nt training points.
        yt : np.ndarray[nt, ny] or np.ndarray[nt]
            The output values for the nt training points.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        xt = check_2d_array(xt, "xt")
        yt = check_2d_array(yt, "yt")

        if xt.shape[0] != yt.shape[0]:
            raise ValueError(
                "the first dimension of xt and yt must have the same length")

        self.nt = xt.shape[0]
        self.nx = xt.shape[1]
        self.ny = yt.shape[1]
        kx = 0
        self.training_points[name][kx] = [np.array(xt), np.array(yt)]

    def update_training_values(self, yt, name=None):
        """
        Update the training data (values) at the previously set input values.

        Parameters
        ----------
        yt : np.ndarray[nt, ny] or np.ndarray[nt]
            The output values for the nt training points.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        yt = check_2d_array(yt, "yt")

        kx = 0

        if kx not in self.training_points[name]:
            raise ValueError(
                "The training points must be set first with set_training_values "
                + "before calling update_training_values.")

        xt = self.training_points[name][kx][0]
        if xt.shape[0] != yt.shape[0]:
            raise ValueError(
                "The number of training points does not agree with the earlier call of "
                + "set_training_values.")

        self.training_points[name][kx][1] = np.array(yt)

    def set_training_derivatives(self, xt, dyt_dxt, kx, name=None):
        """
        Set training data (derivatives).

        Parameters
        ----------
        xt : np.ndarray[nt, nx] or np.ndarray[nt]
            The input values for the nt training points.
        dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]
            The derivative values for the nt training points.
        kx : int
            0-based index of the derivatives being set.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        check_support(self, "training_derivatives")

        xt = check_2d_array(xt, "xt")
        dyt_dxt = check_2d_array(dyt_dxt, "dyt_dxt")

        if xt.shape[0] != dyt_dxt.shape[0]:
            raise ValueError(
                "the first dimension of xt and dyt_dxt must have the same length"
            )

        if not isinstance(kx, int):
            raise ValueError("kx must be an int")

        self.training_points[name][kx + 1] = [np.array(xt), np.array(dyt_dxt)]

    def update_training_derivatives(self, dyt_dxt, kx, name=None):
        """
        Update the training data (derivatives) at the previously set input values.

        Parameters
        ----------
        dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]
            The derivative values for the nt training points.
        kx : int
            0-based index of the derivatives being set.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        check_support(self, "training_derivatives")

        dyt_dxt = check_2d_array(dyt_dxt, "dyt_dxt")

        if kx + 1 not in self.training_points[name]:
            raise ValueError(
                "The training derivatives must be set first with set_training_derivatives "
                + "before calling update_training_derivatives.")

        xt = self.training_points[name][kx + 1][0]
        if xt.shape[0] != dyt_dxt.shape[0]:
            raise ValueError(
                "The number of training points does not agree with the earlier call of "
                + "set_training_derivatives.")

        self.training_points[name][kx + 1][1] = np.array(dyt_dxt)

    def train(self):
        """
        Train the model
        """
        n_exact = self.training_points[None][0][0].shape[0]

        self.printer.active = self.options["print_global"]
        self.printer._line_break()
        self.printer._center(self.name)

        self.printer.active = (self.options["print_global"]
                               and self.options["print_problem"])
        self.printer._title("Problem size")
        self.printer("   %-25s : %i" % ("# training points.", n_exact))
        self.printer()

        self.printer.active = (self.options["print_global"]
                               and self.options["print_training"])
        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Training of the Mixture of experts")
        else:
            self.printer._title("Training")

        # Train the model using the specified model-method
        with self.printer._timed_context("Training", "training"):
            self._train()

    def predict_values(self, x):
        """
        Predict the output values at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.

        Returns
        -------
        y : np.ndarray[nt, ny]
            Output values at the prediction points.
        """
        x = check_2d_array(x, "x")
        check_nx(self.nx, x)
        n = x.shape[0]
        self.printer.active = (self.options["print_global"]
                               and self.options["print_prediction"])

        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Evaluation of the Mixture of experts")
        else:
            self.printer._title("Evaluation")
        self.printer("   %-12s : %i" % ("# eval points.", n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context("Predicting", key="prediction"):
            y = self._predict_values(x)

        time_pt = self.printer._time("prediction")[-1] / n
        self.printer()
        self.printer("Prediction time/pt. (sec) : %10.7f" % time_pt)
        self.printer()
        return y.reshape((n, self.ny))

    def predict_derivatives(self, x, kx):
        """
        Predict the dy_dx derivatives at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        dy_dx : np.ndarray[nt, ny]
            Derivatives.
        """
        check_support(self, "derivatives")
        x = check_2d_array(x, "x")
        check_nx(self.nx, x)
        n = x.shape[0]
        self.printer.active = (self.options["print_global"]
                               and self.options["print_prediction"])

        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Evaluation of the Mixture of experts")
        else:
            self.printer._title("Evaluation")
        self.printer("   %-12s : %i" % ("# eval points.", n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context("Predicting", key="prediction"):
            y = self._predict_derivatives(x, kx)

        time_pt = self.printer._time("prediction")[-1] / n
        self.printer()
        self.printer("Prediction time/pt. (sec) : %10.7f" % time_pt)
        self.printer()

        return y.reshape((n, self.ny))

    def predict_output_derivatives(self, x):
        """
        Predict the derivatives dy_dyt at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.

        Returns
        -------
        dy_dyt : dict of np.ndarray[nt, nt]
            Dictionary of output derivatives.
            Key is None for derivatives wrt yt and kx for derivatives wrt dyt_dxt.
        """
        check_support(self, "output_derivatives")
        check_nx(self.nx, x)

        dy_dyt = self._predict_output_derivatives(x)
        return dy_dyt

    def predict_variances(self, x):
        """
        Predict the variances at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.

        Returns
        -------
        s2 : np.ndarray[nt, ny]
            Variances.
        """
        check_support(self, "variances")
        check_nx(self.nx, x)
        n = x.shape[0]
        s2 = self._predict_variances(x)
        return s2.reshape((n, self.ny))

    def _initialize(self):
        """
        Implemented by surrogate models to declare options and declare what they support (optional).

        Examples
        --------
        self.options.declare('option_name', default_value, types=(bool, int), desc='description')
        self.supports['derivatives'] = True
        """
        pass

    def _train(self):
        """
        Implemented by surrogate models to perform training (optional, but typically implemented).
        """
        pass

    def _predict_values(self, x):
        """
        Implemented by surrogate models to predict the output values.

        Parameters
        ----------
        x : np.ndarray[nt, nx]
            Input values for the prediction points.

        Returns
        -------
        y : np.ndarray[nt, ny]
            Output values at the prediction points.
        """
        raise Exception("This surrogate model is incorrectly implemented")

    def _predict_derivatives(self, x, kx):
        """
        Implemented by surrogate models to predict the dy_dx derivatives (optional).

        If this method is implemented, the surrogate model should have

        ::
            self.supports['derivatives'] = True

        in the _initialize() implementation.

        Parameters
        ----------
        x : np.ndarray[nt, nx]
            Input values for the prediction points.
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        dy_dx : np.ndarray[nt, ny]
            Derivatives.
        """
        check_support(self, "derivatives", fail=True)

    def _predict_output_derivatives(self, x):
        """
        Implemented by surrogate models to predict the dy_dyt derivatives (optional).

        If this method is implemented, the surrogate model should have

        ::
            self.supports['output_derivatives'] = True

        in the _initialize() implementation.

        Parameters
        ----------
        x : np.ndarray[nt, nx]
            Input values for the prediction points.

        Returns
        -------
        dy_dyt : dict of np.ndarray[nt, nt]
            Dictionary of output derivatives.
            Key is None for derivatives wrt yt and kx for derivatives wrt dyt_dxt.
        """
        check_support(self, "output_derivatives", fail=True)

    def _predict_variances(self, x):
        """
        Implemented by surrogate models to predict the variances at a set of points (optional).

        If this method is implemented, the surrogate model should have

        ::
            self.supports['variances'] = True

        in the _initialize() implementation.

        Parameters
        ----------
        x : np.ndarray[nt, nx]
            Input values for the prediction points.

        Returns
        -------
        s2 : np.ndarray[nt, ny]
            Variances.
        """
        check_support(self, "variances", fail=True)
Example #7
class Problem(object):
    def __init__(self, **kwargs):
        """
        Constructor where values of options can be passed in.

        For the list of options, see the documentation for the problem being used.

        Parameters
        ----------
        **kwargs : named arguments
            Set of options that can be optionally set; each option must have been declared.

        Examples
        --------
        >>> from smt.problems import Sphere
        >>> prob = Sphere(ndim=3)
        """
        self.options = OptionsDictionary()
        self.options.declare("ndim", 1, types=int)
        self.options.declare("return_complex", False, types=bool)
        self._initialize()
        self.options.update(kwargs)

        self.xlimits = np.zeros((self.options["ndim"], 2))

        self._setup()

    def _initialize(self):
        """
        Implemented by problem to declare options (optional).

        Examples
        --------
        self.options.declare('option_name', default_value, types=(bool, int), desc='description')
        """
        pass

    def _setup(self):
        pass

    def __call__(self, x, kx=None):
        """
        Evaluate the function.

        Parameters
        ----------
        x : ndarray[n, nx] or ndarray[n]
            Evaluation points where n is the number of evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[n, 1]
            Function values if kx=None or derivative values if kx is an int.
        """
        x = ensure_2d_array(x, "x")

        if x.shape[1] != self.options["ndim"]:
            raise ValueError("The second dimension of x should be %i" %
                             self.options["ndim"])

        if kx is not None:
            if not isinstance(kx, int) or kx < 0:
                raise TypeError("kx should be None or a non-negative int.")

        y = self._evaluate(x, kx)

        if self.options["return_complex"]:
            return y
        else:
            return np.real(y)

    def _evaluate(self, x, kx=None):
        """
        Implemented by surrogate models to evaluate the function.

        Parameters
        ----------
        x : ndarray[n, nx]
            Evaluation points where n is the number of evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[n, 1]
            Function values if kx=None or derivative values if kx is an int.
        """
        raise Exception("This problem has not been implemented correctly")
Example #8
class SamplingMethod(object):
    def __init__(self, **kwargs):
        """
        Constructor where values of options can be passed in.

        For the list of options, see the documentation for the problem being used.

        Parameters
        ----------
        **kwargs : named arguments
            Set of options that can be optionally set; each option must have been declared.

        Examples
        --------
        >>> import numpy as np
        >>> from smt.sampling_methods import Random
        >>> sampling = Random(xlimits=np.arange(2).reshape((1, 2)))
        """
        self.options = OptionsDictionary()
        self.options.declare(
            "xlimits",
            types=np.ndarray,
            desc="The interval of the domain in each dimension with shape nx x 2 (required)",
        )
        self._initialize()
        self.options.update(kwargs)

    def _initialize(self):
        """
        Implemented by sampling methods to declare options (optional).

        Examples
        --------
        self.options.declare('option_name', default_value, types=(bool, int), desc='description')
        """
        pass

    def __call__(self, nt):
        """
        Compute the requested number of sampling points.

        The number of dimensions (nx) is determined based on `xlimits.shape[0]`.

        Arguments
        ---------
        nt : int
            Number of points requested.

        Returns
        -------
        ndarray[nt, nx]
            The sampling locations in the input space.
        """
        xlimits = self.options["xlimits"]
        nx = xlimits.shape[0]

        x = self._compute(nt)
        for kx in range(nx):
            x[:, kx] = xlimits[kx, 0] + x[:, kx] * (xlimits[kx, 1] - xlimits[kx, 0])

        return x

    def _compute(self, nt):
        """
        Implemented by sampling methods to compute the requested number of sampling points.

        The number of dimensions (nx) is determined based on `xlimits.shape[0]`.

        Arguments
        ---------
        nt : int
            Number of points requested.

        Returns
        -------
        ndarray[nt, nx]
            The sampling locations in the input space.
        """
        raise Exception(
            "This sampling method has not been implemented correctly")
Example #9
class localGEKPLS(object):
    """
    Reconstruct a GEKPLS model.
    It provides predictions of the drag/lift/pitching-moment coefficients with respect to the inputs,
    as well as the gradients of those predictions with respect to the inputs.
    In addition, it provides the probability that a given input belongs to this local model,
    which is further used to compute the mixture proportions in the Mixture of Experts.
    """
    def __init__(self, **kwargs):
        """

        Examples
        --------
        >>> from methods import localGEKPLS
        >>> local1 = localGEKPLS(para_file='data/local1.npy')
        """
        self.options = OptionsDictionary()
        declare = self.options.declare
        declare(
            'para_file',
            values=None,
            types=str,
            desc='Directory for loading / saving cached data; None means do not save or load',
        )

        self.options.update(kwargs)
        self._readPara()

    def _readPara(self):
        """
        Read parameters from self.options['para_file']
        """
        ## Load the dict with NumPy (allow_pickle is required for pickled dicts on NumPy >= 1.16.3)
        paradict = np.load(self.options['para_file'], allow_pickle=True).item()

        ## Copy items outside
        self.X_mean = paradict['X_mean']
        self.X_std = paradict['X_std']
        self.X_norma = paradict['X_norma']
        self.y_mean = paradict['y_mean']
        self.y_std = paradict['y_std']
        self.optimal_theta = paradict['optimal_theta']
        self.nt = paradict['nt']
        self.optimal_par = paradict['optimal_par']
        self.corr = paradict['corr']
        self.n_comp = paradict['n_comp']
        self.coeff_pls = paradict['coeff_pls']
        self.poly = paradict['poly']

        del paradict

    def _componentwise_distance(self, dx, opt=0):

        d = componentwise_distance_PLS(dx, self.corr, self.n_comp,
                                       self.coeff_pls)
        return d

    def _predict_values(self, x):
        """
        Evaluates the model at a set of points.

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values

        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        """
        # Initialization
        n_eval, n_features_x = x.shape
        x = (x - self.X_mean) / self.X_std

        # Get pairwise componentwise L1-distances to the input training set
        dx = manhattan_distances(x,
                                 Y=self.X_norma.copy(),
                                 sum_over_features=False)
        d = self._componentwise_distance(dx)

        # Compute the correlation function
        if self.corr == 'abs_exp':
            r = abs_exp(self.optimal_theta, d).reshape(n_eval, self.nt)
        else:
            r = squar_exp(self.optimal_theta, d).reshape(n_eval, self.nt)

        y = np.zeros(n_eval)

        # Compute the regression function
        if self.poly == 'constant':
            f = constant(x)
        elif self.poly == 'linear':
            f = linear(x)
        else:
            f = quadratic(x)

        # Scaled predictor
        y_ = np.dot(f, self.optimal_par['beta']) + np.dot(
            r, self.optimal_par['gamma'])
        # Predictor
        y = (self.y_mean + self.y_std * y_).ravel()

        return y
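
A hedged usage sketch following the docstring above: the local model is rebuilt from a cached parameter file, then queried. The file path is the hypothetical one from the docstring and must exist on disk; the query points are purely illustrative.

import numpy as np

local1 = localGEKPLS(para_file='data/local1.npy')  # cached parameters (must exist)
dim = local1.X_norma.shape[1]                      # input dimension of the cached model
x = np.random.rand(10, dim)                        # 10 illustrative query points
y = local1._predict_values(x)                      # predicted coefficient values, shape (10,)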
Example #10
class SM(object):
    '''
    Base class for all model methods.
    '''
    def __init__(self, **kwargs):
        '''
        Constructor.

        Arguments
        ---------
        **kwargs : named arguments
            Set of options that can be optionally set; each option must have been declared.
        '''
        self.options = OptionsDictionary()
        self._declare_options()
        self.options.update(kwargs)

        self.training_pts = {'exact': {}}

        self.printer = Printer()

    def _declare_options(self):
        declare = self.options.declare

        declare(
            'print_global',
            True,
            types=bool,
            desc='Global print toggle. If False, all printing is suppressed')
        declare('print_training',
                True,
                types=bool,
                desc='Whether to print training information')
        declare('print_prediction',
                True,
                types=bool,
                desc='Whether to print prediction information')
        declare('print_problem',
                True,
                types=bool,
                desc='Whether to print problem information')
        declare('print_solver',
                True,
                types=bool,
                desc='Whether to print solver information')

    def compute_rms_error(self, xe=None, ye=None, kx=None):
        '''
        Returns the RMS error of the training points or the given points.

        Arguments
        ---------
        xe : np.ndarray[ne, dim] or None
            Input values. If None, the input values at the training points are used instead.
        ye : np.ndarray[ne, 1] or None
            Output / deriv. values. If None, the training pt. outputs / derivs. are used.
        kx : int or None
            If None, we are checking the output values.
            If int, we are checking the derivs. w.r.t. the kx^{th} input variable (0-based).
        '''
        if xe is not None and ye is not None:
            ye2 = self.predict(xe, kx)
            return np.linalg.norm(ye2 - ye) / np.linalg.norm(ye)
        elif xe is None and ye is None:
            num = 0.
            den = 0.
            if kx is None:
                kx2 = 0
            else:
                kx2 = kx + 1
            if kx2 not in self.training_pts['exact']:
                raise ValueError(
                    'There is no training point data available for kx %s' %
                    kx2)
            xt, yt = self.training_pts['exact'][kx2]
            yt2 = self.predict(xt, kx)
            num += np.linalg.norm(yt2 - yt)**2
            den += np.linalg.norm(yt)**2
            return num**0.5 / den**0.5

    def add_training_pts(self, typ, xt, yt, kx=None):
        '''
        Adds nt training/sample data points

        Arguments
        ---------
        typ : str
            'exact'  if these data are considered high-fidelity data
            'approx' if these data are considered low-fidelity data (TODO)
        xt : np.ndarray [nt, dim]
            Training point input variable values
        yt : np.ndarray [nt, 1]
            Training point output variable values or derivatives (a vector)
        kx : int or None
            None if this data set represents output variable values
            int  if this data set represents derivatives
                 where it is differentiated w.r.t. the kx^{th}
                 input variable (kx is 0-based)
        '''
        yt = yt.reshape((xt.shape[0], 1))
        #Output or derivative variables
        if kx is None:
            kx = 0
            self.dim = xt.shape[1]
            self.nt = xt.shape[0]
        else:
            kx = kx + 1

        #Construct the input data
        pts = self.training_pts[typ]
        if kx in pts:
            pts[kx][0] = np.vstack([pts[kx][0], xt])
            pts[kx][1] = np.vstack([pts[kx][1], yt])
        else:
            pts[kx] = [np.array(xt), np.array(yt)]

    def train(self):
        '''
        Train the model
        '''
        n_exact = self.training_pts['exact'][0][0].shape[0]

        self.printer.active = self.options['print_global']
        self.printer._line_break()
        self.printer._center(self.options['name'])

        self.printer.active = (self.options['print_global']
                               and self.options['print_problem'])
        self.printer._title('Problem size')
        self.printer('   %-25s : %i' % ('# training pts.', n_exact))
        self.printer()

        self.printer.active = (self.options['print_global']
                               and self.options['print_training'])
        if self.options['name'] == 'MixExp':
            # Mixture of experts model
            self.printer._title('Training of the Mixture of experts')
        else:
            self.printer._title('Training')

        #Train the model using the specified model-method
        with self.printer._timed_context('Training', 'training'):
            self.fit()

    def predict(self, x, kx=None):
        '''
        Evaluates the model at a set of unknown points

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        kx : int or None
            None if evaluation of the interpolant is desired.
            int  if evaluation of derivatives of the interpolant is desired
                 with respect to the kx^{th} input variable (kx is 0-based).

        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        '''
        n_evals = x.shape[0]

        self.printer.active = (self.options['print_global']
                               and self.options['print_prediction'])

        if self.options['name'] == 'MixExp':
            # Mixture of experts model
            self.printer._title('Evaluation of the Mixture of experts')
        else:
            self.printer._title('Evaluation')
        self.printer('   %-12s : %i' % ('# eval pts.', n_evals))
        self.printer()

        #Output or derivative variables
        if kx is None:
            kx = 0
        else:
            kx = kx + 1

        #Evaluate the unknown points using the specified model-method
        with self.printer._timed_context('Predicting', key='prediction'):
            y = self.evaluate(x, kx)

        time_pt = self.printer._time('prediction')[-1] / n_evals
        self.printer()
        self.printer('Prediction time/pt. (sec) : %10.7f' % time_pt)
        self.printer()

        return y.reshape(n_evals, 1)
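
A minimal sketch of the subclass contract implied by this older base class: _declare_options adds the 'name' option used by train(), fit() trains, and evaluate() predicts. The MeanModel below is a toy constant predictor; the OptionsDictionary and Printer dependencies of SM above are assumed importable.

import numpy as np

class MeanModel(SM):
    '''Toy subclass: always predicts the mean of the training outputs.'''

    def _declare_options(self):
        super(MeanModel, self)._declare_options()
        self.options.declare('name', 'MeanModel', types=str)

    def fit(self):
        xt, yt = self.training_pts['exact'][0]
        self._mean = yt.mean()

    def evaluate(self, x, kx):
        n = x.shape[0]
        if kx == 0:                      # kx == 0 means function values
            return np.full((n, 1), self._mean)
        return np.zeros((n, 1))          # derivatives of a constant are zero

sm = MeanModel()
sm.add_training_pts('exact', np.random.rand(20, 2), np.random.rand(20))
sm.train()
y = sm.predict(np.random.rand(5, 2))     # shape (5, 1)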
Example #11
class MoE(object):
    """
    Construct a global model for Cl/Cd/Cm
    """
    def __init__(self, **kwargs):
        """

        Examples
        --------
        >>> from methods import MoE
        >>> clmodel = MoE(func='Cl', nlocal=20)
        """
        self.options = OptionsDictionary()
        declare = self.options.declare
        declare('func',
                values=('Cl', 'Cd', 'Cm'),
                types=str,
                desc='Which model to construct')
        declare('nlocal',
                values=None,
                types=int,
                desc='How many local models to use')

        self.options.update(kwargs)
        self.models = []
        self.posteriors = []

        self._setup()

    def _setup(self):
        """
        1. Reload local models 
        2. Reload GMM dict
        """
        nlocal = self.options['nlocal']
        funcname = self.options['func']

        for i in range(nlocal):
            thispath = './data/' + funcname + str(i) + '.npy'
            thislocal = localGEKPLS(para_file=thispath)
            self.models.append(thislocal)

        self.GGMinfo = np.load('./data/GMM_' + funcname + '.npy',
                               allow_pickle=True).item()

    def _posteriors(self, Xinput):
        """
        Provide posteriors of the prediction points Xinput.

        Parameters
        ----------
        Xinput : np.ndarray [nevals, dim]
               Evaluation point input variable values

        Returns
        -------
        posteriors : np.ndarray [nevals, ncluster]
               Posterior probability of each point under each local model
        """
        nevals = Xinput.shape[0]
        dim = Xinput.shape[1]
        nc = int((dim - 2) / 2)

        #xdata = np.zeros((nevals,4))
        xdata = np.zeros((nevals, 2))
        xdata[:, 0] = Xinput[:, nc].copy()
        xdata[:, 1] = Xinput[:, 0].copy()
        #xdata[:,2] = Xinput[:,dim-2].copy()
        #xdata[:,3] = Xinput[:,dim-1].copy()

        indevalsClusters, posteriors, classList, clusterCount = eval_gaussianClassifier(
            xdata,
            self.GGMinfo['pi'],
            self.GGMinfo['mu'],
            self.GGMinfo['Sigma'],
            weight=3.0)

        return posteriors

    def predict(self, Xinput):
        """
        Provide predictions.

        Parameters
        ----------
        Xinput : np.ndarray [nevals, dim]
               Evaluation point input variable values

        Returns
        -------
        Yhat : np.ndarray [nevals]
               Prediction at each point
        """
        nevals = Xinput.shape[0]
        nlocal = self.options['nlocal']

        posteriors = self._posteriors(Xinput)

        # Gather predictions of all local models
        localys = []
        for ilocal in range(nlocal):
            thisy = self.models[ilocal]._predict_values(Xinput)
            localys.append(thisy.copy())

        # for the weighted average prediction
        Yhat = np.zeros(nevals)
        for ip in range(nevals):
            for ilocal in range(nlocal):
                Yhat[ip] += posteriors[ip, ilocal] * localys[ilocal][ip]

        return Yhat
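
For completeness, a hedged usage sketch matching the docstring: build the mixture for one coefficient and predict at a batch of points. It assumes the cached ./data/*.npy files loaded by _setup exist; the evaluation points are illustrative.

import numpy as np

clmodel = MoE(func='Cl', nlocal=20)        # loads ./data/Cl0.npy ... ./data/Cl19.npy and ./data/GMM_Cl.npy
dim = clmodel.models[0].X_norma.shape[1]   # input dimension expected by the local models
X = np.random.rand(10, dim)                # 10 illustrative evaluation points
yhat = clmodel.predict(X)                  # weighted-average prediction, shape (10,)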