Example #1
 def __len__(self):
     length = 1
     for v in self.state.values():
         if np.length(v) == 1:
             pass
         elif length == 1:
             length = np.length(v)
         elif length == np.length(v):
             pass
         else:
             raise ValueError(
                 "State variables are appear vectorized, but of different lengths!"
             )
     return length
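A minimal sketch of the behavior this `__len__` relies on, assuming aerosandbox.numpy's `np.length()` reports 1 for scalars and the element count for vectors (the state dict below is illustrative):

import aerosandbox.numpy as np

state = {
    "x_e": 0.0,                      # scalar -> np.length == 1, ignored by the loop above
    "u_b": np.linspace(0, 100, 50),  # first vectorized entry -> sets length to 50
    "w_b": np.zeros(50),             # same length -> consistent, no error
}
print({k: np.length(v) for k, v in state.items()})  # {'x_e': 1, 'u_b': 50, 'w_b': 50}
# The __len__ above would return 50 for this state; a 30-element entry would raise ValueError.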
Example #2
    def alpha(
        self,
        alpha: Union[float, np.ndarray],
        start_at: Union[float, None] = 0,
    ) -> Dict[str, np.ndarray]:
        """
        Execute XFoil at a given angle of attack, or at a sequence of angles of attack.

        Args:

            alpha: The angle of attack [degrees]. Can be either a float or an iterable of floats, such as an array.

            start_at: Chooses whether to split a large sweep into two runs that diverge away from some central value,
            to improve convergence. As an example, if you wanted to sweep from alpha=-20 to alpha=20, you might want
            to instead do two sweeps and stitch them together: 0 to 20, and 0 to -20. `start_at` can be either:

                * None, in which case the alpha inputs are run as a single sequence in the order given.

                * A float that corresponds to an angle of attack (in degrees), in which case the alpha inputs are
                split into two sequences that diverge from the `start_at` value. Successful runs are then sorted by
                `alpha` before returning.

        Returns: A dictionary with the XFoil results. Dictionary values are arrays; they may not be the same shape as
        your input array if some points did not converge.

        """
        alphas = np.array(alpha).reshape(-1)

        if np.length(alphas) > 1:
            if start_at is not None:
                if np.min(alphas) < start_at < np.max(alphas):
                    alphas = np.sort(alphas)
                    alphas_upper = alphas[alphas > start_at]
                    alphas_lower = alphas[alphas <= start_at][::-1]

                    output = self._run_xfoil(
                        "\n".join([f"a {a}" for a in alphas_upper] + ["init"] +
                                  [f"a {a}" for a in alphas_lower]))

                    sort_order = np.argsort(output['alpha'])
                    output = {k: v[sort_order] for k, v in output.items()}
                    return output

        return self._run_xfoil("\n".join([f"a {a}" for a in alphas]))
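A hedged usage sketch of the sweep-splitting behavior documented above; the XFoil constructor arguments are illustrative, and result keys other than "alpha" depend on the analysis:

import aerosandbox as asb
import aerosandbox.numpy as np

xf = asb.XFoil(
    airfoil=asb.Airfoil("naca0012"),  # illustrative setup
    Re=1e6,
)

# A wide sweep; with start_at=0 (the default), this is internally run as two diverging
# sub-sweeps (0 -> +20 and 0 -> -20), and the converged points are re-sorted by alpha.
results = xf.alpha(alpha=np.arange(-20, 21), start_at=0)

# Values are arrays and may be shorter than the input if some points did not converge.
print(results["alpha"])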
Example #3
    def __init__(self,
                 x_data: Union[np.ndarray, Dict[str, np.ndarray]],
                 y_data: np.ndarray,
                 x_data_resample: Union[int, Dict[str, Union[int, np.ndarray]]] = 10,
                 resampling_interpolator: object = interpolate.RBFInterpolator,
                 resampling_interpolator_kwargs: Dict[str, Any] = None,
                 fill_value=np.NaN,  # Default behavior: return NaN for all inputs outside data range.
                 interpolated_model_kwargs: Dict[str, Any] = None,
                 ):
        """
        Creates the interpolator. Note that data must be unstructured (i.e., point cloud) for general N-dimensional
        interpolation.

        Note that if the data is either 1D or already structured, the structured InterpolatedModel initializer is used directly (see the `try` block in the implementation below).

        Args:

            x_data: Values of the independent variable(s) in the dataset to be fitted. This is a dictionary; syntax is {
            var_name:var_data}.

                * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead supply x_data
                as a 1D ndarray. (If you do this, just treat `x` as an array in your model, not a dict.)

            y_data: Values of the dependent variable in the dataset to be fitted. [1D ndarray of length n]

            x_data_resample: A parameter that guides how the x_data should be resampled onto a structured grid.

                * If this is an int, we look at each axis of the `x_data` (here, we'll call this `xi`),
                and we resample onto a linearly-spaced grid between `min(xi)` and `max(xi)` with `x_data_resample`
                points.

                * If this is a dict, it must be a dict where the keys are strings matching the keys of (the
                dictionary) `x_data`. The values can either be ints or 1D np.ndarrays.

                    * If the values are ints, then that axis is linearly spaced between `min(xi)` and `max(xi)` with
                    that number of points.

                    * If the values are 1D np.ndarrays, then those 1D np.ndarrays are used as the resampled spacing
                    for the given axis.

            resampling_interpolator: Indicates the interpolator to use in order to resample the unstructured data
            onto a structured grid. Should be analogous to scipy.interpolate.RBFInterpolator in __init__ and __call__
            syntax. See reference here:

                * https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RBFInterpolator.html

            resampling_interpolator_kwargs: Indicates keyword arguments (keyword-value pairs, as a dictionary) to
            pass into the resampling interpolator.

            fill_value: Gives the value that the interpolator should return for points outside of the interpolation
            domain. The interpolation domain is defined as the hypercube bounded by the coordinates specified in
            `x_data_resample`. By default, these coordinates are the tightest axis-aligned hypercube that bounds the
            point cloud data. If fill_value is None, then the interpolator will attempt to extrapolate if the interpolation method allows.

            interpolated_model_kwargs: Indicates keyword arguments to pass into the (structured) InterpolatedModel.
            Also a dictionary. See aerosandbox.InterpolatedModel for documentation on possible inputs here.

        """
        if resampling_interpolator_kwargs is None:
            resampling_interpolator_kwargs = {}
        if interpolated_model_kwargs is None:
            interpolated_model_kwargs = {}

        try:  # Try to use the InterpolatedModel initializer. If it doesn't work, then move on.
            super().__init__(
                x_data_coordinates=x_data,
                y_data_structured=y_data,
            )
            return
        except ValueError:
            pass

        # If it didn't work, this implies that x_data is multidimensional, and hence a dict-like object. Validate this.
        try:  # Determine type of `x_data`
            x_data.keys()
            x_data.values()
            x_data.items()
        except AttributeError:
            raise TypeError("`x_data` must be a dict-like object!")

        # Make the interpolator, based on x_data and y_data.
        if resampling_interpolator == interpolate.RBFInterpolator:
            resampling_interpolator_kwargs = {
                "kernel": "thin_plate_spline",
                "degree": 1,
                **resampling_interpolator_kwargs
            }

        interpolator = resampling_interpolator(
            y=np.stack(tuple(x_data.values()), axis=1),
            d=y_data,
            **resampling_interpolator_kwargs
        )

        # If x_data_resample is an int, make it into a dict that matches x_data.
        if isinstance(x_data_resample, int):
            x_data_resample = {
                k: x_data_resample
                for k in x_data.keys()
            }

        # Now, x_data_resample should be dict-like. Validate this.
        try:
            x_data_resample.keys()
            x_data_resample.values()
            x_data_resample.items()
        except AttributeError:
            raise TypeError("`x_data_resample` must be a dict-like object!")

        # Go through x_data_resample, and replace any values that are ints with linspaced arrays.
        for k, v in x_data_resample.items():
            if isinstance(v, int):
                x_data_resample[k] = np.linspace(
                    np.min(x_data[k]),
                    np.max(x_data[k]),
                    v
                )

        x_data_coordinates: Dict = x_data_resample

        x_data_structured_values = [
            xi.flatten()
            for xi in np.meshgrid(*x_data_coordinates.values(), indexing="ij")
        ]
        x_data_structured = {
            k: xi
            for k, xi in zip(x_data.keys(), x_data_structured_values)
        }

        y_data_structured = interpolator(
            np.stack(tuple(x_data_structured_values), axis=1)
        )
        y_data_structured = y_data_structured.reshape([
            np.length(xi)
            for xi in x_data_coordinates.values()
        ])

        interpolated_model_kwargs = {
            "fill_value": fill_value,
            **interpolated_model_kwargs
        }

        super().__init__(
            x_data_coordinates=x_data_coordinates,
            y_data_structured=y_data_structured,
            **interpolated_model_kwargs,
        )

        self.x_data_raw_unstructured = x_data
        self.y_data_raw = y_data
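A short usage sketch of the resample-then-interpolate workflow above, assuming this initializer belongs to aerosandbox's `UnstructuredInterpolatedModel` (the scattered data below is synthetic and purely illustrative):

import aerosandbox as asb
import numpy as np

# Synthetic unstructured (point-cloud) data: z = f(x, y) at 200 scattered points.
rng = np.random.default_rng(0)
x = rng.uniform(-2, 2, 200)
y = rng.uniform(-2, 2, 200)
z = np.exp(-(x ** 2 + y ** 2))

model = asb.UnstructuredInterpolatedModel(
    x_data={"x": x, "y": y},
    y_data=z,
    x_data_resample=30,  # resample each axis onto 30 linearly-spaced points
)

# Evaluate at a new point; inputs outside the data's bounding box return fill_value (NaN by default).
print(model({"x": 0.5, "y": -0.25}))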
Example #4
    def derivative_of(self,
                      variable: cas.MX,
                      with_respect_to: Union[np.ndarray, cas.MX],
                      derivative_init_guess: Union[float, np.ndarray],  # TODO add default
                      derivative_scale: float = None,
                      method: str = "midpoint",
                      explicit: bool = False  # TODO implement explicit
                      ) -> cas.MX:
        """
        Returns a quantity that is either defined or constrained to be a derivative of an existing variable.

        For example:

        >>> opti = Opti()
        >>> position = opti.variable(init_guess=0, n_vars=100)
        >>> time = np.linspace(0, 1, 100)
        >>> velocity = opti.derivative_of(position, with_respect_to=time)
        >>> acceleration = opti.derivative_of(velocity, with_respect_to=time)

        Args:

            variable: The variable or quantity that you are taking the derivative of. The "numerator" of the
            derivative, in colloquial parlance.

            with_respect_to: The variable or quantity that you are taking the derivative with respect to. The
            "denominator" of the derivative, in colloquial parlance.

                In a typical example case, this `with_respect_to` parameter would be time. Please make sure that the
                value of this parameter is monotonically increasing, otherwise you may get nonsensical answers.

            derivative_init_guess: Initial guess for the value of the derivative. Should be either a float (in which
            case the initial guess will be a vector equal to this value) or a vector of initial guesses with the same
            length as `variable`. For more info, look at the docstring of opti.variable()'s `init_guess` parameter.

            derivative_scale: Scale factor for the value of the derivative. For more info, look at the docstring of
            opti.variable()'s `scale` parameter.

            method: The type of integrator to use to define this derivative. Options are:

                * "forward euler" - a first-order-accurate forward Euler method

                    Citation: https://en.wikipedia.org/wiki/Euler_method

                * "backwards euler" - a first-order-accurate backwards Euler method

                    Citation: https://en.wikipedia.org/wiki/Backward_Euler_method

                * "midpoint" or "trapezoid" - a second-order-accurate midpoint method

                    Citation: https://en.wikipedia.org/wiki/Midpoint_method

                * "simpson" - Simpson's rule for integration

                    Citation: https://en.wikipedia.org/wiki/Simpson%27s_rule

                * "runge-kutta" or "rk4" - a fourth-order-accurate Runge-Kutta method. I suppose that technically,
                "forward euler", "backward euler", and "midpoint" are all (lower-order) Runge-Kutta methods...

                    Citation: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#The_Runge%E2%80%93Kutta_method

                * "runge-kutta-3/8" - A modified version of the Runge-Kutta 4 proposed by Kutta in 1901. Also
                fourth-order-accurate, but all of the error coefficients are smaller than they are in the standard
                Runge-Kutta 4 method. The downside is that more floating point operations are required per timestep,
                as the Butcher tableau is more dense (i.e. not banded).

                    Citation: Kutta, Martin (1901), "Beitrag zur näherungsweisen Integration totaler
                    Differentialgleichungen", Zeitschrift für Mathematik und Physik, 46: 435–453

            explicit: If true, returns an explicit derivative rather than an implicit one. In other words,
            this *defines* the output to be a derivative of the input rather than *constraining* the output to be a
            derivative of the input.

                Explicit derivatives result in smaller, denser systems of equations that are more akin to
                shooting-type methods. Implicit derivatives result in larger, sparser systems of equations that are
                more akin to collocation methods. Explicit derivatives are better for simple, stable systems with few
                states, while implicit derivatives are better for complex, potentially-unstable systems with many
                states.

                # TODO implement explicit



        Returns: A vector consisting of the derivative of the parameter `variable` with respect to `with_respect_to`.

        """
        ### Set defaults
        # if with_respect_to is None:
        #     with_respect_to = np.ones(shape=np.length(variable)) # TODO consider whether we want to even allow this...
        # if derivative_init_guess is None:
        #     raise NotImplementedError() # TODO implement default value for this

        ### Check inputs
        N = np.length(variable)
        if not np.length(with_respect_to) == N:
            raise ValueError("The inputs `variable` and `with_respect_to` must be vectors of the same length!")

        ### Clean inputs
        method = method.lower()

        ### Implement the derivative
        if not explicit:
            derivative = self.variable(
                init_guess=derivative_init_guess,
                n_vars=N,
                scale=derivative_scale,
            )

            self.constrain_derivative(
                derivative=derivative,
                variable=variable,
                with_respect_to=with_respect_to,
                method=method,
            )

        else:
            raise NotImplementedError("Haven't yet implemented explicit derivatives! Use implicit ones for now...")

        return derivative
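A sketch extending the docstring example above into a small, solvable problem: constant-velocity kinematics posed with an implicit (collocation-style) derivative. Only the arguments documented above are used.

import aerosandbox as asb
import aerosandbox.numpy as np

opti = asb.Opti()

time = np.linspace(0, 1, 100)
position = opti.variable(init_guess=np.zeros(100))

# A new variable, constrained (via the default "midpoint" integrator) to be d(position)/d(time).
velocity = opti.derivative_of(
    position,
    with_respect_to=time,
    derivative_init_guess=0,
)

opti.subject_to([
    velocity == 2,      # dynamics: constant velocity of 2
    position[0] == 0,   # boundary condition
])

sol = opti.solve()
print(sol.value(position)[-1])  # ~2.0 after 1 unit of time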
Example #5
    def variable(self,
                 init_guess: Union[float, np.ndarray],
                 n_vars: int = None,
                 scale: float = None,
                 freeze: bool = False,
                 log_transform: bool = False,
                 category: str = "Uncategorized",
                 lower_bound: float = None,
                 upper_bound: float = None,
                 ) -> cas.MX:
        """
        Initializes a new decision variable (or vector of decision variables). You must pass an initial guess (
        `init_guess`) upon defining a new variable. Dimensionality is inferred from this initial guess, but it can be
        overridden; see below for syntax.

        It is highly, highly recommended that you provide a scale (`scale`) for each variable, especially for
        nonconvex problems, although this is not strictly required.

        Args:

            init_guess: Initial guess for the optimal value of the variable being initialized. This is where in the
            design space the optimizer will start looking.

                This can be either a float or a NumPy ndarray; the dimension of the variable (i.e. scalar,
                vector) that is created will be automatically inferred from the shape of the initial guess you
                provide here. (Although it can also be overridden using the `n_vars` parameter; see below.)

                For scalar variables, your initial guess should be a float:

                >>> opti = asb.Opti()
                >>> scalar_var = opti.variable(init_guess=5) # Initializes a scalar variable at a value of 5

                For vector variables, your initial guess should be either:

                    * a float, in which case you must pass the length of the vector as `n_vars`, otherwise a scalar
                    variable will be created:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
                    >>> # 10, with all 10 elements set to an initial guess of 5.

                    * a NumPy ndarray, in which case each element will be initialized to the corresponding value in
                    the given array:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=np.linspace(0, 5, 10)) # Initializes a vector variable of
                    >>> # length 10, with all 10 elements initialized to linearly vary between 0 and 5.

                In the case where the variable is to be log-transformed (see `log_transform`), the initial guess
                should not be log-transformed as well - just supply the initial guess as usual. (Log-transform of the
                initial guess happens under the hood.) The initial guess must, of course, be a positive number in
                this case.

            n_vars: [Optional] Used to manually override the dimensionality of the variable to create; if not
            provided, the dimensionality of the variable is inferred from the initial guess `init_guess`.

                The only real case where you need to use this argument would be if you are initializing a vector
                variable to a scalar value, but you don't feel like using `init_guess=value * np.ones(n_vars)`.
                For example:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
                    >>> # 10, with all 10 elements set to an initial guess of 5.

            scale: [Optional] Approximate scale of the variable.

                For example, if you're optimizing the design of an automobile and setting the tire diameter as an
                optimization variable, you might choose `scale=0.5`, corresponding to 0.5 meters.

                Properly scaling your variables can have a huge impact on solution speed (or even if the optimizer
                converges at all). Although most modern second-order optimizers (such as IPOPT, used here) are
                theoretically scale-invariant, numerical precision issues due to floating-point arithmetic can make
                solving poorly-scaled problems really difficult or impossible. See here for more info:
                https://web.casadi.org/blog/nlp-scaling/

                If not specified, the code will try to pick a sensible value by defaulting to the `init_guess`.

            freeze: [Optional] This boolean tells the optimizer to "freeze" the variable at a specific value. In
            order to determine the value to freeze the variable at, the optimizer will use the following logic:

                    * If you initialize a new variable with the parameter `freeze=True`: the optimizer will freeze
                    the variable at the value of initial guess.

                        >>> opti = Opti()
                        >>> my_var = opti.variable(init_guess=5, freeze=True) # This will freeze my_var at a value of 5.

                    * If the Opti instance is associated with a cache file, and you told it to freeze a specific
                    category(s) of variables that your variable is a member of, and you didn't manually specify to
                    freeze the variable: the variable will be frozen based on the value in the cache file (and ignore
                    the `init_guess`). Example:

                        >>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
                        >>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
                        >>> my_var = opti.variable(init_guess=5, category="Wheel Sizing")
                        >>> # This will freeze my_var at a value of 10 (from the cache file, not the init_guess)

                    * If the Opti instance is associated with a cache file, and you told it to freeze a specific
                    category(s) of variables that your variable is a member of, but you then manually specified that
                    the variable should be frozen: the variable will once again be frozen at the value of `init_guess`:

                        >>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
                        >>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
                        >>> my_var = opti.variable(init_guess=5, category="Wheel Sizing", freeze=True)
                        >>> # This will freeze my_var at a value of 5 (`freeze` overrides category loading.)

            Motivation for freezing variables:

                The ability to freeze variables is exceptionally useful when designing engineering systems. Let's say
                we're designing an airplane. In the beginning of the design process, we're doing "clean-sheet" design
                - any variable is up for grabs for us to optimize on, because the airplane doesn't exist yet!
                However, the farther we get into the design process, the more things get "locked in" - we may have
                ordered jigs, settled on a wingspan, chosen an engine, et cetera. So, if something changes later (
                let's say that we discover that one of our assumptions was too optimistic halfway through the design
                process), we have to make up for that lost margin using only the variables that are still free. To do
                this, we would freeze the variables that are already decided on.

                By categorizing variables, you can also freeze entire categories of variables. For example,
                you can freeze all of the wing design variables for an airplane but leave all of the fuselage
                variables free.

                This idea of freezing variables can also be used to look at off-design performance - freeze a
                design, but change the operating conditions.

            log_transform: [Optional] Advanced use only. A flag indicating whether to internally log-transform this variable
            before passing it to the optimizer. Good for known positive engineering quantities that become nonsensical
            if negative (e.g. mass). Log-transforming these variables can also help maintain convexity.

            category: [Optional] What category of variables does this belong to?

        Usage notes:

            When using vector variables, individual components of this vector of variables can be accessed via normal
            indexing. Example:
                >>> opti = asb.Opti()
                >>> my_var = opti.variable(init_guess=0, n_vars=5)
                >>> opti.subject_to(my_var[3] >= my_var[2])  # This is a valid way of indexing
                >>> my_sum = np.sum(my_var)  # This will sum up all elements of `my_var`

        Returns:
            The variable itself as a symbolic CasADi variable (MX type).

        """
        ### Set defaults
        if n_vars is None:  # Infer dimensionality from init_guess if it is not provided
            n_vars = np.length(init_guess)
        if scale is None:  # Infer a scale from init_guess if it is not provided
            if log_transform:
                scale = 1
            else:
                scale = np.mean(np.fabs(init_guess))  # Initialize the scale to a heuristic based on the init_guess
                if scale == 0:  # If that heuristic leads to a scale of 0, use a scale of 1 instead.
                    scale = 1

                # scale = np.fabs(
                #     np.where(
                #         init_guess != 0,
                #         init_guess,
                #         1
                #     ))

        # Validate the inputs
        if log_transform:
            if np.any(init_guess <= 0):
                raise ValueError(
                    "If you are initializing a log-transformed variable, the initial guess(es) must all be positive.")
        if np.any(scale <= 0):
            raise ValueError("The 'scale' argument must be a positive number.")

        # If the variable is in a category to be frozen, fix the variable at the initial guess.
        is_manually_frozen = freeze
        if category in self.variable_categories_to_freeze:
            freeze = True

        # If the variable is to be frozen, return the initial guess. Otherwise, define the variable using CasADi symbolics.
        if freeze:
            var = self.parameter(n_params=n_vars, value=init_guess)
        else:
            if not log_transform:
                var = scale * super().variable(n_vars)
                self.set_initial(var, init_guess)
            else:
                log_scale = scale / init_guess
                log_var = log_scale * super().variable(n_vars)
                var = np.exp(log_var)
                self.set_initial(log_var, np.log(init_guess))

        # Track the variable
        if category not in self.variables_categorized:  # Add a category if it does not exist
            self.variables_categorized[category] = []
        self.variables_categorized[category].append(var)
        var.is_manually_frozen = is_manually_frozen

        # Apply bounds
        if lower_bound is not None:
            self.subject_to(var >= lower_bound)
        if upper_bound is not None:
            self.subject_to(var <= upper_bound)

        return var
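A brief sketch combining several of the documented options (scale, bounds, and freezing); the numbers are illustrative:

import aerosandbox as asb

opti = asb.Opti()

# A scalar design variable with an explicit scale and simple bounds.
tire_diameter = opti.variable(
    init_guess=0.5,   # meters
    scale=0.5,
    lower_bound=0.2,
    upper_bound=1.0,
)

# A frozen variable behaves like a parameter fixed at its initial guess.
wingspan = opti.variable(init_guess=30, freeze=True)

opti.minimize((tire_diameter - 0.7) ** 2)
sol = opti.solve()
print(sol.value(tire_diameter), sol.value(wingspan))  # ~0.7, 30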
Example #6
    def parameter(self,
                  value: Union[float, np.ndarray] = 0.,
                  n_params: int = None,
                  ) -> cas.MX:
        """
        Initializes a new parameter (or vector of parameters). You must pass a value (`value`) upon defining a new
        parameter. Dimensionality is inferred from this value, but it can be overridden; see below for syntax.

        Args:

            value: Value to set the new parameter to.

                This can either be a float or a NumPy ndarray; the dimension of the parameter (i.e. scalar,
                vector) that is created will be automatically inferred from the shape of the value you provide here.
                (Although it can be overridden using the `n_params` parameter; see below.)

                For scalar parameters, your value should be a float:
                >>> opti = asb.Opti()
                >>> scalar_param = opti.parameter(value=5) # Initializes a scalar parameter and sets its value to 5.

                For vector parameters, your value should be either:

                    * a float, in which case you must pass the length of the vector as `n_params`, otherwise a scalar
                    parameter will be created:

                    >>> opti = asb.Opti()
                    >>> vector_param = opti.parameter(value=5, n_params=10) # Initializes a vector parameter of length
                    >>> # 10, with all 10 elements set to value of 5.

                    * a NumPy ndarray, in which case each element will be set to the corresponding value in the given
                    array:

                    >>> opti = asb.Opti()
                    >>> vector_param = opti.parameter(value=np.linspace(0, 5, 10)) # Initializes a vector parameter of
                    >>> # length 10, with all 10 elements set to a value varying from 0 to 5.

            n_params: [Optional] Used to manually override the dimensionality of the parameter to create; if not
            provided, the dimensionality of the parameter is inferred from `value`.

                The only real case where you need to use this argument would be if you are initializing a vector
                parameter to a scalar value, but you don't feel like using `value=my_value * np.ones(n_params)`.
                For example:

                    >>> opti = asb.Opti()
                    >>> vector_param = opti.parameter(value=5, n_params=10) # Initializes a vector parameter of length
                    >>> # 10, with all 10 elements set to a value of 5.

        Returns:
            The parameter itself as a symbolic CasADi variable (MX type).

        """
        # Infer dimensionality from value if it is not provided
        if n_params is None:
            n_params = np.length(value)

        # Create the parameter
        param = super().parameter(n_params)

        # Set the value of the parameter
        self.set_value(param, value)

        return param
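A sketch of a pattern this enables: sweeping a parameter with `set_value` (the same call this method uses above) and re-solving without rebuilding the problem. Names and values are illustrative.

import aerosandbox as asb

opti = asb.Opti()

target = opti.parameter(value=1.0)   # scalar parameter
x = opti.variable(init_guess=0)

opti.minimize((x - target) ** 2)

for value in [1.0, 2.0, 3.0]:
    opti.set_value(target, value)    # update the parameter in place
    sol = opti.solve(verbose=False)
    print(sol.value(x))              # tracks each target value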
Example #7
    def __init__(
        self,
        model: Callable[
            [Union[np.ndarray,
                   Dict[str, np.ndarray]], Dict[str, float]], np.ndarray],
        x_data: Union[np.ndarray, Dict[str, np.ndarray]],
        y_data: np.ndarray,
        parameter_guesses: Dict[str, float],
        parameter_bounds: Dict[str, tuple] = None,
        residual_norm_type: str = "L2",
        fit_type: str = "best",
        weights: np.ndarray = None,
        put_residuals_in_logspace: bool = False,
        verbose=True,
    ):
        """
        Fits an analytical model to n-dimensional unstructured data using an automatic-differentiable optimization approach.

        Args:

            model: The model that you want to fit your dataset to. This is a callable with syntax f(x, p) where:

                * x is a dict of independent variables. Same format as x_data [dict of 1D ndarrays of length n].

                    * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead interpret x
                    as a 1D ndarray. (If you do this, just give `x_data` as an array.)

                * p is a dict of parameters. Same format as param_guesses [dict with syntax param_name:param_value].

                Model should return a 1D ndarray of length n.

                Basically, if you've done it right:
                >>> model(x_data, parameter_guesses)
                should evaluate to a 1D ndarray where each x_data is mapped to something analogous to y_data. (The fit
                will likely be bad at this point, because we haven't yet optimized on param_guesses - but the types
                should be happy.)

                Model should use aerosandbox.numpy operators.

                The model is not allowed to make any in-place changes to the input `x`. The most common way this
                manifests itself is if someone writes something to the effect of `x += 3` or similar. Instead, write `x =
                x + 3`.

            x_data: Values of the independent variable(s) in the dataset to be fitted. This is a dictionary; syntax is {
            var_name:var_data}.

                * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead supply x_data
                as a 1D ndarray. (If you do this, just treat `x` as an array in your model, not a dict.)

            y_data: Values of the dependent variable in the dataset to be fitted. [1D ndarray of length n]

            parameter_guesses: a dict of fit parameters. Syntax is {param_name:param_initial_guess}.

                * Parameters will be initialized to the values set here; all parameters need an initial guess.

                * param_initial_guess is a float; note that only scalar parameters are allowed.

            parameter_bounds: Optional: a dict of bounds on fit parameters. Syntax is {"param_name":(min, max)}.

                * May contain only a subset of param_guesses if desired.

                * Use None to represent one-sided constraints (i.e. (None, 5)).

            residual_norm_type: What error norm should we minimize to optimize the fit parameters? Options:

                * "L1": minimize the L1 norm or sum(abs(error)). Less sensitive to outliers.

                * "L2": minimize the L2 norm, also known as the Euclidian norm, or sqrt(sum(error ** 2)). The default.

                * "Linf": minimize the L_infinty norm or max(abs(error)). More sensitive to outliers.

            fit_type: Should we find the model of best fit (i.e. the model that minimizes the specified residual norm),
            or should we look for a model that represents an upper/lower bound on the data (useful for robust surrogate
            modeling, so that you can put bounds on modeling error):

                * "best": finds the model of best fit. Usually, this is what you want.

                * "upper bound": finds a model that represents an upper bound on the data (while still trying to minimize
                the specified residual norm).

                * "lower bound": finds a model that represents a lower bound on the data (while still trying to minimize
                the specified residual norm).

            weights: Optional: weights for data points. If not supplied, weights are assumed to be uniform.

                * Weights are automatically normalized. [1D ndarray of length n]

            put_residuals_in_logspace: Whether to optimize using the logarithmic error as opposed to the absolute error
            (useful for minimizing percent error).

            Note: If any model outputs or data are negative, this will raise an error!

            verbose: Should the progress of the optimization solve that is part of the fitting be displayed? See
            `aerosandbox.Opti.solve(verbose=)` syntax for more details.

        Returns: A model in the form of a FittedModel object. Some things you can do:
            >>> y = FittedModel(x) # evaluate the FittedModel at new x points
            >>> FittedModel.parameters # directly examine the optimal values of the parameters that were found
            >>> FittedModel.plot() # plot the fit


        """
        super().__init__()

        ##### Prepare all inputs, check types/sizes.

        ### Flatten all inputs
        def flatten(input):
            return np.array(input).flatten()

        try:
            x_data = {k: flatten(v) for k, v in x_data.items()}
            x_data_is_dict = True
        except AttributeError:  # If it's not a dict or dict-like, assume it's a 1D ndarray dataset
            x_data = flatten(x_data)
            x_data_is_dict = False
        y_data = flatten(y_data)
        n_datapoints = np.length(y_data)

        ### Handle weighting
        if weights is None:
            weights = np.ones(n_datapoints)
        else:
            weights = flatten(weights)
        sum_weights = np.sum(weights)
        if sum_weights <= 0:
            raise ValueError("The weights must sum to a positive number!")
        if np.any(weights < 0):
            raise ValueError(
                "No entries of the weights vector are allowed to be negative!")
        weights = weights / np.sum(
            weights)  # Normalize weights so that they sum to 1.

        ### Check format of parameter_bounds input
        if parameter_bounds is None:
            parameter_bounds = {}
        for param_name, v in parameter_bounds.items():
            if param_name not in parameter_guesses.keys():
                raise ValueError(
                    f"A parameter name (key = \"{param_name}\") in parameter_bounds was not found in parameter_guesses."
                )
            if not np.length(v) == 2:
                raise ValueError(
                    "Every value in parameter_bounds must be a tuple in the format (lower_bound, upper_bound). "
                    "For one-sided bounds, use None for the unbounded side.")

        ### If putting residuals in logspace, check positivity
        if put_residuals_in_logspace:
            if not np.all(y_data > 0):
                raise ValueError(
                    "You can't fit a model with residuals in logspace if y_data is not entirely positive!"
                )

        ### Check dimensionality of inputs to fitting algorithm
        relevant_inputs = {
            "y_data": y_data,
            "weights": weights,
        }
        try:
            relevant_inputs.update(x_data)
        except TypeError:
            relevant_inputs.update({"x_data": x_data})

        for key, value in relevant_inputs.items():
            # Check that the length of the inputs are consistent
            series_length = np.length(value)
            if not series_length == n_datapoints:
                raise ValueError(
                    f"The supplied data series \"{key}\" has length {series_length}, but y_data has length {n_datapoints}."
                )

        ##### Formulate and solve the fitting optimization problem

        ### Initialize an optimization environment
        opti = Opti()

        ### Initialize the parameters as optimization variables
        params = {}
        for param_name, param_initial_guess in parameter_guesses.items():
            if param_name in parameter_bounds:
                params[param_name] = opti.variable(
                    init_guess=param_initial_guess,
                    lower_bound=parameter_bounds[param_name][0],
                    upper_bound=parameter_bounds[param_name][1],
                )
            else:
                params[param_name] = opti.variable(
                    init_guess=param_initial_guess, )

        ### Evaluate the model at the data points you're trying to fit
        x_data_original = copy.deepcopy(
            x_data
        )  # Make a copy of x_data so that you can determine if the model did in-place operations on x and tattle on the user.

        try:
            y_model = model(x_data, params)  # Evaluate the model
        except Exception:
            raise Exception("""
            There was an error when evaluating the model you supplied with the x_data you supplied.
            Likely possible causes:
                * Your model() does not have the call syntax model(x, p), where x is the x_data and p are parameters.
                * Your model should take in p as a dict of parameters, but it does not.
                * Your model assumes x is an array-like but you provided x_data as a dict, or vice versa.
            See the docstring of FittedModel() if you have other usage questions or would like to see examples.
            """)

        try:  ### If the model did in-place operations on x_data, throw an error
            x_data_is_unchanged = np.all(x_data == x_data_original)
        except ValueError:
            x_data_is_unchanged = np.all([
                x_series == x_series_original
                for x_series, x_series_original in zip(x_data, x_data_original)
            ])
        if not x_data_is_unchanged:
            raise TypeError(
                "model(x_data, parameter_guesses) did in-place operations on x, which is not allowed!"
            )
        if y_model is None:  # Make sure that y_model actually returned something sensible
            raise TypeError(
                "model(x_data, parameter_guesses) returned None, when it should've returned a 1D ndarray."
            )

        ### Compute how far off you are (error)
        if not put_residuals_in_logspace:
            error = y_model - y_data
        else:
            y_model = np.fmax(
                y_model, 1e-300
            )  # Keep y_model very slightly always positive, so that log() doesn't NaN.
            error = np.log(y_model) - np.log(y_data)

        ### Set up the optimization problem to minimize some norm(error), which looks different depending on the norm used:
        if residual_norm_type.lower() == "l1":  # Minimize the L1 norm
            abs_error = opti.variable(init_guess=0, n_vars=np.length(
                y_data))  # Make the abs() of each error entry an opt. var.
            opti.subject_to([
                abs_error >= error,
                abs_error >= -error,
            ])
            opti.minimize(np.sum(weights * abs_error))

        elif residual_norm_type.lower() == "l2":  # Minimize the L2 norm
            opti.minimize(np.sum(weights * error**2))

        elif residual_norm_type.lower(
        ) == "linf":  # Minimize the L-infinity norm
            linf_value = opti.variable(
                init_guess=0
            )  # Make the value of the L-infinity norm an optimization variable
            opti.subject_to([
                linf_value >= weights * error, linf_value >= -weights * error
            ])
            opti.minimize(linf_value)

        else:
            raise ValueError("Bad input for the 'residual_type' parameter.")

        ### Add in the constraints specified by fit_type, which force the model to stay above / below the data points.
        if fit_type == "best":
            pass
        elif fit_type == "upper bound":
            opti.subject_to(y_model >= y_data)
        elif fit_type == "lower bound":
            opti.subject_to(y_model <= y_data)
        else:
            raise ValueError("Bad input for the 'fit_type' parameter.")

        ### Solve
        sol = opti.solve(verbose=verbose)

        ##### Construct a FittedModel

        ### Create a vector of solved parameters
        params_solved = {}
        for param_name in params:
            try:
                params_solved[param_name] = sol.value(params[param_name])
            except:
                params_solved[param_name] = np.NaN

        ### Store all the data and inputs
        self.model = model
        self.x_data = x_data
        self.y_data = y_data
        self.parameters = params_solved
        self.parameter_guesses = parameter_guesses
        self.parameter_bounds = parameter_bounds
        self.residual_norm_type = residual_norm_type
        self.fit_type = fit_type
        self.weights = weights
        self.put_residuals_in_logspace = put_residuals_in_logspace
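A short end-to-end sketch of the workflow this initializer implements, assuming it belongs to aerosandbox's `FittedModel`; the exponential model and synthetic data are illustrative only:

import aerosandbox as asb
import aerosandbox.numpy as np

# Synthetic 1D data approximately following y = 3 * exp(0.5 * x).
x_data = np.linspace(0, 5, 50)
y_data = 3 * np.exp(0.5 * x_data) * (1 + 0.01 * np.sin(10 * x_data))

def model(x, p):
    # 1D problem, so x is treated as an array; p is a dict of scalar parameters.
    return p["a"] * np.exp(p["b"] * x)

fitted = asb.FittedModel(
    model=model,
    x_data=x_data,
    y_data=y_data,
    parameter_guesses={"a": 1, "b": 1},
    parameter_bounds={"a": (0, None)},  # one-sided bound on "a"
    verbose=False,
)

print(fitted.parameters)  # approximately {'a': 3, 'b': 0.5}
y_new = fitted(x_data)    # evaluate the fitted model at (new) x points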
Example #8
    def draw(
        self,
        vehicle_model: Airplane = None,
        backend: str = "pyvista",
        draw_axes: bool = True,
        scale_vehicle_model: Union[float, None] = None,
        n_vehicles_to_draw: int = 10,
        cg_axes: str = "geometry",
        show: bool = True,
    ):
        if backend == "pyvista":
            import pyvista as pv
            import aerosandbox.tools.pretty_plots as p

            if vehicle_model is None:
                default_vehicle_stl = _asb_root / "dynamics/visualization/default_assets/yf23.stl"
                vehicle_model = pv.read(str(default_vehicle_stl))
            elif isinstance(vehicle_model, pv.PolyData):
                pass
            elif isinstance(vehicle_model, Airplane):
                vehicle_model = vehicle_model.draw(backend="pyvista",
                                                   show=False)
                vehicle_model.rotate_y(
                    180)  # Rotate from geometry axes to body axes.
            elif isinstance(
                    vehicle_model, str
            ):  # Interpret the string as a filepath to a .stl or similar
                try:
                    vehicle_model = pv.read(filename=vehicle_model)
                except Exception:
                    raise ValueError("Could not parse `vehicle_model`!")
            else:
                raise TypeError(
                    "`vehicle_model` should be an Airplane or PolyData object."
                )

            x_e = np.array(self.x_e)
            y_e = np.array(self.y_e)
            z_e = np.array(self.z_e)
            if np.length(x_e) == 1:
                x_e = x_e * np.ones(len(self))
            if np.length(y_e) == 1:
                y_e = y_e * np.ones(len(self))
            if np.length(z_e) == 1:
                z_e = z_e * np.ones(len(self))

            if scale_vehicle_model is None:
                trajectory_bounds = np.array([
                    [x_e.min(), x_e.max()],
                    [y_e.min(), y_e.max()],
                    [z_e.min(), z_e.max()],
                ])
                trajectory_size = np.max(np.diff(trajectory_bounds, axis=1))

                vehicle_bounds = np.array(vehicle_model.bounds).reshape((3, 2))
                vehicle_size = np.max(np.diff(vehicle_bounds, axis=1))

                scale_vehicle_model = 0.1 * trajectory_size / vehicle_size

            ### Initialize the plotter
            plotter = pv.Plotter()

            # Set the window title
            title = "ASB Dynamics"
            addenda = []
            if scale_vehicle_model != 1:
                addenda.append(
                    f"Vehicle drawn at {scale_vehicle_model:.2g}x scale")
            addenda.append(f"{self.__class__.__name__} Engine")
            if len(addenda) != 0:
                title = title + f" ({'; '.join(addenda)})"
            plotter.title = title

            # Draw axes and grid
            plotter.add_axes()
            plotter.show_grid(color='gray')

            ### Draw the vehicle
            for i in np.unique(
                    np.round(np.linspace(0,
                                         len(self) - 1,
                                         n_vehicles_to_draw))).astype(int):
                dyn = self[i]
                try:
                    phi = dyn.phi
                except AttributeError:
                    phi = dyn.bank
                try:
                    theta = dyn.theta
                except AttributeError:
                    theta = dyn.gamma
                try:
                    psi = dyn.psi
                except AttributeError:
                    psi = dyn.track

                x_cg_b, y_cg_b, z_cg_b = dyn.convert_axes(dyn.mass_props.x_cg,
                                                          dyn.mass_props.y_cg,
                                                          dyn.mass_props.z_cg,
                                                          from_axes=cg_axes,
                                                          to_axes="body")

                this_vehicle = copy.deepcopy(vehicle_model)
                this_vehicle.translate([
                    -x_cg_b,
                    -y_cg_b,
                    -z_cg_b,
                ],
                                       inplace=True)
                this_vehicle.points *= scale_vehicle_model
                this_vehicle.rotate_x(np.degrees(phi), inplace=True)
                this_vehicle.rotate_y(np.degrees(theta), inplace=True)
                this_vehicle.rotate_z(np.degrees(psi), inplace=True)
                this_vehicle.translate([
                    dyn.x_e,
                    dyn.y_e,
                    dyn.z_e,
                ],
                                       inplace=True)
                plotter.add_mesh(this_vehicle, )
                if draw_axes:
                    rot = np.rotation_matrix_from_euler_angles(phi, theta, psi)
                    axes_scale = 0.5 * np.max(
                        np.diff(np.array(this_vehicle.bounds).reshape((3, -1)),
                                axis=1))
                    origin = np.array([
                        dyn.x_e,
                        dyn.y_e,
                        dyn.z_e,
                    ])
                    for i, c in enumerate(["r", "g", "b"]):
                        plotter.add_mesh(
                            pv.Spline(
                                np.array(
                                    [origin,
                                     origin + rot[:, i] * axes_scale])),
                            color=c,
                            line_width=2.5,
                        )

            ### Draw the trajectory line
            polyline = pv.Spline(np.array([x_e, y_e, z_e]).T)
            plotter.add_mesh(
                polyline,
                color=p.adjust_lightness(p.palettes["categorical"][0],
                                         1.2),
                line_width=3,
            )

            ### Finalize the plotter
            plotter.camera.up = (0, 0, -1)
            plotter.camera.Azimuth(90)
            plotter.camera.Elevation(60)
            if show:
                plotter.show()
            return plotter
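A minimal usage sketch of this method's options; `dyn` is assumed to be an already-populated instance (with vectorized trajectory states) of the dynamics class this method belongs to, so the snippet is not self-contained:

# `dyn` is assumed to exist, e.g. as the result of a trajectory solve.
plotter = dyn.draw(
    scale_vehicle_model=None,  # auto-scales the vehicle to ~10% of the trajectory extent
    n_vehicles_to_draw=5,      # draw 5 vehicle instances spaced along the trajectory
    cg_axes="geometry",        # axes in which the mass_props CG location is given
    draw_axes=True,            # draw body-axes triads at each drawn vehicle
    show=False,                # return the pyvista Plotter instead of opening a window
)
plotter.show()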
Example #9
    def mesh_thin_surface(
        self,
        method="tri",
        chordwise_resolution: int = 36,
        spanwise_resolution: int = 1,
        chordwise_spacing: str = "cosine",
        spanwise_spacing: str = "uniform",
        add_camber: bool = True,
    ) -> Tuple[np.ndarray, List[List[int]]]:
        """
        Meshes the mean camber line of the wing as a thin-sheet body.

        Uses the `(points, faces)` standard mesh format. For reference on this format, see the documentation in
        `aerosandbox.geometry.mesh_utilities`.

        Order of faces:
            * On the right wing (or, if `Wing.symmetric` is `False`, just the wing itself):
                * First face is the face nearest the leading edge of the wing root.
                * Proceeds along a chordwise strip to the trailing edge.
                * Then, goes to the subsequent spanwise location and does another chordwise strip, et cetera until
                  we get to the wing tip.
            * On the left wing (applicable only if `Wing.symmetric` is `True`):
                * Same order: Starts at the root leading edge, goes in chordwise strips.

        Order of vertices within each face:
            * On the right wing (or, if `Wing.symmetric` is `False`, just the wing itself):
                * Front-left
                * Back-left
                * Back-right
                * Front-right
            * On the left wing (applicable only if `Wing.symmetric` is `True`):
                * Front-left
                * Back-left
                * Back-right
                * Front-right

        Args:
            method: Allows choice between "tri" and "quad" meshing.
            chordwise_resolution: Controls the chordwise resolution of the meshing.
            spanwise_resolution: Controls the spanwise resolution of the meshing.
            chordwise_spacing: Controls the chordwise spacing of the meshing. Can be "uniform" or "cosine".
            spanwise_spacing: Controls the spanwise spacing of the meshing. Can be "uniform" or "cosine".
            add_camber: Controls whether to mesh the thin surface with camber (i.e., mean camber line), or just the flat planform.

        Returns: (points, faces) in standard mesh format.

        """
        if chordwise_spacing == "cosine":
            space = np.cosspace
        elif chordwise_spacing == "uniform":
            space = np.linspace
        else:
            raise ValueError("Bad value of 'chordwise_spacing'")

        x_nondim = space(0, 1, chordwise_resolution + 1)

        spanwise_strips = []
        for x_n in x_nondim:
            spanwise_strips.append(
                self.mesh_line(x_nondim=x_n,
                               y_nondim=0,
                               add_camber=add_camber,
                               spanwise_resolution=spanwise_resolution,
                               spanwise_spacing=spanwise_spacing))

        points = np.concatenate(spanwise_strips)

        faces = []

        num_i = np.length(spanwise_strips[0])  # spanwise
        num_j = np.length(spanwise_strips)  # chordwise

        def index_of(iloc, jloc):
            return iloc + jloc * num_i

        def add_face(*indices):
            entry = list(indices)
            if method == "quad":
                faces.append(entry)
            elif method == "tri":
                faces.append([entry[0], entry[1], entry[3]])
                faces.append([entry[1], entry[2], entry[3]])

        for i in range(num_i - 1):
            for j in range(num_j - 1):
                add_face(  # On right wing:
                    index_of(i, j),  # Front-left
                    index_of(i, j + 1),  # Back-left
                    index_of(i + 1, j + 1),  # Back-right
                    index_of(i + 1, j),  # Front-right
                )

        if self.symmetric:
            index_offset = np.length(points)

            points = np.concatenate(
                [points, np.multiply(points, np.array([[1, -1, 1]]))])

            def index_of(iloc, jloc):
                return index_offset + iloc + jloc * num_i

            for i in range(num_i - 1):
                for j in range(num_j - 1):
                    add_face(  # On left wing:
                        index_of(i + 1, j),  # Front-left
                        index_of(i + 1, j + 1),  # Back-left
                        index_of(i, j + 1),  # Back-right
                        index_of(i, j),  # Front-right
                    )

        faces = np.array(faces)

        return points, faces
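A tiny worked example of the `index_of` flattening used above: with `num_i` points per spanwise strip, grid point `(iloc, jloc)` lands at flat index `iloc + jloc * num_i` in the concatenated `points` array. The numbers are illustrative.

num_i = 4  # suppose each spanwise strip has 4 points

def index_of(iloc, jloc):
    return iloc + jloc * num_i

# The quad face whose front-left corner is grid point (i=1, j=0) uses these vertices:
quad = [
    index_of(1, 0),  # Front-left  -> 1
    index_of(1, 1),  # Back-left   -> 5
    index_of(2, 1),  # Back-right  -> 6
    index_of(2, 0),  # Front-right -> 2
]
print(quad)  # [1, 5, 6, 2]
# With method="tri", add_face() splits this quad into triangles [1, 5, 2] and [5, 6, 2].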
Example #10
def finite_difference_coefficients(
    x: np.ndarray,
    x0: float = 0,
    derivative_degree: int = 1,
) -> np.ndarray:
    """
    Computes the weights (coefficients) in compact finite difference formulas for any order of derivative
    and to any order of accuracy on one-dimensional grids with arbitrary spacing.

    (Wording above is taken from the paper below, as are docstrings for parameters.)

    Modified from an implementation of:

        Fornberg, Bengt, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids". Oct. 1988.
        Mathematics of Computation, Volume 51, Number 184, pages 699-706.

        PDF: https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf

        More detail: https://en.wikipedia.org/wiki/Finite_difference_coefficient

    Args:

        derivative_degree: The degree of the derivative that you are interested in obtaining. (denoted "M" in the
        paper)

        x: The grid points (not necessarily uniform or in order) that you want to obtain weights for. You must
        provide at least as many grid points as the degree of the derivative that you're interested in, plus 1.

            The order of accuracy of your derivative depends in part on the number of grid points that you provide.
            Specifically:

                order_of_accuracy = n_grid_points - derivative_degree

            (This is in general; can be higher in special cases.)

            For example, if you're evaluating a second derivative and you provide three grid points, you'll have a
            first-order-accurate answer.

            (x is denoted "alpha" in the paper)

        x0: The location that you are interested in obtaining a derivative at. This need not be on a grid point.

    Complexity is O(derivative_degree * len(x) ^ 2)

    Returns: A 1D ndarray corresponding to the coefficients that should be placed on each grid point. In other words,
    the approximate derivative at `x0` is the dot product of `coefficients` and the function values at each of the
    grid points `x`.

    """
    ### Check inputs
    if derivative_degree < 1:
        raise ValueError(
            "The parameter derivative_degree must be an integer >= 1.")
    expected_order_of_accuracy = np.length(x) - derivative_degree
    if expected_order_of_accuracy < 1:
        raise ValueError(
            "You need to provide at least (derivative_degree+1) grid points in the x vector."
        )

    ### Implement algorithm; notation from paper in docstring.
    N = np.length(x) - 1

    delta = np.zeros(shape=(derivative_degree + 1, N + 1, N + 1), dtype="O")

    delta[0, 0, 0] = 1
    c1 = 1
    for n in range(
            1, N + 1
    ):  # TODO make this algorithm more efficient; we only need to store a fraction of this data.
        c2 = 1
        for v in range(n):
            c3 = x[n] - x[v]
            c2 = c2 * c3
            # if n <= M: # Omitted because d is initialized to zero.
            #     d[n, n - 1, v] = 0
            for m in range(min(n, derivative_degree) + 1):
                delta[m, n, v] = ((x[n] - x0) * delta[m, n - 1, v] -
                                  m * delta[m - 1, n - 1, v]) / c3
        for m in range(min(n, derivative_degree) + 1):
            delta[m, n,
                  n] = (c1 / c2 * (m * delta[m - 1, n - 1, n - 1] -
                                   (x[n - 1] - x0) * delta[m, n - 1, n - 1]))
        c1 = c2

    coefficients_object_array = delta[derivative_degree, -1, :]

    coefficients = np.array([
        *coefficients_object_array
    ])  # Reconstructs using aerosandbox.numpy to intelligently type

    return coefficients
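A quick sanity check of the function above: on a symmetric 3-point stencil, the weights reduce to the familiar central-difference coefficients.

import aerosandbox.numpy as np

w1 = finite_difference_coefficients(x=np.array([-1, 0, 1]), x0=0, derivative_degree=1)
print(w1)  # [-0.5  0.   0.5]  -- the standard second-order central difference for f'

w2 = finite_difference_coefficients(x=np.array([-1, 0, 1]), x0=0, derivative_degree=2)
print(w2)  # [ 1. -2.  1.]     -- the standard central difference for f''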