def test_basic_math(types):
    for x in types["all"]:
        for y in types["all"]:
            ### Arithmetic
            x + y
            x - y
            x * y
            x / y
            np.sum(x)  # Sum of all entries of array-like object x

            ### Exponentials & Powers
            x**y
            np.power(x, y)
            np.exp(x)
            np.log(x)
            np.log10(x)
            np.sqrt(x)  # Note: do x ** 0.5 rather than np.sqrt(x).

            ### Trig
            np.sin(x)
            np.cos(x)
            np.tan(x)
            np.arcsin(x)
            np.arccos(x)
            np.arctan(x)
            np.arctan2(y, x)
            np.sinh(x)
            np.cosh(x)
            np.tanh(x)
            np.arcsinh(x)
            np.arccosh(x)
            np.arctanh(x - 0.5)  # `- 0.5` to give valid argument
Example #2
def Cd_cylinder(Re_D: float, subcritical_only=False) -> float:
    """
    Returns the drag coefficient of a cylinder in crossflow as a function of its Reynolds number.
    :param Re_D: Reynolds number, referenced to diameter
    :param subcritical_only: If True, the model only represents purely subcritical (Re < 300k) cylinder flows. Useful,
    since the model is then convex and better-behaved.
    :return: Drag coefficient

    # TODO rework this function to use tanh blending, which will mitigate overflows
    """
    csigc = 5.5766722118597247
    csigh = 23.7460859935990563
    csub0 = -0.6989492360435040
    csub1 = 1.0465189382830078
    csub2 = 0.7044228755898569
    csub3 = 0.0846501115443938
    csup0 = -0.0823564417206403
    csupc = 6.8020230357616764
    csuph = 9.9999999999999787
    csupscl = -0.4570690347113859

    x = np.log10(np.abs(Re_D) + 1e-16)

    if subcritical_only:
        Cd = 10**(csub0 * x + csub1) + csub2 + csub3 * x
    else:
        log10_Cd = (
            (np.log10(10**(csub0 * x + csub1) + csub2 + csub3 * x)) *
            (1 - 1 / (1 + np.exp(-csigh * (x - csigc)))) +
            (csup0 + csupscl / csuph * np.log(np.exp(csuph *
                                                     (csupc - x)) + 1)) *
            (1 / (1 + np.exp(-csigh * (x - csigc)))))
        Cd = 10**log10_Cd

    return Cd
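A minimal usage sketch of the fit above (values are illustrative; assumes `Cd_cylinder` as defined above is in scope and that `np` is aerosandbox.numpy or plain NumPy):

# Hypothetical usage: evaluate the drag-coefficient fit at a few Reynolds numbers.
for Re in [1e3, 1e5, 1e7]:
    print(f"Re = {Re:.0e} -> Cd = {Cd_cylinder(Re):.3f}")

# The subcritical-only variant is convex in log10(Re), which can help inside optimization problems.
Cd_sub = Cd_cylinder(1e4, subcritical_only=True)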
def test_softmax(plot=False):
    # Test softmax
    x = np.linspace(-10, 10, 100)
    y1 = x
    y2 = -2 * x - 3
    hardness = 0.5

    y_soft = np.softmax(y1, y2, hardness=hardness)

    assert np.softmax(0, 0, hardness=1) == np.log(2)

    if plot:
        import matplotlib.pyplot as plt
        import seaborn as sns

        sns.set(font_scale=1)

        fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)
        plt.plot(x, y1, label="y1")
        plt.plot(x, y2, label="y2")
        plt.plot(x, y_soft, label="softmax")
        plt.xlabel(r"x")
        plt.ylabel(r"y")
        plt.title(r"Softmax")
        plt.tight_layout()
        plt.legend()
        plt.show()
Example #4
def test_Linf_without_x_in_dict():
    np.random.seed(0)  # Set a seed for repeatability

    ### Making data
    hour = np.linspace(1, 10, 100)
    noise = 0.1 * np.random.randn(len(hour))
    temperature_c = np.log(hour) + noise

    ### Fit
    def model(x, p):
        return p["m"] * x + p["b"]

    x_data = hour
    y_data = temperature_c

    fitted_model = FittedModel(
        model=model,
        x_data=x_data,
        y_data=y_data,
        parameter_guesses={
            "m": 0,
            "b": 0,
        },
        residual_norm_type="Linf",
    )

    # Check that the fit is right
    assert fitted_model.parameters["m"] == pytest.approx(0.247116, abs=1e-5)
    assert fitted_model.parameters["b"] == pytest.approx(0.227797, abs=1e-5)
Example #5
def CM_function(alpha, Re, mach=0, deflection=0):
    alpha = np.mod(alpha + 180, 360) - 180  # Keep alpha in the valid range.
    CM_attached = CM_attached_interpolator({
        "alpha": alpha,
        "ln_Re": np.log(Re),
    })
    CM_separated = CM_separated_interpolator(alpha)
    return np.blend(separation_parameter(alpha, Re), CM_separated,
                    CM_attached)
Example #6
def test_type_errors():
    np.random.seed(0)  # Set a seed for repeatability

    ### Making data
    hour = np.linspace(1, 10, 100)
    noise = 0.1 * np.random.randn(len(hour))
    temperature_c = np.log(hour) + noise

    ### Fit
    def model(x, p):
        return p["m"] * x + p["b"]

    x_data = hour
    y_data = temperature_c

    fitted_model = FittedModel(
        model=model,
        x_data=x_data,
        y_data=y_data,
        parameter_guesses={
            "m": 0,
            "b": 0,
        },
        residual_norm_type="Linf",
    )

    fitted_model(5)

    with pytest.raises(TypeError):
        fitted_model({"temperature": 5})

    def model(x, p):
        return p["m"] * x["hour"] + p["b"]

    fitted_model = FittedModel(
        model=model,
        x_data={"hour": hour},
        y_data=y_data,
        parameter_guesses={
            "m": 0,
            "b": 0,
        },
        residual_norm_type="Linf",
    )

    fitted_model({"hour": 5})

    with pytest.raises(TypeError):
        fitted_model(5)
Example #7
def CD_function(alpha, Re, mach=0, deflection=0):
    alpha = np.mod(alpha + 180, 360) - 180  # Keep alpha in the valid range.
    log10_CD_attached = log10_CD_attached_interpolator({
        "alpha": alpha,
        "ln_Re": np.log(Re),
    })
    log10_CD_separated = log10_CD_separated_interpolator(alpha)
    return 10**np.blend(
        separation_parameter(alpha, Re),
        log10_CD_separated,
        log10_CD_attached,
    )
Example #8
def smoothmax(value1, value2, hardness):
    """
    A smooth maximum between two functions. Also referred to as the logsumexp() function.
    Useful because it's differentiable and preserves convexity!
    Great writeup by John D Cook here:
        https://www.johndcook.com/soft_maximum.pdf
    :param value1: Value of function 1.
    :param value2: Value of function 2.
    :param hardness: Hardness parameter. Higher values make this closer to max(value1, value2).
    :return: Soft maximum of the two supplied values.
    """
    value1 = value1 * hardness
    value2 = value2 * hardness
    max = np.fmax(value1, value2)
    min = np.fmin(value1, value2)
    out = max + np.log(1 + np.exp(min - max))
    out /= hardness
    return out
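A quick numeric check of the identity used above (a standalone sketch against plain NumPy, not part of the library): at value1 = value2 = 0 and hardness = 1 the expression reduces to log(2), and larger hardness values approach the true maximum.

import numpy as onp  # plain NumPy, used only for this standalone check

def smoothmax_check(a, b, hardness):
    # Same algebra as smoothmax() above: logsumexp of the scaled values, then unscale.
    hi = onp.fmax(a * hardness, b * hardness)
    lo = onp.fmin(a * hardness, b * hardness)
    return (hi + onp.log1p(onp.exp(lo - hi))) / hardness

assert onp.isclose(smoothmax_check(0.0, 0.0, 1.0), onp.log(2.0))
assert onp.isclose(smoothmax_check(3.0, 5.0, 100.0), 5.0, atol=1e-2)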
Example #9
def Cd_profile_2412(alpha, Re_c):
    # A curve fit I did to a NACA 2412 airfoil in incompressible flow.
    # Within -2 < alpha < 12 and 10^5 < Re_c < 10^7, has R^2 = 0.9713

    Re_c = np.fmax(Re_c, 1)
    log_Re = np.log(Re_c)

    CD0 = -5.249
    Re0 = 15.61
    Re1 = 15.31
    alpha0 = 1.049
    alpha1 = -4.715
    cx = 0.009528
    cxy = -0.00588
    cy = 0.04838

    log_CD = CD0 + cx * (alpha - alpha0) ** 2 + cy * (log_Re - Re0) ** 2 + cxy * (alpha - alpha1) * (
            log_Re - Re1)  # basically, a rotated paraboloid in logspace
    CD = np.exp(log_CD)

    return CD
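A short sketch of evaluating the fit above inside its stated validity range (illustrative values; assumes the function is in scope):

CD_cruise = Cd_profile_2412(alpha=2, Re_c=1e6)  # within -2 < alpha < 12 and 1e5 < Re_c < 1e7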
Example #10
def softmax(*args, hardness=1):
    """
    An element-wise softmax between two or more arrays. Also referred to as the logsumexp() function.

    Useful for optimization because it's differentiable and preserves convexity!

    Great writeup by John D Cook here:
        https://www.johndcook.com/soft_maximum.pdf

    Args:
        Provide any number of arguments as values to take the softmax of.

        hardness: Hardness parameter. Higher values make this closer to the true element-wise maximum.

    Returns:
        Soft maximum of the supplied values.
    """
    if hardness <= 0:
        raise ValueError("The value of `hardness` must be positive.")

    if len(args) <= 1:
        raise ValueError("You must call softmax with the value of two or more arrays that you'd like to take the "
                         "element-wise softmax of.")

    ### Scale the args by hardness
    args = [arg * hardness for arg in args]

    ### Find the element-wise max and min of the arrays:
    min = args[0]
    max = args[0]
    for arg in args[1:]:
        min = _np.fmin(min, arg)
        max = _np.fmax(max, arg)

    out = max + _np.log(sum(
            [_np.exp(array - max) for array in args]
        )
    )
    out = out / hardness
    return out
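A sketch of calling the element-wise version above on arrays (here `_np` is taken to be plain NumPy, as a stand-in for the module's own backend import); subtracting the running maximum before exponentiating is what keeps large inputs from overflowing:

import numpy as _np  # stand-in backend for this sketch only

a = _np.array([0.0, 10.0, 1000.0])
b = _np.array([1.0, -10.0, 999.0])
result = softmax(a, b, hardness=1)
# exp(array - max) stays bounded even at 1000, so no overflow occurs;
# result[2] is close to 1000 + log(1 + exp(-1)).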
def _calculate_induced_velocity_line_singularity_panel_coordinates(
    xp_field: Union[float, np.ndarray],
    yp_field: Union[float, np.ndarray],
    gamma_start: float = 0.,
    gamma_end: float = 0.,
    sigma_start: float = 0.,
    sigma_end: float = 0.,
    xp_panel_end: float = 1.,
) -> Tuple[Union[float, np.ndarray], Union[float, np.ndarray]]:
    """
    Calculates the induced velocity at a point (xp_field, yp_field) in a 2D potential-flow flowfield.

    The `p` suffix in `xp...` and `yp...` denotes the use of the panel coordinate system, where:
        * xp_hat is along the length of the panel
        * yp_hat is orthogonal (90 deg. counterclockwise) to it.

    In this flowfield, there is only one singularity element: A line vortex going from (0, 0) to (xp_panel_end, 0).
    The strength of this vortex varies linearly from:
        * gamma_start at (0, 0), to:
        * gamma_end at (xp_panel_end, 0). # TODO update paragraph

    By convention here, positive gamma induces clockwise swirl in the flow field.
        
    Function returns the 2D velocity u, v in the local coordinate system of the panel.

    Inputs x and y can be 1D ndarrays representing various field points,
    in which case the resulting velocities u and v have corresponding dimensionality.

    Equations from the seminal textbook "Low Speed Aerodynamics" by Katz and Plotkin.
    Vortex equations are Eq. 11.99 and Eq. 11.100.
        * Note: there is an error in equation 11.100 in Katz and Plotkin, at least in the 2nd ed:
        The last term of equation 11.100, which is given as:
            (x_{j+1} - x_j) / z + (theta_{j+1} - theta_j)
        has a sign error and should instead be written as:
            (x_{j+1} - x_j) / z - (theta_{j+1} - theta_j)
    Source equations are Eq. 11.89 and Eq. 11.90.

    """
    ### Modify any incoming floats
    if isinstance(xp_field, (float, int)):
        xp_field = np.array([xp_field])
    if isinstance(yp_field, (float, int)):
        yp_field = np.array([yp_field])

    ### Determine if you can skip either the vortex or source parts
    skip_vortex_math = not (isinstance(gamma_start, cas.MX) or isinstance(
        gamma_end, cas.MX)) and gamma_start == 0 and gamma_end == 0
    skip_source_math = not (isinstance(sigma_start, cas.MX) or isinstance(
        sigma_end, cas.MX)) and sigma_start == 0 and sigma_end == 0

    ### Determine which points are effectively on the panel, necessitating different math:
    is_on_panel = np.fabs(yp_field) <= 1e-8

    ### Do some geometry calculation
    r_1 = (xp_field**2 + yp_field**2)**0.5
    r_2 = ((xp_field - xp_panel_end)**2 + yp_field**2)**0.5

    ### Regularize
    is_on_endpoint = ((r_1 == 0) | (r_2 == 0))
    r_1 = np.where(
        r_1 == 0,
        1,
        r_1,
    )
    r_2 = np.where(r_2 == 0, 1, r_2)

    ### Continue geometry calculation
    theta_1 = np.arctan2(yp_field, xp_field)
    theta_2 = np.arctan2(yp_field, xp_field - xp_panel_end)
    ln_r_2_r_1 = np.log(r_2 / r_1)
    d_theta = theta_2 - theta_1
    tau = 2 * np.pi

    ### Regularize if the point is on the panel.
    yp_field_regularized = np.where(is_on_panel, 1, yp_field)

    ### VORTEX MATH
    if skip_vortex_math:
        u_vortex = 0
        v_vortex = 0
    else:
        d_gamma = gamma_end - gamma_start
        u_vortex_term_1_quantity = (yp_field / tau * d_gamma / xp_panel_end)
        u_vortex_term_2_quantity = (gamma_start * xp_panel_end +
                                    d_gamma * xp_field) / (tau * xp_panel_end)

        # Calculate u_vortex
        u_vortex_term_1 = u_vortex_term_1_quantity * ln_r_2_r_1
        u_vortex_term_2 = u_vortex_term_2_quantity * d_theta
        u_vortex = u_vortex_term_1 + u_vortex_term_2

        # Correct the u-velocity if field point is on the panel
        u_vortex = np.where(is_on_panel, 0, u_vortex)

        # Calculate v_vortex
        v_vortex_term_1 = u_vortex_term_2_quantity * ln_r_2_r_1

        v_vortex_term_2 = np.where(
            is_on_panel,
            d_gamma / tau,
            u_vortex_term_1_quantity *
            (xp_panel_end / yp_field_regularized - d_theta),
        )

        v_vortex = v_vortex_term_1 + v_vortex_term_2

    ### SOURCE MATH
    if skip_source_math:
        u_source = 0
        v_source = 0
    else:
        d_sigma = sigma_end - sigma_start
        v_source_term_1_quantity = (yp_field / tau * d_sigma / xp_panel_end)
        v_source_term_2_quantity = (sigma_start * xp_panel_end +
                                    d_sigma * xp_field) / (tau * xp_panel_end)
        # Calculate v_source
        v_source_term_1 = -v_source_term_1_quantity * ln_r_2_r_1
        v_source_term_2 = v_source_term_2_quantity * d_theta
        v_source = v_source_term_1 + v_source_term_2

        # Correct the v-velocity if field point is on the panel
        v_source = np.where(is_on_panel, 0, v_source)

        # Calculate u_source
        u_source_term_1 = -v_source_term_2_quantity * ln_r_2_r_1

        u_source_term_2 = np.where(
            is_on_panel,
            -d_sigma / tau,
            -v_source_term_1_quantity *
            (xp_panel_end / yp_field_regularized - d_theta),
        )

        u_source = u_source_term_1 + u_source_term_2

    ### Return
    u = u_vortex + u_source
    v = v_vortex + v_source

    ### If the field point is on the endpoint of the panel, replace the NaN with a zero.
    u = np.where(is_on_endpoint, 0, u)
    v = np.where(is_on_endpoint, 0, v)

    return u, v
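A hedged usage sketch of the panel-coordinate routine above, evaluating the velocity induced by a linearly-varying vortex panel (here with equal endpoint strengths) at a few illustrative field points. The panel spans x in [0, 1] at y = 0 in panel coordinates:

xp = np.array([0.5, 2.0, 0.5])
yp = np.array([1.0, 0.0, 0.0])  # the last two points lie on the panel's axis

u, v = _calculate_induced_velocity_line_singularity_panel_coordinates(
    xp_field=xp,
    yp_field=yp,
    gamma_start=1.0,  # vortex strength at the panel's start
    gamma_end=1.0,    # vortex strength at the panel's end
)
# u and v have the same shape as xp / yp; on-panel points use the regularized branch above.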
Example #12
    def __init__(
        self,
        model: Callable[
            [Union[np.ndarray,
                   Dict[str, np.ndarray]], Dict[str, float]], np.ndarray],
        x_data: Union[np.ndarray, Dict[str, np.ndarray]],
        y_data: np.ndarray,
        parameter_guesses: Dict[str, float],
        parameter_bounds: Dict[str, tuple] = None,
        residual_norm_type: str = "L2",
        fit_type: str = "best",
        weights: np.ndarray = None,
        put_residuals_in_logspace: bool = False,
        verbose=True,
    ):
        """
        Fits an analytical model to n-dimensional unstructured data using an automatic-differentiable optimization approach.

        Args:

            model: The model that you want to fit your dataset to. This is a callable with syntax f(x, p) where:

                * x is a dict of independent variables. Same format as x_data [dict of 1D ndarrays of length n].

                    * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead interpret x
                    as a 1D ndarray. (If you do this, just give `x_data` as an array.)

                * p is a dict of parameters. Same format as param_guesses [dict with syntax param_name:param_value].

                Model should return a 1D ndarray of length n.

                Basically, if you've done it right:
                >>> model(x_data, parameter_guesses)
                should evaluate to a 1D ndarray where each x_data is mapped to something analogous to y_data. (The fit
                will likely be bad at this point, because we haven't yet optimized on param_guesses - but the types
                should be happy.)

                Model should use aerosandbox.numpy operators.

                The model is not allowed to make any in-place changes to the input `x`. The most common way this
                manifests itself is if someone writes something to the effect of `x += 3` or similar. Instead, write `x =
                x + 3`.

            x_data: Values of the independent variable(s) in the dataset to be fitted. This is a dictionary; syntax is {
            var_name:var_data}.

                * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead supply x_data
                as a 1D ndarray. (If you do this, just treat `x` as an array in your model, not a dict.)

            y_data: Values of the dependent variable in the dataset to be fitted. [1D ndarray of length n]

            parameter_guesses: a dict of fit parameters. Syntax is {param_name:param_initial_guess}.

                * Parameters will be initialized to the values set here; all parameters need an initial guess.

                * param_initial_guess is a float; note that only scalar parameters are allowed.

            parameter_bounds: Optional: a dict of bounds on fit parameters. Syntax is {"param_name":(min, max)}.

                * May contain only a subset of param_guesses if desired.

                * Use None to represent one-sided constraints (e.g. (None, 5)).

            residual_norm_type: What error norm should we minimize to optimize the fit parameters? Options:

                * "L1": minimize the L1 norm or sum(abs(error)). Less sensitive to outliers.

                * "L2": minimize the L2 norm, also known as the Euclidian norm, or sqrt(sum(error ** 2)). The default.

                * "Linf": minimize the L_infinty norm or max(abs(error)). More sensitive to outliers.

            fit_type: Should we find the model of best fit (i.e. the model that minimizes the specified residual norm),
            or should we look for a model that represents an upper/lower bound on the data (useful for robust surrogate
            modeling, so that you can put bounds on modeling error):

                * "best": finds the model of best fit. Usually, this is what you want.

                * "upper bound": finds a model that represents an upper bound on the data (while still trying to minimize
                the specified residual norm).

                * "lower bound": finds a model that represents a lower bound on the data (while still trying to minimize
                the specified residual norm).

            weights: Optional: weights for data points. If not supplied, weights are assumed to be uniform.

                * Weights are automatically normalized. [1D ndarray of length n]

            put_residuals_in_logspace: Whether to optimize using the logarithmic error as opposed to the absolute error
            (useful for minimizing percent error).

            Note: If any model outputs or data are negative, this will raise an error!

            verbose: Should the progress of the optimization solve that is part of the fitting be displayed? See
            `aerosandbox.Opti.solve(verbose=)` syntax for more details.

        Returns: A model in the form of a FittedModel object. Some things you can do:
            >>> y = FittedModel(x) # evaluate the FittedModel at new x points
            >>> FittedModel.parameters # directly examine the optimal values of the parameters that were found
            >>> FittedModel.plot() # plot the fit


        """
        super().__init__()

        ##### Prepare all inputs, check types/sizes.

        ### Flatten all inputs
        def flatten(input):
            return np.array(input).flatten()

        try:
            x_data = {k: flatten(v) for k, v in x_data.items()}
            x_data_is_dict = True
        except AttributeError:  # If it's not a dict or dict-like, assume it's a 1D ndarray dataset
            x_data = flatten(x_data)
            x_data_is_dict = False
        y_data = flatten(y_data)
        n_datapoints = np.length(y_data)

        ### Handle weighting
        if weights is None:
            weights = np.ones(n_datapoints)
        else:
            weights = flatten(weights)
        sum_weights = np.sum(weights)
        if sum_weights <= 0:
            raise ValueError("The weights must sum to a positive number!")
        if np.any(weights < 0):
            raise ValueError(
                "No entries of the weights vector are allowed to be negative!")
        weights = weights / np.sum(
            weights)  # Normalize weights so that they sum to 1.

        ### Check format of parameter_bounds input
        if parameter_bounds is None:
            parameter_bounds = {}
        for param_name, v in parameter_bounds.items():
            if param_name not in parameter_guesses.keys():
                raise ValueError(
                    f"A parameter name (key = \"{param_name}\") in parameter_bounds was not found in parameter_guesses."
                )
            if not np.length(v) == 2:
                raise ValueError(
                    "Every value in parameter_bounds must be a tuple in the format (lower_bound, upper_bound). "
                    "For one-sided bounds, use None for the unbounded side.")

        ### If putting residuals in logspace, check positivity
        if put_residuals_in_logspace:
            if not np.all(y_data > 0):
                raise ValueError(
                    "You can't fit a model with residuals in logspace if y_data is not entirely positive!"
                )

        ### Check dimensionality of inputs to fitting algorithm
        relevant_inputs = {
            "y_data": y_data,
            "weights": weights,
        }
        try:
            relevant_inputs.update(x_data)
        except TypeError:
            relevant_inputs.update({"x_data": x_data})

        for key, value in relevant_inputs.items():
            # Check that the length of the inputs are consistent
            series_length = np.length(value)
            if not series_length == n_datapoints:
                raise ValueError(
                    f"The supplied data series \"{key}\" has length {series_length}, but y_data has length {n_datapoints}."
                )

        ##### Formulate and solve the fitting optimization problem

        ### Initialize an optimization environment
        opti = Opti()

        ### Initialize the parameters as optimization variables
        params = {}
        for param_name, param_initial_guess in parameter_guesses.items():
            if param_name in parameter_bounds:
                params[param_name] = opti.variable(
                    init_guess=param_initial_guess,
                    lower_bound=parameter_bounds[param_name][0],
                    upper_bound=parameter_bounds[param_name][1],
                )
            else:
                params[param_name] = opti.variable(
                    init_guess=param_initial_guess, )

        ### Evaluate the model at the data points you're trying to fit
        x_data_original = copy.deepcopy(
            x_data
        )  # Make a copy of x_data so that you can determine if the model did in-place operations on x and tattle on the user.

        try:
            y_model = model(x_data, params)  # Evaluate the model
        except Exception:
            raise Exception("""
            There was an error when evaluating the model you supplied with the x_data you supplied.
            Likely possible causes:
                * Your model() does not have the call syntax model(x, p), where x is the x_data and p are parameters.
                * Your model should take in p as a dict of parameters, but it does not.
                * Your model assumes x is an array-like but you provided x_data as a dict, or vice versa.
            See the docstring of FittedModel() if you have other usage questions or would like to see examples.
            """)

        try:  ### If the model did in-place operations on x_data, throw an error
            x_data_is_unchanged = np.all(x_data == x_data_original)
        except ValueError:
            x_data_is_unchanged = np.all([
                x_series == x_series_original
                for x_series, x_series_original in zip(x_data, x_data_original)
            ])
        if not x_data_is_unchanged:
            raise TypeError(
                "model(x_data, parameter_guesses) did in-place operations on x, which is not allowed!"
            )
        if y_model is None:  # Make sure that y_model actually returned something sensible
            raise TypeError(
                "model(x_data, parameter_guesses) returned None, when it should've returned a 1D ndarray."
            )

        ### Compute how far off you are (error)
        if not put_residuals_in_logspace:
            error = y_model - y_data
        else:
            y_model = np.fmax(
                y_model, 1e-300
            )  # Keep y_model very slightly always positive, so that log() doesn't NaN.
            error = np.log(y_model) - np.log(y_data)

        ### Set up the optimization problem to minimize some norm(error), which looks different depending on the norm used:
        if residual_norm_type.lower() == "l1":  # Minimize the L1 norm
            abs_error = opti.variable(init_guess=0, n_vars=np.length(
                y_data))  # Make the abs() of each error entry an opt. var.
            opti.subject_to([
                abs_error >= error,
                abs_error >= -error,
            ])
            opti.minimize(np.sum(weights * abs_error))

        elif residual_norm_type.lower() == "l2":  # Minimize the L2 norm
            opti.minimize(np.sum(weights * error**2))

        elif residual_norm_type.lower(
        ) == "linf":  # Minimize the L-infinity norm
            linf_value = opti.variable(
                init_guess=0
            )  # Make the value of the L-infinity norm an optimization variable
            opti.subject_to([
                linf_value >= weights * error, linf_value >= -weights * error
            ])
            opti.minimize(linf_value)

        else:
            raise ValueError("Bad input for the 'residual_type' parameter.")

        ### Add in the constraints specified by fit_type, which force the model to stay above / below the data points.
        if fit_type == "best":
            pass
        elif fit_type == "upper bound":
            opti.subject_to(y_model >= y_data)
        elif fit_type == "lower bound":
            opti.subject_to(y_model <= y_data)
        else:
            raise ValueError("Bad input for the 'fit_type' parameter.")

        ### Solve
        sol = opti.solve(verbose=verbose)

        ##### Construct a FittedModel

        ### Create a vector of solved parameters
        params_solved = {}
        for param_name in params:
            try:
                params_solved[param_name] = sol.value(params[param_name])
            except:
                params_solved[param_name] = np.NaN

        ### Store all the data and inputs
        self.model = model
        self.x_data = x_data
        self.y_data = y_data
        self.parameters = params_solved
        self.parameter_guesses = parameter_guesses
        self.parameter_bounds = parameter_bounds
        self.residual_norm_type = residual_norm_type
        self.fit_type = fit_type
        self.weights = weights
        self.put_residuals_in_logspace = put_residuals_in_logspace
def inv_sigmoid(x):
    # Inverse of the sigmoid y = 1 / (1 + exp(x)), used to inspect the fit in sigmoid space.
    return np.log(1 / x - 1)
machs_to_plot = np.linspace(-0.5, 1.5, 500)
fig, ax = plt.subplots(1, 1, figsize=(8, 7), dpi=200)
plt.subplot(221)
plt.plot(machs_to_plot, beta(machs_to_plot), ".", label="")
plt.plot(machs_to_plot, fit(machs_to_plot))
plt.ylim(-0.05, 1.05)
plt.title("Fit: Normal Space")
plt.xlabel(r"Mach $M$ [-]")
plt.ylabel(r"$\beta = \sqrt{1-M^2}$")
plt.subplot(222)
plt.plot(machs_to_plot, inv_sigmoid(beta(machs_to_plot)), ".", label="")
plt.plot(machs_to_plot, inv_sigmoid(fit(machs_to_plot)))
plt.ylim(-15, 5)
plt.title("Fit: Inverse Sigmoid Space")
plt.xlabel(r"Mach $M$ [-]")
plt.ylabel(r"$\sigma^{-1}\left(\beta\right)$")
plt.subplot(223)
plt.plot(machs_to_plot, np.log(beta(machs_to_plot)), ".", label="")
plt.plot(machs_to_plot, np.log(fit(machs_to_plot)))
plt.ylim(-2.5, 0.5)
plt.title("Fit: Log Space")
plt.xlabel(r"Mach $M$ [-]")
plt.ylabel(r"$\ln(\beta)$")
plt.subplot(224)
plt.plot(machs_to_fit, error)
plt.title("Error in Fit range")
plt.savefig("machfitting.png")

p.show_plot()
def logint(x):
    # return int(x)
    # Returns the geometric mean of each adjacent pair of entries of x
    # (i.e., the midpoints of x in log space).
    logx = np.log(x)
    return np.exp(
        (logx[1:] + logx[:-1]) / 2
    )
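A minimal check of `logint` above (assuming NumPy-like semantics for `np`):

x = np.array([1.0, 10.0, 100.0])
mids = logint(x)  # approximately [3.162, 31.62], the geometric midpoints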
Example #16
def Cd_cylinder(Re_D: float,
                mach: float = 0.,
                include_mach_effects=True,
                subcritical_only=False) -> float:
    """
    Returns the drag coefficient of a cylinder in crossflow as a function of its Reynolds number and Mach.

    Args:
        Re_D: Reynolds number, referenced to diameter
        mach: Mach number
        include_mach_effects: If this is set False, it assumes Mach = 0, which simplifies the computation.
        subcritical_only: If True, the model only represents purely subcritical (Re < 300k) cylinder flows. Useful,
        since the model is then convex and better-behaved.

    Returns: Drag coefficient

    # TODO rework this function to use tanh blending, which will mitigate overflows

    """

    ##### Do the viscous part of the computation
    csigc = 5.5766722118597247
    csigh = 23.7460859935990563
    csub0 = -0.6989492360435040
    csub1 = 1.0465189382830078
    csub2 = 0.7044228755898569
    csub3 = 0.0846501115443938
    csup0 = -0.0823564417206403
    csupc = 6.8020230357616764
    csuph = 9.9999999999999787
    csupscl = -0.4570690347113859

    x = np.log10(np.abs(Re_D) + 1e-16)

    if subcritical_only:
        Cd_mach_0 = 10**(csub0 * x + csub1) + csub2 + csub3 * x
    else:
        log10_Cd = (
            (np.log10(10**(csub0 * x + csub1) + csub2 + csub3 * x)) *
            (1 - 1 / (1 + np.exp(-csigh * (x - csigc)))) +
            (csup0 + csupscl / csuph * np.log(np.exp(csuph *
                                                     (csupc - x)) + 1)) *
            (1 / (1 + np.exp(-csigh * (x - csigc)))))
        Cd_mach_0 = 10**log10_Cd

    ##### Do the compressible part of the computation
    if include_mach_effects:
        m = mach
        p = {
            'a_sub': 0.03458900259594298,
            'a_sup': -0.7129528087049688,
            'cd_sub': 1.163206940186374,
            'cd_sup': 1.2899213533122527,
            's_sub': 3.436601777569716,
            's_sup': -1.37123096976983,
            'trans': 1.022819211244295,
            'trans_str': 19.017600596069848
        }

        Cd_over_Cd_mach_0 = np.blend(
            p["trans_str"] *
            (m - p["trans"]), p["cd_sup"] + np.exp(p["a_sup"] + p["s_sup"] *
                                                   (m - p["trans"])),
            p["cd_sub"] + np.exp(p["a_sub"] + p["s_sub"] *
                                 (m - p["trans"]))) / 1.1940010047391572

        Cd = Cd_mach_0 * Cd_over_Cd_mach_0

    else:
        Cd = Cd_mach_0

    return Cd
Example #17
] + list(87e3 + np.geomspace(5e3, 2000e3, 11)) +
                                list(0 - np.geomspace(5e3, 5000e3, 11)))

altitude_knot_points = np.sort(np.unique(altitude_knot_points))

temperature_knot_points = temperature_isa(altitude_knot_points)
pressure_knot_points = pressure_isa(altitude_knot_points)

# Create interpolated models for temperature and log-pressure
interpolated_temperature = InterpolatedModel(
    x_data_coordinates=altitude_knot_points,
    y_data_structured=temperature_knot_points,
)
interpolated_log_pressure = InterpolatedModel(
    x_data_coordinates=altitude_knot_points,
    y_data_structured=np.log(pressure_knot_points),
)


def pressure_differentiable(altitude):
    """
    Computes the pressure at a given altitude with a differentiable model.

    Args:
        altitude: Geopotential altitude [m]

    Returns: Pressure [Pa]

    """
    return np.exp(interpolated_log_pressure(altitude))
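A short usage sketch of the interpolated model above (assumes the knot-point setup above has already run and `np` is aerosandbox.numpy):

# Evaluate pressure at a few geopotential altitudes [m]; interpolating in log space
# keeps the result positive and smooth for gradient-based optimizers.
altitudes = np.array([0.0, 11e3, 30e3])
pressures = pressure_differentiable(altitudes)  # [Pa]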
Example #18
def test_gpkit_style_solve():
    """
    Here, the problem is formulated *exactly* as it is in the GPKit paper.

    This isn't how you would actually want to solve this problem (lots of redundant variables here...), but
    this test just confirms that it's a valid mathematical formulation and still works.

    Also note that all constraints and the objective are log-transformed, so under the hood, this is basically
    exactly a geometric program (and should be convex, yay).
    """

    opti = asb.Opti()  # initialize an optimization environment

    ### Variables
    D = opti.variable(init_guess=1e3,
                      log_transform=True)  # total drag force [N]
    A = opti.variable(init_guess=1e1, log_transform=True)  # aspect ratio
    S = opti.variable(init_guess=1e2,
                      log_transform=True)  # total wing area [m^2]
    V = opti.variable(init_guess=1e2,
                      log_transform=True)  # cruising speed [m/s]
    W = opti.variable(init_guess=8e3,
                      log_transform=True)  # total aircraft weight [N]
    Re = opti.variable(init_guess=5e6,
                       log_transform=True)  # Reynolds number [-]
    C_D = opti.variable(init_guess=3e-2,
                        log_transform=True)  # Drag coefficient of wing [-]
    C_L = opti.variable(init_guess=1,
                        log_transform=True)  # Lift coefficient of wing [-]
    C_f = opti.variable(init_guess=1e-2,
                        log_transform=True)  # Skin friction coefficient [-]
    W_w = opti.variable(init_guess=3e3, log_transform=True)  # Wing weight [N]

    ### Constraints
    # Drag model
    C_D_fuse = CDA0 / S
    C_D_wpar = k * C_f * S_wetratio
    C_D_ind = C_L**2 / (pi * A * e)
    opti.subject_to(np.log(C_D) >= np.log(C_D_fuse + C_D_wpar + C_D_ind))

    # Wing weight model
    W_w_strc = W_W_coeff1 * (N_ult * A**1.5 * (W_0 * W * S)**0.5) / tau
    W_w_surf = W_W_coeff2 * S
    opti.subject_to(np.log(W_w) >= np.log(W_w_surf + W_w_strc))

    # Other models
    opti.subject_to([
        np.log(D) >= np.log(0.5 * rho * S * C_D * V**2),
        np.log(Re) <= np.log((rho / mu) * V * (S / A)**0.5),
        np.log(C_f) >= np.log(0.074 / Re**0.2),
        np.log(W) <= np.log(0.5 * rho * S * C_L * V**2),
        np.log(W) <= np.log(0.5 * rho * S * C_Lmax * V_min**2),
        np.log(W) >= np.log(W_0 + W_w),
    ])

    # Objective
    opti.minimize(np.log(D))

    sol = opti.solve()

    assert sol.value(D) == pytest.approx(303.1, abs=0.1)
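For context on why the log-transformed formulation above is convex (a standard geometric-programming fact, stated here as background rather than taken from the library): with the substitution $y_i = \log x_i$, each posynomial constraint transforms as

$$\sum_k c_k \prod_i x_i^{a_{ik}} \le 1 \quad\Longrightarrow\quad \log \sum_k \exp\left(a_k^\top y + \log c_k\right) \le 0,$$

and the right-hand side is a log-sum-exp function of $y$, hence convex. This is why the `np.log(...)`-wrapped constraints and objective above solve so reliably.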
Example #19
    def variable(self,
                 init_guess: Union[float, np.ndarray],
                 n_vars: int = None,
                 scale: float = None,
                 freeze: bool = False,
                 log_transform: bool = False,
                 category: str = "Uncategorized",
                 lower_bound: float = None,
                 upper_bound: float = None,
                 ) -> cas.MX:
        """
        Initializes a new decision variable (or vector of decision variables). You must pass an initial guess (
        `init_guess`) upon defining a new variable. Dimensionality is inferred from this initial guess, but it can be
        overridden; see below for syntax.

        It is highly, highly recommended that you provide a scale (`scale`) for each variable, especially for
        nonconvex problems, although this is not strictly required.

        Args:

            init_guess: Initial guess for the optimal value of the variable being initialized. This is where in the
            design space the optimizer will start looking.

                This can be either a float or a NumPy ndarray; the dimension of the variable (i.e. scalar,
                vector) that is created will be automatically inferred from the shape of the initial guess you
                provide here. (Although it can also be overridden using the `n_vars` parameter; see below.)

                For scalar variables, your initial guess should be a float:

                >>> opti = asb.Opti()
                >>> scalar_var = opti.variable(init_guess=5) # Initializes a scalar variable at a value of 5

                For vector variables, your initial guess should be either:

                    * a float, in which case you must pass the length of the vector as `n_vars`, otherwise a scalar
                    variable will be created:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
                    >>> # 10, with all 10 elements set to an initial guess of 5.

                    * a NumPy ndarray, in which case each element will be initialized to the corresponding value in
                    the given array:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=np.linspace(0, 5, 10)) # Initializes a vector variable of
                    >>> # length 10, with all 10 elements initialized to linearly vary between 0 and 5.

                In the case where the variable is to be log-transformed (see `log_transform`), the initial guess
                should not be log-transformed as well - just supply the initial guess as usual. (Log-transform of the
                initial guess happens under the hood.) The initial guess must, of course, be a positive number in
                this case.

            n_vars: [Optional] Used to manually override the dimensionality of the variable to create; if not
            provided, the dimensionality of the variable is inferred from the initial guess `init_guess`.

                The only real case where you need to use this argument would be if you are initializing a vector
                variable to a scalar value, but you don't feel like using `init_guess=value * np.ones(n_vars)`.
                For example:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
                    >>> # 10, with all 10 elements set to an initial guess of 5.

            scale: [Optional] Approximate scale of the variable.

                For example, if you're optimizing the design of an automobile and setting the tire diameter as an
                optimization variable, you might choose `scale=0.5`, corresponding to 0.5 meters.

                Properly scaling your variables can have a huge impact on solution speed (or even if the optimizer
                converges at all). Although most modern second-order optimizers (such as IPOPT, used here) are
                theoretically scale-invariant, numerical precision issues due to floating-point arithmetic can make
                solving poorly-scaled problems really difficult or impossible. See here for more info:
                https://web.casadi.org/blog/nlp-scaling/

                If not specified, the code will try to pick a sensible value by defaulting to the `init_guess`.

            freeze: [Optional] This boolean tells the optimizer to "freeze" the variable at a specific value. In
            order to determine the value to freeze the variable at, the optimizer will use the following logic:

                    * If you initialize a new variable with the parameter `freeze=True`: the optimizer will freeze
                    the variable at the value of initial guess.

                        >>> opti = Opti()
                        >>> my_var = opti.variable(init_guess=5, freeze=True) # This will freeze my_var at a value of 5.

                    * If the Opti instance is associated with a cache file, and you told it to freeze a specific
                    category(s) of variables that your variable is a member of, and you didn't manually specify to
                    freeze the variable: the variable will be frozen based on the value in the cache file (and ignore
                    the `init_guess`). Example:

                        >>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
                        >>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
                        >>> my_var = opti.variable(init_guess=5, category="Wheel Sizing")
                        >>> # This will freeze my_var at a value of 10 (from the cache file, not the init_guess)

                    * If the Opti instance is associated with a cache file, and you told it to freeze a specific
                    category(s) of variables that your variable is a member of, but you then manually specified that
                    the variable should be frozen: the variable will once again be frozen at the value of `init_guess`:

                        >>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
                        >>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
                        >>> my_var = opti.variable(init_guess=5, category="Wheel Sizing", freeze=True)
                        >>> # This will freeze my_var at a value of 5 (`freeze` overrides category loading.)

            Motivation for freezing variables:

                The ability to freeze variables is exceptionally useful when designing engineering systems. Let's say
                we're designing an airplane. In the beginning of the design process, we're doing "clean-sheet" design
                - any variable is up for grabs for us to optimize on, because the airplane doesn't exist yet!
                However, the farther we get into the design process, the more things get "locked in" - we may have
                ordered jigs, settled on a wingspan, chosen an engine, et cetera. So, if something changes later (
                let's say that we discover that one of our assumptions was too optimistic halfway through the design
                process), we have to make up for that lost margin using only the variables that are still free. To do
                this, we would freeze the variables that are already decided on.

                By categorizing variables, you can also freeze entire categories of variables. For example,
                you can freeze all of the wing design variables for an airplane but leave all of the fuselage
                variables free.

                This idea of freezing variables can also be used to look at off-design performance - freeze a
                design, but change the operating conditions.

            log_transform: [Optional] Advanced use only. A flag for whether to internally log-transform this variable
            before passing it to the optimizer. Good for known positive engineering quantities that become nonsensical
            if negative (e.g. mass). Log-transforming these variables can also help maintain convexity.

            category: [Optional] What category of variables does this belong to?

        Usage notes:

            When using vector variables, individual components of this vector of variables can be accessed via normal
            indexing. Example:
                >>> opti = asb.Opti()
                >>> my_var = opti.variable(init_guess=1, n_vars=5)
                >>> opti.subject_to(my_var[3] >= my_var[2])  # This is a valid way of indexing
                >>> my_sum = asb.sum(my_var)  # This will sum up all elements of `my_var`

        Returns:
            The variable itself as a symbolic CasADi variable (MX type).

        """
        ### Set defaults
        if n_vars is None:  # Infer dimensionality from init_guess if it is not provided
            n_vars = np.length(init_guess)
        if scale is None:  # Infer a scale from init_guess if it is not provided
            if log_transform:
                scale = 1
            else:
                scale = np.mean(np.fabs(init_guess))  # Initialize the scale to a heuristic based on the init_guess
                if scale == 0:  # If that heuristic leads to a scale of 0, use a scale of 1 instead.
                    scale = 1

                # scale = np.fabs(
                #     np.where(
                #         init_guess != 0,
                #         init_guess,
                #         1
                #     ))

        # Validate the inputs
        if log_transform:
            if np.any(init_guess <= 0):
                raise ValueError(
                    "If you are initializing a log-transformed variable, the initial guess(es) must all be positive.")
        if np.any(scale <= 0):
            raise ValueError("The 'scale' argument must be a positive number.")

        # If the variable is in a category to be frozen, fix the variable at the initial guess.
        is_manually_frozen = freeze
        if category in self.variable_categories_to_freeze:
            freeze = True

        # If the variable is to be frozen, return the initial guess. Otherwise, define the variable using CasADi symbolics.
        if freeze:
            var = self.parameter(n_params=n_vars, value=init_guess)
        else:
            if not log_transform:
                var = scale * super().variable(n_vars)
                self.set_initial(var, init_guess)
            else:
                log_scale = scale / init_guess
                log_var = log_scale * super().variable(n_vars)
                var = np.exp(log_var)
                self.set_initial(log_var, np.log(init_guess))

        # Track the variable
        if category not in self.variables_categorized:  # Add a category if it does not exist
            self.variables_categorized[category] = []
        self.variables_categorized[category].append(var)
        var.is_manually_frozen = is_manually_frozen

        # Apply bounds
        if lower_bound is not None:
            self.subject_to(var >= lower_bound)
        if upper_bound is not None:
            self.subject_to(var <= upper_bound)

        return var
Example #20
    def generate_polars(
        self,
        alphas=np.linspace(-15, 15, 21),
        Res=np.geomspace(1e4, 1e7, 10),
        cache_filename: str = None,
        xfoil_kwargs: Dict[str, Any] = None,
        unstructured_interpolated_model_kwargs: Dict[str, Any] = None,
    ) -> None:
        """
        Generates airfoil polars (CL, CD, CM functions) and assigns them in-place to this Airfoil's polar functions.

        In other words, when this function is run, the following functions will be added (or overwritten) to the instance:
            * Airfoil.CL_function(alpha, Re, mach, deflection)
            * Airfoil.CD_function(alpha, Re, mach, deflection)
            * Airfoil.CM_function(alpha, Re, mach, deflection)

        Where alpha is in degrees. Right now, deflection is not used.

        Args:

            alphas: The range of alphas to sample from XFoil at.

            Res: The range of Reynolds numbers to sample from XFoil at.

            cache_filename: A path-like filename (ideally a "*.json" file) that can be used to cache the XFoil
            results, making it much faster to regenerate the results.

            xfoil_kwargs: Keyword arguments to pass into the AeroSandbox XFoil module. See the aerosandbox.XFoil
            constructor for options.

            unstructured_interpolated_model_kwargs: Keyword arguments to pass into the UnstructuredInterpolatedModels
            that contain the polars themselves. See the aerosandbox.UnstructuredInterpolatedModel constructor for
            options.

        Warning: In-place operation! Modifies this Airfoil object by setting Airfoil.CL_function, etc. to the new
        polars.

        Returns: None (in-place)

        """
        if self.coordinates is None:
            raise ValueError(
                "Cannot generate polars for an airfoil that you don't have the coordinates of!"
            )

        ### Set defaults
        if xfoil_kwargs is None:
            xfoil_kwargs = {}
        if unstructured_interpolated_model_kwargs is None:
            unstructured_interpolated_model_kwargs = {}

        xfoil_kwargs = {  # See asb.XFoil for documentation on these.
            "verbose": False,
            "max_iter": 20,
            "xfoil_repanel": True,
            **xfoil_kwargs
        }

        unstructured_interpolated_model_kwargs = {  # These were tuned heuristically as defaults!
            "resampling_interpolator_kwargs": {
                "degree": 0,
                # "kernel": "linear",
                "kernel": "multiquadric",
                "epsilon": 3,
                "smoothing": 0.01,
                # "kernel": "cubic"
            },
            **unstructured_interpolated_model_kwargs
        }

        ### Retrieve XFoil Polar Data from cache, if it exists.
        data = None
        if cache_filename is not None:
            try:
                with open(cache_filename, "r") as f:
                    data = {k: np.array(v) for k, v in json.load(f).items()}
            except FileNotFoundError:
                pass

        ### Analyze airfoil with XFoil, if needed
        if data is None:

            from aerosandbox.aerodynamics.aero_2D import XFoil

            def get_run_data(
                Re
            ):  # Get the data for an XFoil alpha sweep at one specific Re.
                run_data = XFoil(airfoil=self, Re=Re,
                                 **xfoil_kwargs).alpha(alphas)
                run_data["Re"] = Re * np.ones_like(run_data["alpha"])
                return run_data  # Data is a dict where keys are figures of merit [str] and values are 1D ndarrays.

            from tqdm import tqdm

            run_datas = [  # Get a list of dicts, where each dict is the result of an XFoil run at a particular Re.
                get_run_data(Re) for Re in tqdm(
                    Res,
                    desc=
                    f"Running XFoil to generate polars for Airfoil '{self.name}':",
                )
            ]
            data = {  # Merge the dicts into one big database of all runs.
                k:
                np.concatenate(tuple([run_data[k] for run_data in run_datas]))
                for k in run_datas[0].keys()
            }

            if cache_filename is not None:  # Cache the accumulated data for later use, if it doesn't already exist.
                with open(cache_filename, "w+") as f:
                    json.dump({k: v.tolist()
                               for k, v in data.items()},
                              f,
                              indent=4)

        ### Save the raw data as an instance attribute for later use
        self.xfoil_data = data

        ### Make the interpolators for attached aerodynamics
        from aerosandbox.modeling import UnstructuredInterpolatedModel

        alpha_resample = np.concatenate(
            [
                np.array([-180, -150, -120, -90, -60, -30]), alphas[::2],
                np.array([30, 60, 90, 120, 150, 180])
            ]
        )  # This is the list of points that we're going to resample from the XFoil runs for our InterpolatedModel, using an RBF.
        Re_resample = np.concatenate(
            [
                np.array([1e0, 1e1, 1e2, 1e3]), Res,
                np.array([1e8, 1e9, 1e10, 1e11, 1e12])
            ]
        )  # This is the list of points that we're going to resample from the XFoil runs for our InterpolatedModel, using an RBF.

        x_data = {
            "alpha": data["alpha"],
            "ln_Re": np.log(data["Re"]),
        }
        x_data_resample = {
            "alpha": alpha_resample,
            "ln_Re": np.log(Re_resample)
        }

        CL_attached_interpolator = UnstructuredInterpolatedModel(
            x_data=x_data,
            y_data=data["CL"],
            x_data_resample=x_data_resample,
            **unstructured_interpolated_model_kwargs)
        log10_CD_attached_interpolator = UnstructuredInterpolatedModel(
            x_data=x_data,
            y_data=np.log10(data["CD"]),
            x_data_resample=x_data_resample,
            **unstructured_interpolated_model_kwargs)
        CM_attached_interpolator = UnstructuredInterpolatedModel(
            x_data=x_data,
            y_data=data["CM"],
            x_data_resample=x_data_resample,
            **unstructured_interpolated_model_kwargs)

        ### Determine if separated
        alpha_stall_positive = np.max(data["alpha"])  # Across all Re
        alpha_stall_negative = np.min(data["alpha"])  # Across all Re

        def separation_parameter(alpha, Re=0):
            """
            Positive if separated, negative if attached.

            This will be an input to a tanh() sigmoid blend via asb.numpy.blend(), so a value of 1 means the flow is
            ~90% separated, and a value of -1 means the flow is ~90% attached.
            """
            return 0.5 * np.softmax(alpha - alpha_stall_positive,
                                    alpha_stall_negative - alpha)

        ### Make the interpolators for separated aerodynamics
        from aerosandbox.aerodynamics.aero_2D.airfoil_polar_functions import airfoil_coefficients_post_stall

        CL_if_separated, CD_if_separated, CM_if_separated = airfoil_coefficients_post_stall(
            airfoil=self, alpha=alpha_resample)

        CD_if_separated = CD_if_separated + np.median(data["CD"])
        # The line above effectively ensures that separated CD will never be less than attached CD. Not exactly, but generally close. A good heuristic.

        CL_separated_interpolator = UnstructuredInterpolatedModel(
            x_data=alpha_resample, y_data=CL_if_separated)
        log10_CD_separated_interpolator = UnstructuredInterpolatedModel(
            x_data=alpha_resample, y_data=np.log10(CD_if_separated))
        CM_separated_interpolator = UnstructuredInterpolatedModel(
            x_data=alpha_resample, y_data=CM_if_separated)

        def CL_function(alpha, Re, mach=0, deflection=0):
            alpha = np.mod(alpha + 180,
                           360) - 180  # Keep alpha in the valid range.
            CL_attached = CL_attached_interpolator({
                "alpha": alpha,
                "ln_Re": np.log(Re),
            })
            CL_separated = CL_separated_interpolator(alpha)
            return np.blend(separation_parameter(alpha, Re), CL_separated,
                            CL_attached)

        def CD_function(alpha, Re, mach=0, deflection=0):
            alpha = np.mod(alpha + 180,
                           360) - 180  # Keep alpha in the valid range.
            log10_CD_attached = log10_CD_attached_interpolator({
                "alpha":
                alpha,
                "ln_Re":
                np.log(Re),
            })
            log10_CD_separated = log10_CD_separated_interpolator(alpha)
            return 10**np.blend(
                separation_parameter(alpha, Re),
                log10_CD_separated,
                log10_CD_attached,
            )

        def CM_function(alpha, Re, mach=0, deflection=0):
            alpha = np.mod(alpha + 180,
                           360) - 180  # Keep alpha in the valid range.
            CM_attached = CM_attached_interpolator({
                "alpha": alpha,
                "ln_Re": np.log(Re),
            })
            CM_separated = CM_separated_interpolator(alpha)
            return np.blend(separation_parameter(alpha, Re), CM_separated,
                            CM_attached)

        self.CL_function = CL_function
        self.CD_function = CD_function
        self.CM_function = CM_function
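A hedged sketch of how the generated polars might be used once `generate_polars()` has run on an Airfoil instance (the airfoil name and cache path are illustrative; running this requires XFoil to be available):

af = Airfoil("naca2412")  # illustrative airfoil with known coordinates
af.generate_polars(cache_filename="naca2412_xfoil.json")  # hypothetical cache path

# The polar functions are now attached to the instance:
CL = af.CL_function(alpha=5, Re=1e6)
CD = af.CD_function(alpha=5, Re=1e6)
CM = af.CM_function(alpha=5, Re=1e6)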
Example #21
def ln(x):
    # Natural log of the magnitude of x, so that negative inputs don't produce NaN.
    return np.log(np.abs(x))