Example #1
def Cl_e216(alpha, Re_c):
    # A curve fit I did to an Eppler 216 (e216) airfoil, 2D XFoil data. Incompressible flow.
    # Within -2 < alpha < 12 and 10^4 < Re_c < 10^6, has R^2 = 0.9994
    # Likely valid from -6 < alpha < 12 and 10^4 < Re_c < Inf.
    # See: C:\Projects\GitHub\firefly_aerodynamics\Gists and Ideas\XFoil Drag Fitting\e216

    Re_c = np.fmax(Re_c, 1)
    log10_Re = np.log10(Re_c)

    # Coeffs
    a1l = 3.0904412662858878e-02
    a1t = 9.6452654383488254e-02
    a4t = -2.5633334023068302e-05
    asl = 6.4175433185427011e-01
    atr = 3.6775107602844948e-01
    c0l = -2.5909363461176749e-01
    c0t = 8.3824440586718862e-01
    ctr = 1.1431810545735890e+02
    ksl = 5.3416670116733611e-01
    rtr = 3.9713338634462829e+01
    rtr2 = -3.3634858542657771e+00
    xsl = -1.2220899840236835e-01

    a = alpha
    r = log10_Re

    Cl = (c0t + a1t * a + a4t * a**4) * 1 / (
        1 + np.exp(ctr - rtr * r - atr * a - rtr2 * r**2)) + (
            c0l + a1l * a + asl / (1 + np.exp(-ksl * (a - xsl)))) * (
                1 - 1 / (1 + np.exp(ctr - rtr * r - atr * a - rtr2 * r**2)))

    return Cl
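The Cl and Cd fits in this listing all share one structure: two polynomial branches (one for the low-Reynolds-number regime, one for the high-Reynolds-number regime) blended by a logistic sigmoid in alpha and log10(Re). A minimal sketch of that blending pattern in plain NumPy, using made-up placeholder branches rather than the fitted coefficients above:

import numpy as np

def blended_fit_sketch(alpha, Re_c):
    # Illustration only: the branch polynomials and sigmoid coefficients below are
    # placeholders, not the actual e216 fit.
    r = np.log10(np.fmax(Re_c, 1))
    high_Re_branch = 0.8 + 0.1 * alpha    # placeholder high-Re behavior
    low_Re_branch = -0.2 + 0.05 * alpha   # placeholder low-Re behavior
    sigma = 1 / (1 + np.exp(10 - 2 * r))  # logistic switch: ~0 at low Re, ~1 at high Re
    return sigma * high_Re_branch + (1 - sigma) * low_Re_branch

# The fitted functions themselves evaluate elementwise on arrays, e.g.:
alphas = np.linspace(-2, 12, 15)
Cl = Cl_e216(alphas, 2e5)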
Example #2
def Cl_rae2822(alpha, Re_c):
    # A curve fit I did to a RAE2822 airfoil, 2D XFoil data. Incompressible flow.
    # Within -2 < alpha < 12 and 10^4 < Re_c < 10^6, has R^2 = 0.9857
    # Likely valid from -6 < alpha < 12 and 10^4 < Re_c < 10^6.
    # See: C:\Projects\GitHub\firefly_aerodynamics\Gists and Ideas\XFoil Drag Fitting\rae2822

    Re_c = np.fmax(Re_c, 1)
    log10_Re = np.log10(Re_c)

    # Coeffs
    a1l = 5.5686866813855172e-02
    a1t = 9.7472055628494134e-02
    a4l = -7.2145733312046152e-09
    a4t = -3.6886704372829236e-06
    atr = 8.3723547264375520e-01
    atr2 = -8.3128119739031697e-02
    c0l = -4.9103908291438701e-02
    c0t = 2.3903424824298553e-01
    ctr = 1.3082854754897108e+01
    rtr = 2.6963082864300731e+00

    a = alpha
    r = log10_Re

    Cl = (c0t + a1t * a + a4t * a**4) * 1 / (
        1 + np.exp(ctr - rtr * r - atr * a - atr2 * a**2)) + (
            c0l + a1l * a +
            a4l * a**4) * (1 - 1 /
                           (1 + np.exp(ctr - rtr * r - atr * a - atr2 * a**2)))

    return Cl
Example #3
def Cd_profile_e216(alpha, Re_c):
    # A curve fit I did to an Eppler 216 (e216) airfoil, 2D XFoil data. Incompressible flow.
    # Within -2 < alpha < 12 and 10^4 < Re_c < 10^6, has R^2 = 0.9995
    # Likely valid from -6 < alpha < 12 and 10^4 < Re_c < 10^6.
    # see: C:\Projects\GitHub\firefly_aerodynamics\Gists and Ideas\XFoil Drag Fitting\e216

    Re_c = np.fmax(Re_c, 1)
    log10_Re = np.log10(Re_c)

    # Coeffs
    a1l = 4.7167470806940448e-02
    a1t = 7.5663005080888857e-02
    a2l = 8.7552076545610764e-04
    a4t = 1.1220763679805319e-05
    atr = 4.2456038382581129e-01
    c0l = -1.4099657419753771e+00
    c0t = -2.3855286371940609e+00
    ctr = 9.1474872611212135e+01
    rtr = 3.0218483612170434e+01
    rtr2 = -2.4515094313899279e+00

    a = alpha
    r = log10_Re

    log10_Cd = (c0t + a1t * a + a4t * a**4) * 1 / (
        1 + np.exp(ctr - rtr * r - atr * a - rtr2 * r**2)) + (
            c0l + a1l * a +
            a2l * a**2) * (1 - 1 /
                           (1 + np.exp(ctr - rtr * r - atr * a - rtr2 * r**2)))

    Cd = 10**log10_Cd

    return Cd
def test_basic_math(types):
    for x in types["all"]:
        for y in types["all"]:
            ### Arithmetic
            x + y
            x - y
            x * y
            x / y
            np.sum(x)  # Sum of all entries of array-like object x

            ### Exponentials & Powers
            x**y
            np.power(x, y)
            np.exp(x)
            np.log(x)
            np.log10(x)
            np.sqrt(x)  # Note: do x ** 0.5 rather than np.sqrt(x).

            ### Trig
            np.sin(x)
            np.cos(x)
            np.tan(x)
            np.arcsin(x)
            np.arccos(x)
            np.arctan(x)
            np.arctan2(y, x)
            np.sinh(x)
            np.cosh(x)
            np.tanh(x)
            np.arcsinh(x)
            np.arccosh(x)
            np.arctanh(x - 0.5)  # `- 0.5` to give valid argument
Example #5
def Cd_cylinder(Re_D: float, subcritical_only=False) -> float:
    """
    Returns the drag coefficient of a cylinder in crossflow as a function of its Reynolds number.
    :param Re_D: Reynolds number, referenced to diameter
    :param subcritical_only: If True, only the subcritical (Re < 300k) regime is modeled. Useful, since the
    subcritical-only model is convex and therefore better-behaved.
    :return: Drag coefficient

    # TODO rework this function to use tanh blending, which will mitigate overflows
    """
    csigc = 5.5766722118597247
    csigh = 23.7460859935990563
    csub0 = -0.6989492360435040
    csub1 = 1.0465189382830078
    csub2 = 0.7044228755898569
    csub3 = 0.0846501115443938
    csup0 = -0.0823564417206403
    csupc = 6.8020230357616764
    csuph = 9.9999999999999787
    csupscl = -0.4570690347113859

    x = np.log10(np.abs(Re_D) + 1e-16)

    if subcritical_only:
        Cd = 10**(csub0 * x + csub1) + csub2 + csub3 * x
    else:
        log10_Cd = (
            (np.log10(10**(csub0 * x + csub1) + csub2 + csub3 * x)) *
            (1 - 1 / (1 + np.exp(-csigh * (x - csigc)))) +
            (csup0 + csupscl / csuph * np.log(np.exp(csuph *
                                                     (csupc - x)) + 1)) *
            (1 / (1 + np.exp(-csigh * (x - csigc)))))
        Cd = 10**log10_Cd

    return Cd
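A minimal usage sketch, assuming `np` is plain NumPy; the Reynolds-number sweep is just an illustrative range:

import numpy as np

Re_sweep = np.logspace(2, 7, 6)  # 1e2 ... 1e7, spanning sub- and supercritical flow
Cd_full = Cd_cylinder(Re_sweep)  # full model, including the drag crisis
Cd_sub = Cd_cylinder(Re_sweep, subcritical_only=True)  # convex, subcritical-only model
for Re, cd_f, cd_s in zip(Re_sweep, Cd_full, Cd_sub):
    print(f"Re = {Re:.0e}: Cd = {cd_f:.3f} (full), {cd_s:.3f} (subcritical-only)")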
Example #6
def Cd_profile_rae2822(alpha, Re_c):
    # A curve fit I did to a RAE2822 airfoil, 2D XFoil data. Incompressible flow.
    # Within -2 < alpha < 12 and 10^4 < Re_c < 10^6, has R^2 = 0.9995
    # Likely valid from -6 < alpha < 12 and 10^4 < Re_c < Inf.
    # see: C:\Projects\GitHub\firefly_aerodynamics\Gists and Ideas\XFoil Drag Fitting\e216

    Re_c = np.fmax(Re_c, 1)
    log10_Re = np.log10(Re_c)

    # Coeffs
    at = 8.1034027621509015e+00
    c0l = -8.4296746456429639e-01
    c0t = -1.3700609138855402e+00
    kart = -4.1609994062600880e-01
    kat = 5.9510959342452441e-01
    krt = -7.1938030052506197e-01
    r1l = 1.1548628822014631e-01
    r1t = -4.9133662875044504e-01
    rt = 5.0070459892411696e+00

    a = alpha
    r = log10_Re

    log10_Cd = (c0t + r1t * (r - 4)) * (
            1 / (1 + np.exp(kat * (a - at) + krt * (r - rt) + kart * (a - at) * (r - rt)))) + (
                       c0l + r1l * (r - 4)) * (
                       1 - 1 / (1 + np.exp(kat * (a - at) + krt * (r - rt) + kart * (a - at) * (r - rt))))

    Cd = 10 ** log10_Cd

    return Cd
Example #7
def kussners_function(reduced_time: Union[np.ndarray, float]):
    """ 
    A commonly used approximation to Kussner's function (Sears and Sparks 1941)
    
    Args:
        reduced_time (float,np.ndarray) : Reduced time, equal to the number of semichords travelled. See function calculate_reduced_time
    """
    return 1 - 0.5 * np.exp(-0.13 * reduced_time) - 0.5 * np.exp(-reduced_time)
Example #8
def kussners_function(reduced_time: Union[np.ndarray, float]):
    """ 
    A commonly used approximation to Kussner's function (Sears and Sparks 1941)
    
    Args:
        reduced_time (float,np.ndarray) : This is equal to the number of semichords travelled. See function calculate_reduced_time
    """
    kussner = (1 - 0.5 * np.exp(-0.13 * reduced_time) -
               0.5 * np.exp(-reduced_time)) * np.where(reduced_time >= 0, 1, 0)
    return kussner
Example #9
def wagners_function(reduced_time: Union[np.ndarray, float]):
    """ 
    A commonly used approximation to Wagner's function 
    (Jones, R.T. The Unsteady Lift of a Finite Wing; Technical Report NACA TN-682; NACA: Washington, DC, USA, 1939)
    
    Args:
        reduced_time (float,np.ndarray) : Reduced time, equal to the number of semichords travelled. See function calculate_reduced_time
    """
    return (1 - 0.165 * np.exp(-0.0455 * reduced_time) -
            0.335 * np.exp(-0.3 * reduced_time))
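Both indicial-response approximations evaluate directly on arrays of reduced time; a small sketch assuming plain NumPy:

import numpy as np

s = np.linspace(0, 20, 5)     # reduced time, in semichords travelled
print(wagners_function(s))    # starts at 0.5 and approaches 1 as s grows
print(kussners_function(s))   # starts at 0 and approaches 1 as s grows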
Example #10
def barometric_formula(
    P_b,
    T_b,
    L_b,
    h,
    h_b,
):
    """
    The barometric pressure equation, from here: https://en.wikipedia.org/wiki/Barometric_formula
    Args:
        P_b: Pressure at the base of the layer, in Pa
        T_b: Temperature at the base of the layer, in K
        L_b: Temperature lapse rate, in K/m
        h: Altitude, in m
        h_b: Altitude at the base of the layer, in m

    Returns: Pressure at altitude h, in Pa

    """
    with np.errstate(divide="ignore", over="ignore"):
        T = T_b + L_b * (h - h_b)
        T = np.fmax(T,
                    0)  # Keep temperature nonnegative, no matter the inputs.
        if L_b != 0:
            return P_b * (T / T_b)**(-g / (gas_constant_air * L_b))
        else:
            return P_b * np.exp(-g * (h - h_b) / (gas_constant_air * T_b))
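A usage sketch for the troposphere layer of the standard atmosphere. `g` and `gas_constant_air` are module-level constants that the function assumes exist; they are set here to their usual values for illustration:

import numpy as np

g = 9.80665                 # gravitational acceleration [m/s^2]
gas_constant_air = 287.053  # specific gas constant of air [J/(kg*K)]

# ISA troposphere: sea-level base at 101325 Pa and 288.15 K, lapse rate -6.5 K/km
P_10km = barometric_formula(P_b=101325, T_b=288.15, L_b=-0.0065, h=10000, h_b=0)
print(P_10km)  # roughly 2.6e4 Pa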
def fuselage_base_drag_coefficient(mach: float) -> float:
    """
    A fit for the fuselage base drag coefficient of a cylindrical fuselage, as described in:

    MIL-HDBK-762: DESIGN OF AERODYNAMICALLY STABILIZED FREE ROCKETS:
        * Section 5-5.3.1 Body-of-Revolution Base Drag, Rocket Jet Plume-Off
        * Figure 5-140: Effects of Mach Number and Reynolds Number on Base Pressure

    Fits in /studies/FuselageBaseDragCoefficient

    Args:
        mach: Mach number [-]

    Returns: Fuselage base drag coefficient

    """

    m = mach
    p = {
        'a': 0.18024110740341143,
        'center_sup': -0.21737019935624047,
        'm_trans': 0.9985447737532848,
        'pc_sub': 0.15922582283573747,
        'pc_sup': 0.04698820458826384,
        'scale_sup': 0.34978926411193456,
        'trans_str': 9.999987483414937
    }

    return np.blend(
        p["trans_str"] * (m - p["m_trans"]), p["pc_sup"] +
        p["a"] * np.exp(-(p["scale_sup"] * (m - p["center_sup"]))**2),
        p["pc_sub"])
Example #12
def make_matrix(x):
    # Least-squares design matrix: polynomial columns 1, x, ..., x^(degree - 3),
    # plus cos(x), sin(x), and exp(x) basis columns. `degree` comes from the enclosing scope.
    matrix = np.ones((len(x), degree + 1))
    for j in range(1, degree - 2):
        matrix[:, j] = matrix[:, j - 1] * x  # build successive powers of x
    matrix[:, degree - 2] = np.cos(x)
    matrix[:, degree - 1] = np.sin(x)
    matrix[:, degree] = np.exp(x)
    return matrix
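A hedged usage sketch: `degree` is treated as a module-level constant for illustration, and the design matrix is used in an ordinary least-squares fit with plain NumPy:

import numpy as np

degree = 5  # columns are then [1, x, x**2, cos(x), sin(x), exp(x)]
x = np.linspace(0, 2, 50)
y = 1 + 2 * x + 0.5 * np.sin(x)  # synthetic data to fit
A = make_matrix(x)
coeffs, *_ = np.linalg.lstsq(A, y, rcond=None)  # least-squares basis coefficients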
Example #13
def gaussian_pitch(reduced_time: float) -> float:
    """
    A pitch maneuver resembling a Gaussian curve
    Args:
        reduced_time (float) 
    Returns:
        angle_of_attack (float) : in degrees
    """
    return -25 * np.exp(-((reduced_time - 7.5) / 3)**2)
Example #14
def pressure_differentiable(altitude):
    """
    Computes the pressure at a given altitude with a differentiable model.

    Args:
        altitude: Geopotential altitude [m]

    Returns: Pressure [Pa]

    """
    return np.exp(interpolated_log_pressure(altitude))
Example #15
def induced_drag_ratio_from_ground_effect(h_over_b  # type: float
                                          ):
    """
    Gives the ratio of actual induced drag to free-flight induced drag experienced by a wing in ground effect.
    Artificially smoothed below around h/b == 0.05 to retain differentiability and practicality.
    Source: W. F. Phillips, D. F. Hunsaker, "Lifting-Line Predictions for Induced Drag and Lift in Ground Effect".
        Using Equation 5 from the paper, which is modified from a model from Torenbeek:
            Torenbeek, E. "Ground Effects", 1982.
    :param h_over_b: (Height above ground) divided by (wingspan).
    :return: Ratio of induced drag in ground effect to induced drag out of ground effect [unitless]
    """
    h_over_b = np.softmax(h_over_b, 0, hardness=1 / 0.03)
    return 1 - np.exp(-4.01 * (2 * h_over_b)**0.717)
Example #16
def oswalds_efficiency(
        taper_ratio: float,
        aspect_ratio: float,
        sweep: float = 0.,
        fuselage_diameter_to_span_ratio: float = 0.,
) -> float:
    """
    Computes the Oswald's efficiency factor for a planar, tapered, swept wing.

    Based on "Estimating the Oswald Factor from Basic Aircraft Geometrical Parameters"
    by M. Nita, D. Scholz; Hamburg Univ. of Applied Sciences, 2012.

    Implementation of Section 5 from the above paper.

    Only valid for backwards-swept wings; i.e. 0 <= sweep < 90.

    Args:
        taper_ratio: Taper ratio of the wing (tip_chord / root_chord) [-]
        aspect_ratio: Aspect ratio of the wing (b^2 / S) [-]
        sweep: Wing quarter-chord sweep angle [deg]
        fuselage_diameter_to_span_ratio: Diameter of the fuselage divided by the wingspan [-]

    Returns: Oswald's efficiency factor [-]

    """
    sweep = np.clip(sweep, 0, 90)  # TODO input proper analytic continuation

    def f(l):  # f(lambda), given as Eq. 36 in the Nita and Scholz paper (see parent docstring).
        return (
                0.0524 * l ** 4
                - 0.15 * l ** 3
                + 0.1659 * l ** 2
                - 0.0706 * l
                + 0.0119
        )

    delta_lambda = -0.357 + 0.45 * np.exp(-0.0375 * sweep)
    # Eq. 37 in Nita & Scholz.
    # Note: there is a typo in the cited paper; the negative in the exponent was omitted.
    # A bit of thinking about this reveals that this omission must be erroneous.

    e_theo = 1 / (
            1 + f(taper_ratio - delta_lambda) * aspect_ratio
    )

    fuselage_wake_contraction_correction_factor = 1 - 2 * (fuselage_diameter_to_span_ratio) ** 2

    e = e_theo * fuselage_wake_contraction_correction_factor

    return e
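A quick usage sketch with representative, made-up geometry:

e = oswalds_efficiency(
    taper_ratio=0.4,  # tip chord / root chord
    aspect_ratio=8,   # b^2 / S
    sweep=15,         # quarter-chord sweep [deg]
    fuselage_diameter_to_span_ratio=0.1,
)
print(e)  # estimated Oswald span efficiency factor for this planform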
Example #17
    def _pressure_differentiable(self):
        altitude_scaled = self.altitude / 40000

        p1 = -1.822942e+00
        p2 = 5.366751e+00
        p3 = -5.021452e+00
        p4 = -4.424532e+00
        p5 = 1.151986e+01

        x = altitude_scaled
        logP = p5 + x * (p4 + x * (p3 + x * (p2 + x * (p1))))

        pressure = np.exp(logP)

        return pressure
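The nested multiplication is Horner's rule for a quartic in the scaled altitude; an equivalent standalone evaluation with NumPy's polyval (coefficients ordered from the highest power down), shown as a sketch:

import numpy as np

def log_pressure_sketch(altitude):
    # Same quartic as the Horner form above: p1*x^4 + p2*x^3 + p3*x^2 + p4*x + p5
    x = altitude / 40000
    return np.polyval(
        [-1.822942e+00, 5.366751e+00, -5.021452e+00, -4.424532e+00, 1.151986e+01],
        x,
    )

# np.exp(log_pressure_sketch(h)) then reproduces the pressure returned above.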
Example #18
def scattering_factor(elevation_angle):
    """
    Calculates a scattering factor (a factor that gives losses due to atmospheric scattering at low elevation angles).
    Source: AeroSandbox/studies/SolarPanelScattering
    :param elevation_angle: Angle between the horizon and the sun [degrees]
    :return: Fraction of the light that is not lost to scattering.
    """
    elevation_angle = np.clip(elevation_angle, 0, 90)
    theta = 90 - elevation_angle  # Angle between panel normal and the sun, in degrees

    # # Model 1
    # c = (
    #     0.27891510500505767300438719757949,
    #     -0.015994330894744987481281839336589,
    #     -19.707332432605799255043166340329,
    #     -0.66260979582573353852126274432521
    # )
    # scattering_factor = c[0] + c[3] * theta_rad + cas.exp(
    #     c[1] * (
    #             cas.tan(theta_rad) + c[2] * theta_rad
    #     )
    # )

    # Model 2
    c = (
        -0.04636,
        -0.3171
    )
    scattering_factor = np.exp(
        c[0] * (
                np.tand(theta * 0.999) + c[1] * np.radians(theta)
        )
    )

    # # Model 3
    # p1 = -21.74
    # p2 = 282.6
    # p3 = -1538
    # p4 = 1786
    # q1 = -923.2
    # q2 = 1456
    # x = theta_rad
    # scattering_factor = ((p1*x**3 + p2*x**2 + p3*x + p4) /
    #            (x**2 + q1*x + q2))

    # Keep this:
    # scattering_factor = cas.fmin(cas.fmax(scattering_factor, 0), 1)
    return scattering_factor
Example #19
def optimal_taper_ratio(sweep=0., ) -> float:
    """
    Computes the optimal (minimum-induced-drag) taper ratio for a given quarter-chord sweep angle.

    Based on "Estimating the Oswald Factor from Basic Aircraft Geometrical Parameters"
    by M. Nita, D. Scholz; Hamburg Univ. of Applied Sciences, 2012.

    Only valid for backwards-swept wings; i.e. 0 <= sweep < 90.

    Args:
        sweep: Wing quarter-chord sweep angle [deg]

    Returns: Optimal taper ratio

    """
    return 0.45 * np.exp(-0.0375 * sweep)
Example #20
def smoothmax(value1, value2, hardness):
    """
    A smooth maximum between two functions. Also referred to as the logsumexp() function.
    Useful because it's differentiable and preserves convexity!
    Great writeup by John D Cook here:
        https://www.johndcook.com/soft_maximum.pdf
    :param value1: Value of function 1.
    :param value2: Value of function 2.
    :param hardness: Hardness parameter. Higher values make this closer to max(x1, x2).
    :return: Soft maximum of the two supplied values.
    """
    value1 = value1 * hardness
    value2 = value2 * hardness
    max = np.fmax(value1, value2)
    min = np.fmin(value1, value2)
    out = max + np.log(1 + np.exp(min - max))
    out /= hardness
    return out
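The max/min shift is what keeps the log-sum-exp numerically safe; a quick sketch (plain NumPy assumed) showing the result approach the hard maximum as the hardness grows:

import numpy as np

for hardness in [1, 10, 100]:
    print(hardness, smoothmax(1.0, 1.2, hardness))
# -> tends toward max(1.0, 1.2) = 1.2 as hardness increases.
# Thanks to the shift, smoothmax(1e4, 2e4, 1) also evaluates cleanly, whereas a
# naive log(exp(1e4) + exp(2e4)) would overflow.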
Example #21
    def _temperature_differentiable(self):

        altitude_scaled = self.altitude / 40000

        p1 = -2.122102e+01
        p2 = 7.000812e+01
        p3 = -8.759170e+01
        p4 = 5.047893e+01
        p5 = -1.176537e+01
        p6 = -3.566535e-02
        p7 = 5.649588e+00

        x = altitude_scaled
        logT = p7 + x * (p6 + x * (p5 + x * (p4 + x * (p3 + x * (p2 + x * (p1))))))

        temperature = np.exp(logT)

        return temperature
Example #22
def Cd_profile_2412(alpha, Re_c):
    # A curve fit I did to a NACA 2412 airfoil in incompressible flow.
    # Within -2 < alpha < 12 and 10^5 < Re_c < 10^7, has R^2 = 0.9713

    Re_c = np.fmax(Re_c, 1)
    log_Re = np.log(Re_c)

    CD0 = -5.249
    Re0 = 15.61
    Re1 = 15.31
    alpha0 = 1.049
    alpha1 = -4.715
    cx = 0.009528
    cxy = -0.00588
    cy = 0.04838

    log_CD = CD0 + cx * (alpha - alpha0) ** 2 + cy * (log_Re - Re0) ** 2 + cxy * (alpha - alpha1) * (
            log_Re - Re1)  # basically, a rotated paraboloid in logspace
    CD = np.exp(log_CD)

    return CD
Example #23
def softmax(*args, hardness=1):
    """
    An element-wise softmax between two or more arrays. Also referred to as the logsumexp() function.

    Useful for optimization because it's differentiable and preserves convexity!

    Great writeup by John D Cook here:
        https://www.johndcook.com/soft_maximum.pdf

    Args:
        Provide any number of arguments as values to take the softmax of.

        hardness: Hardness parameter. Higher values make this closer to max(x1, x2).

    Returns:
        Soft maximum of the supplied values.
    """
    if hardness <= 0:
        raise ValueError("The value of `hardness` must be positive.")

    if len(args) <= 1:
        raise ValueError("You must call softmax with the value of two or more arrays that you'd like to take the "
                         "element-wise softmax of.")

    ### Scale the args by hardness
    args = [arg * hardness for arg in args]

    ### Find the element-wise max and min of the arrays:
    min = args[0]
    max = args[0]
    for arg in args[1:]:
        min = _np.fmin(min, arg)
        max = _np.fmax(max, arg)

    out = max + _np.log(sum(
            [_np.exp(array - max) for array in args]
        )
    )
    out = out / hardness
    return out
Example #24
def wind_speed_conus_summer_99(altitude, latitude):
    """
    Returns the 99th-percentile wind speed magnitude over the continental United States (CONUS) in July-Aug. Aggregate of data from 1972 to 2019.
    Fits at C:\Projects\GitHub\Wind_Analysis
    :param altitude: altitude [m]
    :param latitude: latitude [deg]
    :return: 99th-percentile wind speed over the continental United States in the summertime. [m/s]
    """
    l = (latitude - 37.5) / 11.5
    a = (altitude - 24200) / 24200

    agc = -0.5363486000267786
    agh = 1.9569754777072828
    ags = 0.1458701999734713
    aqc = -1.4645014948089652
    c0 = -0.5169694086686631
    c12 = 0.0849519807021402
    c21 = -0.0252010113059998
    c4a = 0.0225856848053377
    c4c = 1.0281877353734501
    cg = 0.8050736230004489
    cgc = 0.2786691793571486
    cqa = 0.1866078047914259
    cql = 0.0165126852561671
    cqla = -0.1361667658248024
    lgc = 0.6943655538727291
    lgh = 2.0777449841036777
    lgs = 0.9805766577269118
    lqc = 4.0356834595743214

    s = c0 + cql * (l -
                    lqc)**2 + cqa * (a - aqc)**2 + cqla * a * l + cg * np.exp(
                        -(np.fabs(l - lgc)**lgh /
                          (2 * lgs**2) + np.fabs(a - agc)**agh /
                          (2 * ags**2) + cgc * a * l)) + c4a * (
                              a - c4c)**4 + c12 * l * a**2 + c21 * l**2 * a

    speed = s * 56 + 7
    return speed
def model(x, p):
    l = x["lats_scaled"]
    a = x["alts_scaled"]

    agc = p["agc"]
    agh = p["agh"]
    ags = p["ags"]
    aqc = p["aqc"]
    c0 = p["c0"]
    c12 = p["c12"]
    c21 = p["c21"]
    c4a = p["c4a"]
    c4c = p["c4c"]
    cg = p["cg"]
    cgc = p["cgc"]
    cqa = p["cqa"]
    cql = p["cql"]
    cqla = p["cqla"]
    lgc = p["lgc"]
    lgh = p["lgh"]
    lgs = p["lgs"]
    lqc = p["lqc"]

    return (c0  # Constant
            + cql * (l - lqc)**2  # Quadratic in latitude
            + cqa * (a - aqc)**2  # Quadratic in altitude
            + cqla * a * l  # Quadratic cross-term
            + cg * np.exp(-(  # Gaussian bump
                np.fabs(l - lgc)**lgh /
                (2 * lgs**2) +  # Center/Spread in latitude
                np.fabs(a - agc)**agh /
                (2 * ags**2) +  # Center/Spread in altitude
                cgc * a * l  # Gaussian cross-term
            )) + c4a * (a - c4c)**4  # Altitude quartic
            + c12 * l * a**2  # Altitude linear-quadratic
            + c21 * l**2 * a  # Latitude linear-quadratic
            )
Example #26
def dW_ds(reduced_time):
    # Time derivative of the Wagner-function approximation above (0.0075075 = 0.165 * 0.0455).
    return (0.1005 * np.exp(-0.3 * reduced_time) +
            0.0075075 * np.exp(-0.0455 * reduced_time))
Example #27
def dK_ds(reduced_time):
    # Time derivative of the Kussner-function approximation above.
    return (0.065 * np.exp(-0.13 * reduced_time) +
            0.5 * np.exp(-reduced_time))
Example #28
    def variable(self,
                 init_guess: Union[float, np.ndarray],
                 n_vars: int = None,
                 scale: float = None,
                 freeze: bool = False,
                 log_transform: bool = False,
                 category: str = "Uncategorized",
                 lower_bound: float = None,
                 upper_bound: float = None,
                 ) -> cas.MX:
        """
        Initializes a new decision variable (or vector of decision variables). You must pass an initial guess (
        `init_guess`) upon defining a new variable. Dimensionality is inferred from this initial guess, but it can be
        overridden; see below for syntax.

        It is highly, highly recommended that you provide a scale (`scale`) for each variable, especially for
        nonconvex problems, although this is not strictly required.

        Args:

            init_guess: Initial guess for the optimal value of the variable being initialized. This is where in the
            design space the optimizer will start looking.

                This can be either a float or a NumPy ndarray; the dimension of the variable (i.e. scalar,
                vector) that is created will be automatically inferred from the shape of the initial guess you
                provide here. (Although it can also be overridden using the `n_vars` parameter; see below.)

                For scalar variables, your initial guess should be a float:

                >>> opti = asb.Opti()
                >>> scalar_var = opti.variable(init_guess=5) # Initializes a scalar variable at a value of 5

                For vector variables, your initial guess should be either:

                    * a float, in which case you must pass the length of the vector as `n_vars`, otherwise a scalar
                    variable will be created:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
                    >>> # 10, with all 10 elements set to an initial guess of 5.

                    * a NumPy ndarray, in which case each element will be initialized to the corresponding value in
                    the given array:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=np.linspace(0, 5, 10)) # Initializes a vector variable of
                    >>> # length 10, with all 10 elements initialized to linearly vary between 0 and 5.

                In the case where the variable is to be log-transformed (see `log_transform`), the initial guess
                should not be log-transformed as well - just supply the initial guess as usual. (Log-transform of the
                initial guess happens under the hood.) The initial guess must, of course, be a positive number in
                this case.

            n_vars: [Optional] Used to manually override the dimensionality of the variable to create; if not
            provided, the dimensionality of the variable is inferred from the initial guess `init_guess`.

                The only real case where you need to use this argument would be if you are initializing a vector
                variable to a scalar value, but you don't feel like using `init_guess=value * np.ones(n_vars)`.
                For example:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
                    >>> # 10, with all 10 elements set to an initial guess of 5.

            scale: [Optional] Approximate scale of the variable.

                For example, if you're optimizing the design of an automobile and setting the tire diameter as an
                optimization variable, you might choose `scale=0.5`, corresponding to 0.5 meters.

                Properly scaling your variables can have a huge impact on solution speed (or even if the optimizer
                converges at all). Although most modern second-order optimizers (such as IPOPT, used here) are
                theoretically scale-invariant, numerical precision issues due to floating-point arithmetic can make
                solving poorly-scaled problems really difficult or impossible. See here for more info:
                https://web.casadi.org/blog/nlp-scaling/

                If not specified, the code will try to pick a sensible value by defaulting to the `init_guess`.

            freeze: [Optional] This boolean tells the optimizer to "freeze" the variable at a specific value. In
            order to determine the value to freeze the variable at, the optimizer will use the following logic:

                    * If you initialize a new variable with the parameter `freeze=True`: the optimizer will freeze
                    the variable at the value of initial guess.

                        >>> opti = Opti()
                        >>> my_var = opti.variable(init_guess=5, freeze=True) # This will freeze my_var at a value of 5.

                    * If the Opti instance is associated with a cache file, and you told it to freeze a specific
                    category(s) of variables that your variable is a member of, and you didn't manually specify to
                    freeze the variable: the variable will be frozen based on the value in the cache file (and ignore
                    the `init_guess`). Example:

                        >>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
                        >>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
                        >>> my_var = opti.variable(init_guess=5, category="Wheel Sizing")
                        >>> # This will freeze my_var at a value of 10 (from the cache file, not the init_guess)

                    * If the Opti instance is associated with a cache file, and you told it to freeze a specific
                    category(s) of variables that your variable is a member of, but you then manually specified that
                    the variable should be frozen: the variable will once again be frozen at the value of `init_guess`:

                        >>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
                        >>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
                        >>> my_var = opti.variable(init_guess=5, category="Wheel Sizing", freeze=True)
                        >>> # This will freeze my_var at a value of 5 (`freeze` overrides category loading.)

            Motivation for freezing variables:

                The ability to freeze variables is exceptionally useful when designing engineering systems. Let's say
                we're designing an airplane. In the beginning of the design process, we're doing "clean-sheet" design
                - any variable is up for grabs for us to optimize on, because the airplane doesn't exist yet!
                However, the farther we get into the design process, the more things get "locked in" - we may have
                ordered jigs, settled on a wingspan, chosen an engine, et cetera. So, if something changes later (
                let's say that we discover that one of our assumptions was too optimistic halfway through the design
                process), we have to make up for that lost margin using only the variables that are still free. To do
                this, we would freeze the variables that are already decided on.

                By categorizing variables, you can also freeze entire categories of variables. For example,
                you can freeze all of the wing design variables for an airplane but leave all of the fuselage
                variables free.

                This idea of freezing variables can also be used to look at off-design performance - freeze a
                design, but change the operating conditions.

            log_transform: [Optional] Advanced use only. A flag of whether to internally-log-transform this variable
            before passing it to the optimizer. Good for known positive engineering quantities that become nonsensical
            if negative (e.g. mass). Log-transforming these variables can also help maintain convexity.

            category: [Optional] What category of variables does this belong to?

        Usage notes:

            When using vector variables, individual components of this vector of variables can be accessed via normal
            indexing. Example:
                >>> opti = asb.Opti()
                >>> my_var = opti.variable(init_guess=0, n_vars=5)
                >>> opti.subject_to(my_var[3] >= my_var[2])  # This is a valid way of indexing
                >>> my_sum = asb.sum(my_var)  # This will sum up all elements of `my_var`

        Returns:
            The variable itself as a symbolic CasADi variable (MX type).

        """
        ### Set defaults
        if n_vars is None:  # Infer dimensionality from init_guess if it is not provided
            n_vars = np.length(init_guess)
        if scale is None:  # Infer a scale from init_guess if it is not provided
            if log_transform:
                scale = 1
            else:
                scale = np.mean(np.fabs(init_guess))  # Initialize the scale to a heuristic based on the init_guess
                if scale == 0:  # If that heuristic leads to a scale of 0, use a scale of 1 instead.
                    scale = 1

                # scale = np.fabs(
                #     np.where(
                #         init_guess != 0,
                #         init_guess,
                #         1
                #     ))

        # Validate the inputs
        if log_transform:
            if np.any(init_guess <= 0):
                raise ValueError(
                    "If you are initializing a log-transformed variable, the initial guess(es) must all be positive.")
        if np.any(scale <= 0):
            raise ValueError("The 'scale' argument must be a positive number.")

        # If the variable is in a category to be frozen, fix the variable at the initial guess.
        is_manually_frozen = freeze
        if category in self.variable_categories_to_freeze:
            freeze = True

        # If the variable is to be frozen, return the initial guess. Otherwise, define the variable using CasADi symbolics.
        if freeze:
            var = self.parameter(n_params=n_vars, value=init_guess)
        else:
            if not log_transform:
                var = scale * super().variable(n_vars)
                self.set_initial(var, init_guess)
            else:
                log_scale = scale / init_guess
                log_var = log_scale * super().variable(n_vars)
                var = np.exp(log_var)
                self.set_initial(log_var, np.log(init_guess))

        # Track the variable
        if category not in self.variables_categorized:  # Add a category if it does not exist
            self.variables_categorized[category] = []
        self.variables_categorized[category].append(var)
        var.is_manually_frozen = is_manually_frozen

        # Apply bounds
        if lower_bound is not None:
            self.subject_to(var >= lower_bound)
        if upper_bound is not None:
            self.subject_to(var <= upper_bound)

        return var
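A minimal end-to-end sketch of how this method is typically used, assuming the surrounding class is AeroSandbox's Opti wrapper (imported here as asb) and that the rest of its interface (subject_to, minimize, solve) is available as in CasADi's Opti stack:

import aerosandbox as asb
import aerosandbox.numpy as np

opti = asb.Opti()
x = opti.variable(init_guess=1, scale=1)      # scalar design variable
y = opti.variable(init_guess=2 * np.ones(3))  # vector variable of length 3

opti.subject_to(y >= x)                       # elementwise constraint
opti.minimize(x**2 + np.sum((y - 1)**2))      # objective to minimize

sol = opti.solve()
print(sol.value(x), sol.value(y))             # optimal values after solving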
Example #29
def sigmoid(x):
    # Logistic sigmoid; note the negative sign in the exponent.
    return 1 / (1 + np.exp(-x))
Example #30
def model(m, p):
    return np.blend(
        p["trans_str"] * (m - p["m_trans"]), p["pc_sup"] +
        p["a"] * np.exp(-(p["scale_sup"] * (m - p["center_sup"]))**2),
        p["pc_sub"])