def _enforce_governing_equations(self):
        # Calculate unsteady lift due to pitching
        wagner = wagners_function(self.reduced_time)
        ds = self.reduced_time[1:] - self.reduced_time[:-1]
        da_ds = (self.angles_of_attack[1:] - self.angles_of_attack[:-1]) / ds
        init_term = self.angles_of_attack[0] * wagner[:-1]
        for i in range(self.timesteps - 1):
            integral_term = np.sum([da_ds[j] * wagner[i - j] * ds[j]
                                    for j in range(i)])
            self.lift_coefficients[i] = 2 * np.pi * (integral_term +
                                                     init_term[i])

        # Calculate unsteady lift due to transverse gust
        kussner = kussners_function(self.reduced_time)
        dw_ds = (self.gust_profile[1:] - self.gust_profile[:-1]) / ds
        init_term = self.gust_profile[0] * kussner
        for i in range(self.timesteps - 1):
            integral_term = 0
            for j in range(i):
                integral_term += dw_ds[j] * kussner[i - j] * ds[j]
            self.lift_coefficients[i] += 2 * np.pi / self.velocity * (
                init_term[i] + integral_term)

        # Calculate unsteady lift due to added mass
        self.lift_coefficients += np.pi / 2 * np.cos(
            self.angles_of_attack[:-1])**2 * da_ds

        # Integral of lift to be minimized
        lift_squared_integral = np.sum(self.lift_coefficients**2)

        # Constraints and objective to minimize
        self.opti.subject_to(self.angles_of_attack[0] == 0)
        self.opti.minimize(lift_squared_integral)
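
The convolution above is the discrete form of Wagner's indicial response. As a standalone cross-check, here is a minimal sketch in plain NumPy, using R.T. Jones' two-term exponential approximation of the Wagner function (an assumption; the library's `wagners_function` may use a different approximation):

import numpy as np

def wagner_jones(s):
    # R.T. Jones' approximation to the Wagner function
    return 1 - 0.165 * np.exp(-0.0455 * s) - 0.335 * np.exp(-0.3 * s)

s = np.linspace(0, 20, 200)  # reduced time (semichords travelled)
alpha = np.deg2rad(5) * np.ones_like(s)  # a step change in angle of attack

ds = np.diff(s)
da_ds = np.diff(alpha) / ds
w = wagner_jones(s)

cl = np.zeros(len(s) - 1)
for i in range(len(s) - 1):
    integral = sum(da_ds[j] * w[i - j] * ds[j] for j in range(i))
    cl[i] = 2 * np.pi * (alpha[0] * w[i] + integral)
# For a step input, cl rises from pi*alpha toward the steady-state 2*pi*alpha.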
Example #2
    def goodness_of_fit(self, type="R^2"):
        """
        Returns a metric of the goodness of the fit.

        Args:

            type: Type of metric to use for goodness of fit. One of:

                * R^2: The coefficient of determination. Strictly speaking, this is only mathematically
                rigorous for linear fits.

                    https://en.wikipedia.org/wiki/Coefficient_of_determination

        Returns: The metric of the goodness of the fit.

        """
        if type == "R^2":

            y_mean = np.mean(self.y_data)

            SS_tot = np.sum((self.y_data - y_mean)**2)

            y_model = self(self.x_data)

            SS_res = np.sum((self.y_data - y_model)**2)

            R_squared = 1 - SS_res / SS_tot

            return R_squared

        else:
            raise ValueError("Bad value of `type`!")
Example #3
def test_sum2():
    # Check it returns the same results with casadi and numpy
    a = np.array([[1, 2, 3], [1, 2, 3]])
    b = cas.SX(a)

    assert np.all(np.sum(a) == cas.DM(np.sum(b)))
    assert np.all(np.sum(a, axis=1) == cas.DM(np.sum(b, axis=1)))
Example #4
def test_basic_math(types):
    for x in types["all"]:
        for y in types["all"]:
            ### Arithmetic
            x + y
            x - y
            x * y
            x / y
            np.sum(x)  # Sum of all entries of array-like object x

            ### Exponentials & Powers
            x**y
            np.power(x, y)
            np.exp(x)
            np.log(x)
            np.log10(x)
            np.sqrt(x)  # Note: do x ** 0.5 rather than np.sqrt(x).

            ### Trig
            np.sin(x)
            np.cos(x)
            np.tan(x)
            np.arcsin(x)
            np.arccos(x)
            np.arctan(x)
            np.arctan2(y, x)
            np.sinh(x)
            np.cosh(x)
            np.tanh(x)
            np.arcsinh(x)
            np.arccosh(x)
            np.arctanh(x - 0.5)  # `- 0.5` to give valid argument
Example #5
def test_opti_hanging_chain_with_callback(plot=False):
    N = 40
    m = 40 / N
    D = 70 * N
    g = 9.81
    L = 1

    opti = asb.Opti()

    x = opti.variable(init_guess=np.linspace(-2, 2, N))
    y = opti.variable(
        init_guess=1,
        n_vars=N,
    )

    distance = np.sqrt(  # Distance from one node to the next
        np.diff(x)**2 + np.diff(y)**2)

    potential_energy_spring = 0.5 * D * np.sum((distance - L / N)**2)
    potential_energy_gravity = g * m * np.sum(y)
    potential_energy = potential_energy_spring + potential_energy_gravity

    opti.minimize(potential_energy)

    # Add end point constraints
    opti.subject_to([x[0] == -2, y[0] == 1, x[-1] == 2, y[-1] == 1])

    # Add a ground constraint
    opti.subject_to(y >= np.cos(0.1 * x) - 0.5)

    # Add a callback

    if plot:

        def my_callback(iter: int):
            plt.plot(opti.debug.value(x),
                     opti.debug.value(y),
                     ".-",
                     label=f"Iter {iter}",
                     zorder=3 + iter)

        fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)
        x_ground = np.linspace(-2, 2, N)
        y_ground = np.cos(0.1 * x_ground) - 0.5
        plt.plot(x_ground, y_ground, "--k", zorder=2)

    else:

        def my_callback(iter: int):
            print(f"Iter {iter}")
            print(f"\tx = {opti.debug.value(x)}")
            print(f"\ty = {opti.debug.value(y)}")

    sol = opti.solve(callback=my_callback)

    assert sol.value(potential_energy) == pytest.approx(626.462, abs=1e-3)

    if plot:
        plt.show()
Example #6
def obj(n):
    '''
    minimizes objective (loss+regulaization) given n and eta
    n contains values (0 or 1) for each variable
    '''
    opti = asb.Opti()
    coeffs = opti.variable(init_guess=np.zeros(degree + 1))
    y_model = model(coeffs * n)
    error = loss(y_model, y_data)
    opti.minimize(error + eta * np.sum(n))
    sol = opti.solve(verbose=False)

    return sol.value(error) + eta * np.sum(n), sol.value(coeffs)
Example #7
def lower_bound(n):
    '''
    finds a lower bound for instance n
    '''

    opti = asb.Opti()
    coeffs = opti.variable(init_guess=np.zeros(degree + 1))
    n_new = np.where(n == None, 1, n)
    n_new = np.array(n_new)
    y_model = model(coeffs * n_new)
    error = loss(y_model, y_data)
    opti.minimize(error + eta * np.sum(np.where(n == None, 0, n)))
    sol = opti.solve(verbose=False)
    return sol.value(error) + eta * np.sum(np.where(n == None, 0, n))
Example #8
def duhamel_integral_wagner(reduced_time: np.ndarray,
                            angle_of_attack: np.ndarray):
    """
    Calculates the duhamel superposition integral of Wagner's problem. 
    Given some arbitrary pitching profile, the lift coefficient 
    of a flat plate can be computed using this function
    
    
    Args:
        reduced_time (float,np.ndarray) : Reduced time, equal to the number of semichords travelled. See function reduced_time
        angle_of_attack (np.ndarray) : The angle of attack as a function of reduced time of the flat plate
    Returns:
        lift_coefficient (np.ndarray) : The lift coefficient history of the flat plate 
    """
    assert np.size(reduced_time) == np.size(angle_of_attack), \
        "The pitching history and time must have the same length"

    da_ds = np.gradient(angle_of_attack)
    lift_coefficient = np.zeros_like(reduced_time)
    wagner = wagners_function(reduced_time)
    ds = np.gradient(reduced_time)

    for i, s in enumerate(reduced_time):
        lift_coefficient[i] = 2 * np.pi * (
            angle_of_attack[0] * wagner[i] +
            np.sum([da_ds[j] * wagner[i - j] * ds[j] for j in range(i)]))

    return lift_coefficient
Example #9
def duhamel_integral_kussner(reduced_time: np.ndarray,
                             gust_velocity: np.ndarray, velocity: float):
    """
    Calculates the duhamel superposition integral of Kussner's problem. 
    Given some arbitrary transverse velocity profile, the lift coefficient 
    of a flat plate can be computed using this function
    
    
    Args:
        reduced_time (float,np.ndarray) : Reduced time, equal to the number of semichords travelled. See function reduced_time
        gust_velocity (np.ndarray) : The transverse velocity profile that the flate plate experiences
        velocity (float) :The velocity by which the flat plate enters the gust
        
    Returns:
        lift_coefficient (np.ndarray) : The lift coefficient history of the flat plate 
    """
    assert np.size(reduced_time) == np.size(gust_velocity), \
        "The velocity history and time must have the same length"

    dw_ds = np.gradient(gust_velocity)

    lift_coefficient = np.zeros_like(reduced_time)
    kussner = kussners_function(reduced_time)
    ds = np.gradient(reduced_time)

    for i, s in enumerate(reduced_time):
        lift_coefficient[i] = 2 * np.pi / velocity * (
            gust_velocity[0] * kussner[i] +
            np.sum([dw_ds[j] * kussner[i - j] * ds[j] for j in range(i)]))

    return lift_coefficient
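
A usage sketch for a sharp-edged gust, substituting a standard two-term exponential approximation for `kussners_function` (an assumption; the library's implementation may differ). For a step gust, dw/ds is zero after entry, so the Duhamel sum reduces to the initial term:

import numpy as np

def kussner_approx(s):
    # Common two-term exponential approximation of the Kussner function
    return 1 - 0.5 * np.exp(-0.13 * s) - 0.5 * np.exp(-s)

s = np.linspace(0, 20, 200)  # reduced time
velocity = 10.0  # speed at which the plate enters the gust
gust_velocity = 1.0  # transverse gust strength (step profile)

cl = 2 * np.pi / velocity * gust_velocity * kussner_approx(s)
# cl grows from 0 toward the steady-state value 2*pi*(w/U).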
Example #10
    def calculate_transients(self):
        self.optimal_pitching_profile_rad = self.opti.value(
            self.angles_of_attack)
        self.optimal_pitching_profile_deg = np.rad2deg(
            self.optimal_pitching_profile_rad)
        self.optimal_lift_history = self.opti.value(self.lift_coefficients)

        self.pitching_lift = np.zeros(self.timesteps - 1)
        # Calculate unsteady lift due to pitching
        wagner = wagners_function(self.reduced_time)
        ds = self.reduced_time[1:] - self.reduced_time[:-1]
        da_ds = (self.optimal_pitching_profile_rad[1:] -
                 self.optimal_pitching_profile_rad[:-1]) / ds
        init_term = self.optimal_pitching_profile_rad[0] * wagner[:-1]
        for i in range(self.timesteps - 1):
            integral_term = np.sum([da_ds[j] * wagner[i - j] * ds[j]
                                    for j in range(i)])
            self.pitching_lift[i] = 2 * np.pi * (integral_term + init_term[i])

        self.gust_lift = np.zeros(self.timesteps - 1)
        # Calculate unsteady lift due to transverse gust
        kussner = kussners_function(self.reduced_time)
        dw_ds = (self.gust_profile[1:] - self.gust_profile[:-1]) / ds
        init_term = self.gust_profile[0] * kussner
        for i in range(self.timesteps - 1):
            integral_term = 0
            for j in range(i):
                integral_term += dw_ds[j] * kussner[i - j] * ds[j]
            self.gust_lift[i] += 2 * np.pi / self.velocity * (init_term[i] +
                                                              integral_term)

        # Calculate unsteady lift due to added mass
        self.added_mass_lift = np.pi / 2 * np.cos(
            self.optimal_pitching_profile_rad[:-1])**2 * da_ds
Example #11
    def centroid(self):
        # Returns the centroid of the polygon, in nondimensional (chord-normalized) units.
        x = self.x()
        y = self.y()
        x_n = np.roll(x, -1)  # x_next, or x_i+1
        y_n = np.roll(y, -1)  # y_next, or y_i+1

        a = x * y_n - x_n * y  # a is the area of the triangle bounded by a given point, the next point, and the origin.

        A = 0.5 * np.sum(a)  # area

        x_c = 1 / (6 * A) * np.sum(a * (x + x_n))
        y_c = 1 / (6 * A) * np.sum(a * (y + y_n))
        centroid = np.array([x_c, y_c])

        return centroid
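
The formulas above are the standard shoelace (Green's theorem) expressions for polygon area and centroid. A minimal standalone check on a unit square, with hypothetical vertex arrays standing in for `self.x()` / `self.y()`:

import numpy as np

x = np.array([0., 1., 1., 0.])  # unit square, counterclockwise
y = np.array([0., 0., 1., 1.])
x_n, y_n = np.roll(x, -1), np.roll(y, -1)

a = x * y_n - x_n * y  # cross products of consecutive vertices
A = 0.5 * np.sum(a)  # area: 1.0
x_c = 1 / (6 * A) * np.sum(a * (x + x_n))  # 0.5
y_c = 1 / (6 * A) * np.sum(a * (y + y_n))  # 0.5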
Example #12
def test_quadcopter_navigation():
    opti = asb.Opti()

    N = 300
    time_final = 1
    time = np.linspace(0, time_final, N)

    left_thrust = opti.variable(init_guess=0.5, scale=1, n_vars=N, lower_bound=0, upper_bound=1)
    right_thrust = opti.variable(init_guess=0.5, scale=1, n_vars=N, lower_bound=0, upper_bound=1)

    mass = 0.1

    dyn = asb.FreeBodyDynamics(
        opti_to_add_constraints_to=opti,
        time=time,
        xe=opti.variable(init_guess=np.linspace(0, 1, N)),
        ze=opti.variable(init_guess=np.linspace(0, -1, N)),
        u=opti.variable(init_guess=0, n_vars=N),
        w=opti.variable(init_guess=0, n_vars=N),
        theta=opti.variable(init_guess=np.linspace(np.pi / 2, np.pi / 2, N)),
        q=opti.variable(init_guess=0, n_vars=N),
        X=left_thrust + right_thrust,
        M=(right_thrust - left_thrust) * 0.1 / 2,
        mass=mass,
        Iyy=0.5 * mass * 0.1 ** 2,
        g=9.81,
    )

    opti.subject_to([  # Starting state
        dyn.xe[0] == 0,
        dyn.ze[0] == 0,
        dyn.u[0] == 0,
        dyn.w[0] == 0,
        dyn.theta[0] == np.radians(90),
        dyn.q[0] == 0,
    ])

    opti.subject_to([  # Final state
        dyn.xe[-1] == 1,
        dyn.ze[-1] == -1,
        dyn.u[-1] == 0,
        dyn.w[-1] == 0,
        dyn.theta[-1] == np.radians(90),
        dyn.q[-1] == 0,
    ])

    effort = np.sum(  # The average "effort per second", where effort is integrated as follows:
        np.trapz(left_thrust ** 2 + right_thrust ** 2) * np.diff(time)
    ) / time_final

    opti.minimize(effort)

    sol = opti.solve()
    dyn.substitute_solution(sol)

    assert sol.value(effort) == pytest.approx(0.714563, rel=0.01)

    print(sol.value(effort))
Example #13
    def centroid(self) -> np.ndarray:
        """
        Returns the centroid of the polygon as a 1D np.ndarray of length 2.
        """
        x = self.x()
        y = self.y()
        x_n = np.roll(x, -1)  # x_next, or x_i+1
        y_n = np.roll(y, -1)  # y_next, or y_i+1

        a = x * y_n - x_n * y  # a is the area of the triangle bounded by a given point, the next point, and the origin.

        A = 0.5 * np.sum(a)  # area

        x_c = 1 / (6 * A) * np.sum(a * (x + x_n))
        y_c = 1 / (6 * A) * np.sum(a * (y + y_n))
        centroid = np.array([x_c, y_c])

        return centroid
Example #14
    def Ixx(self):
        # Returns the nondimensionalized Ixx moment of inertia, taken about the centroid.
        x = self.x()
        y = self.y()
        x_n = np.roll(x, -1)  # x_next, or x_i+1
        y_n = np.roll(y, -1)  # y_next, or y_i+1

        a = x * y_n - x_n * y  # a is the area of the triangle bounded by a given point, the next point, and the origin.

        A = 0.5 * np.sum(a)  # area

        x_c = 1 / (6 * A) * cas.sum1(a * (x + x_n))
        y_c = 1 / (6 * A) * cas.sum1(a * (y + y_n))
        centroid = np.array([x_c, y_c])

        Ixx = 1 / 12 * np.sum(a * (y**2 + y * y_n + y_n**2))

        Iuu = Ixx - A * centroid[1]**2

        return Iuu
Example #15
    def area(self):
        # Returns the area of the polygon, in nondimensional (normalized to chord^2) units.
        x = self.x()
        y = self.y()
        x_n = np.roll(x, -1)  # x_next, or x_i+1
        y_n = np.roll(y, -1)  # y_next, or y_i+1

        a = x * y_n - x_n * y  # a is the area of the triangle bounded by a given point, the next point, and the origin.

        A = 0.5 * np.sum(a)  # area

        return A
Example #16
    def Ixy(self):
        # Returns the nondimensionalized product of inertia, taken about the centroid.
        x = self.x()
        y = self.y()
        x_n = np.roll(x, -1)  # x_next, or x_i+1
        y_n = np.roll(y, -1)  # y_next, or y_i+1

        a = x * y_n - x_n * y  # a is the area of the triangle bounded by a given point, the next point, and the origin.

        A = 0.5 * np.sum(a)  # area

        x_c = 1 / (6 * A) * np.sum(a * (x + x_n))
        y_c = 1 / (6 * A) * np.sum(a * (y + y_n))
        centroid = np.array([x_c, y_c])

        Ixy = 1 / 24 * np.sum(a *
                              (x * y_n + 2 * x * y + 2 * x_n * y_n + x_n * y))

        Iuv = Ixy - A * centroid[0] * centroid[1]

        return Iuv
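
The inertia routines in Examples #14 and #16 share this shoelace machinery. As a sanity check, the centroidal Ixx of a unit square (hypothetical vertices, as in the centroid check under Example #11) should come out to 1/12:

import numpy as np

x = np.array([0., 1., 1., 0.])  # unit square, counterclockwise
y = np.array([0., 0., 1., 1.])
x_n, y_n = np.roll(x, -1), np.roll(y, -1)

a = x * y_n - x_n * y
A = 0.5 * np.sum(a)
y_c = 1 / (6 * A) * np.sum(a * (y + y_n))

Ixx = 1 / 12 * np.sum(a * (y ** 2 + y * y_n + y_n ** 2))  # about the x-axis: 1/3
Iuu = Ixx - A * y_c ** 2  # about the centroid: 1/3 - 1/4 = 1/12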
Example #17
def test_block_move_fixed_time():
    opti = asb.Opti()

    n_timesteps = 300

    time = np.linspace(0, 1, n_timesteps)

    dyn = asb.DynamicsPointMass1DHorizontal(
        mass_props=asb.MassProperties(mass=1),
        x_e=opti.variable(init_guess=np.linspace(0, 1, n_timesteps)),
        u_e=opti.variable(init_guess=1, n_vars=n_timesteps),
    )

    u = opti.variable(init_guess=np.linspace(1, -1, n_timesteps))

    dyn.add_force(
        Fx=u
    )

    dyn.constrain_derivatives(
        opti=opti,
        time=time
    )

    opti.subject_to([
        dyn.x_e[0] == 0,
        dyn.x_e[-1] == 1,
        dyn.u_e[0] == 0,
        dyn.u_e[-1] == 0,
    ])

    # effort = np.sum(
    #     np.trapz(dyn.X ** 2) * np.diff(time)
    # )

    effort = np.sum(  # Exact closed-form integral of the square of the piecewise-linear control input
        np.diff(time) / 3 *
        (u[:-1] ** 2 + u[:-1] * u[1:] + u[1:] ** 2)
    )

    opti.minimize(effort)

    sol = opti.solve()

    dyn.substitute_solution(sol)

    assert dyn.x_e[0] == pytest.approx(0)
    assert dyn.x_e[-1] == pytest.approx(1)
    assert dyn.u_e[0] == pytest.approx(0)
    assert dyn.u_e[-1] == pytest.approx(0)
    assert np.max(dyn.u_e) == pytest.approx(1.5, abs=0.01)
    assert sol.value(u)[0] == pytest.approx(6, abs=0.05)
    assert sol.value(u)[-1] == pytest.approx(-6, abs=0.05)
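
The effort integrand above uses the exact segment-wise integral of the square of a piecewise-linear function: for u(t) linear from u0 to u1 over a segment of length dt, the integral of u^2 is dt/3 * (u0^2 + u0*u1 + u1^2). A quick numerical check of that closed form:

import numpy as np

u0, u1, dt = 2.0, -1.0, 0.5
t = np.linspace(0, dt, 100001)
u = u0 + (u1 - u0) * t / dt
numeric = np.trapz(u ** 2, t)
exact = dt / 3 * (u0 ** 2 + u0 * u1 + u1 ** 2)
assert abs(numeric - exact) < 1e-6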
Example #18
    def Iyy(self):
        """
        Returns the nondimensionalized Iyy moment of inertia, taken about the centroid.
        """
        x = self.x()
        y = self.y()
        x_n = np.roll(x, -1)  # x_next, or x_i+1
        y_n = np.roll(y, -1)  # y_next, or y_i+1

        a = x * y_n - x_n * y  # a is the area of the triangle bounded by a given point, the next point, and the origin.

        A = 0.5 * np.sum(a)  # area

        x_c = 1 / (6 * A) * np.sum(a * (x + x_n))
        y_c = 1 / (6 * A) * np.sum(a * (y + y_n))
        centroid = np.array([x_c, y_c])

        Iyy = 1 / 12 * np.sum(a * (x**2 + x * x_n + x_n**2))

        Ivv = Iyy - A * centroid[0]**2

        return Ivv
Example #19
    def area(self) -> float:
        """
        Returns the area of the polygon.
        """
        x = self.x()
        y = self.y()
        x_n = np.roll(x, -1)  # x_next, or x_i+1
        y_n = np.roll(y, -1)  # y_next, or y_i+1

        a = x * y_n - x_n * y  # a is the area of the triangle bounded by a given point, the next point, and the origin.

        A = 0.5 * np.sum(a)  # area

        return A
Example #20
    def _calculate_forces(self):

        for airfoil in self.airfoils:
            panel_dx = np.diff(airfoil.x())
            panel_dy = np.diff(airfoil.y())
            panel_length = (panel_dx**2 + panel_dy**2)**0.5

            ### Sum up the vorticity on this airfoil by integrating
            airfoil.vorticity = np.sum(
                (airfoil.gamma[1:] + airfoil.gamma[:-1]) / 2 * panel_length)

            airfoil.Cl = 2 * airfoil.vorticity  # TODO normalize by chord and freestream velocity etc.

        self.total_vorticity = sum(
            [airfoil.vorticity for airfoil in self.airfoils])
        self.Cl = 2 * self.total_vorticity
Example #21
    def shape(w, x):
        # Class function
        C = x**N1 * (1 - x)**N2

        # Shape function (Bernstein polynomials)
        n = len(w) - 1  # Order of Bernstein polynomials

        K = comb(n, np.arange(n + 1))  # Bernstein polynomial coefficients

        S_matrix = (w * K * np.expand_dims(x, 1)**np.arange(n + 1) *
                    np.expand_dims(1 - x, 1)**(n - np.arange(n + 1))
                    )  # Polynomial coefficient * weight matrix
        S = np.sum(S_matrix, axis=1)

        # Calculate y output
        y = C * S
        return y
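
This is the CST (class/shape transformation) airfoil parameterization. A self-contained sketch of the same computation, with assumed class-function exponents and hypothetical Bernstein weights (`N1`, `N2`, and `comb` come from the enclosing scope in the snippet above; here `comb` is `scipy.special.comb`):

import numpy as np
from scipy.special import comb

N1, N2 = 0.5, 1.0  # class-function exponents: round nose, sharp trailing edge
w = np.array([0.2, 0.25, 0.2])  # hypothetical Bernstein weights
x = np.linspace(0, 1, 101)  # chordwise stations

C = x ** N1 * (1 - x) ** N2  # class function
n = len(w) - 1  # order of the Bernstein polynomials
K = comb(n, np.arange(n + 1))  # binomial coefficients
S_matrix = (w * K * np.expand_dims(x, 1) ** np.arange(n + 1) *
            np.expand_dims(1 - x, 1) ** (n - np.arange(n + 1)))
y = C * np.sum(S_matrix, axis=1)  # one airfoil surface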
Example #22
def peak_sun_hours_per_day_on_horizontal(latitude, day_of_year, scattering=True):
    """
    How many hours of equivalent peak sun do you get per day?
    :param latitude: Latitude [degrees]
    :param day_of_year: Julian day (1 == Jan. 1, 365 == Dec. 31)
    :param time: Time since (local) solar noon [seconds]
    :param scattering: Boolean: include scattering effects at very low angles?
    :return:
    """
    times = np.linspace(0, 86400, 1000)
    dt = np.diff(times)
    normalized_fluxes = (
        # solar_flux_outside_atmosphere_normal(day_of_year) *
        incidence_angle_function(latitude, day_of_year, times, scattering)
    )
    sun_hours = np.sum(
        (normalized_fluxes[1:] + normalized_fluxes[:-1]) / 2 * dt
    ) / 3600

    return sun_hours
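
The summation above is the trapezoidal rule written out by hand. A quick check that it matches `np.trapz` (plain NumPy here) on a toy flux profile:

import numpy as np

times = np.linspace(0, 86400, 1000)
fluxes = np.maximum(0, np.sin(2 * np.pi * times / 86400))  # toy daylight profile

dt = np.diff(times)
by_hand = np.sum((fluxes[1:] + fluxes[:-1]) / 2 * dt)
assert np.isclose(by_hand, np.trapz(fluxes, times))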
Example #23
    def __init__(
        self,
        model: Callable[
            [Union[np.ndarray,
                   Dict[str, np.ndarray]], Dict[str, float]], np.ndarray],
        x_data: Union[np.ndarray, Dict[str, np.ndarray]],
        y_data: np.ndarray,
        parameter_guesses: Dict[str, float],
        parameter_bounds: Dict[str, tuple] = None,
        residual_norm_type: str = "L2",
        fit_type: str = "best",
        weights: np.ndarray = None,
        put_residuals_in_logspace: bool = False,
        verbose=True,
    ):
        """
        Fits an analytical model to n-dimensional unstructured data using an automatic-differentiable optimization approach.

        Args:

            model: The model that you want to fit your dataset to. This is a callable with syntax f(x, p) where:

                * x is a dict of independent variables. Same format as x_data [dict of 1D ndarrays of length n].

                    * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead interpret x
                    as a 1D ndarray. (If you do this, just give `x_data` as an array.)

                * p is a dict of parameters. Same format as param_guesses [dict with syntax param_name:param_value].

                Model should return a 1D ndarray of length n.

                Basically, if you've done it right:
                >>> model(x_data, parameter_guesses)
                should evaluate to a 1D ndarray where each x_data is mapped to something analogous to y_data. (The fit
                will likely be bad at this point, because we haven't yet optimized on param_guesses - but the types
                should be happy.)

                Model should use aerosandbox.numpy operators.

                The model is not allowed to make any in-place changes to the input `x`. The most common way this
                manifests itself is if someone writes something to the effect of `x += 3` or similar. Instead, write `x =
                x + 3`.

            x_data: Values of the independent variable(s) in the dataset to be fitted. This is a dictionary; syntax is {
            var_name:var_data}.

                * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead supply x_data
                as a 1D ndarray. (If you do this, just treat `x` as an array in your model, not a dict.)

            y_data: Values of the dependent variable in the dataset to be fitted. [1D ndarray of length n]

            parameter_guesses: a dict of fit parameters. Syntax is {param_name:param_initial_guess}.

                * Parameters will be initialized to the values set here; all parameters need an initial guess.

                * param_initial_guess is a float; note that only scalar parameters are allowed.

            parameter_bounds: Optional: a dict of bounds on fit parameters. Syntax is {"param_name":(min, max)}.

                * May contain only a subset of param_guesses if desired.

                * Use None to represent one-sided constraints (i.e. (None, 5)).

            residual_norm_type: What error norm should we minimize to optimize the fit parameters? Options:

                * "L1": minimize the L1 norm or sum(abs(error)). Less sensitive to outliers.

                * "L2": minimize the L2 norm, also known as the Euclidian norm, or sqrt(sum(error ** 2)). The default.

                * "Linf": minimize the L_infinty norm or max(abs(error)). More sensitive to outliers.

            fit_type: Should we find the model of best fit (i.e. the model that minimizes the specified residual norm),
            or should we look for a model that represents an upper/lower bound on the data (useful for robust surrogate
            modeling, so that you can put bounds on modeling error):

                * "best": finds the model of best fit. Usually, this is what you want.

                * "upper bound": finds a model that represents an upper bound on the data (while still trying to minimize
                the specified residual norm).

                * "lower bound": finds a model that represents a lower bound on the data (while still trying to minimize
                the specified residual norm).

            weights: Optional: weights for data points. If not supplied, weights are assumed to be uniform.

                * Weights are automatically normalized. [1D ndarray of length n]

            put_residuals_in_logspace: Whether to optimize using the logarithmic error as opposed to the absolute error
            (useful for minimizing percent error).

            Note: If any model outputs or data are negative, this will raise an error!

            verbose: Should the progress of the optimization solve that is part of the fitting be displayed? See
            `aerosandbox.Opti.solve(verbose=)` syntax for more details.

        Returns: A model in the form of a FittedModel object. Some things you can do:
            >>> y = FittedModel(x) # evaluate the FittedModel at new x points
            >>> FittedModel.parameters # directly examine the optimal values of the parameters that were found
            >>> FittedModel.plot() # plot the fit


        """
        super().__init__()

        ##### Prepare all inputs, check types/sizes.

        ### Flatten all inputs
        def flatten(input):
            return np.array(input).flatten()

        try:
            x_data = {k: flatten(v) for k, v in x_data.items()}
            x_data_is_dict = True
        except AttributeError:  # If it's not a dict or dict-like, assume it's a 1D ndarray dataset
            x_data = flatten(x_data)
            x_data_is_dict = False
        y_data = flatten(y_data)
        n_datapoints = np.length(y_data)

        ### Handle weighting
        if weights is None:
            weights = np.ones(n_datapoints)
        else:
            weights = flatten(weights)
        sum_weights = np.sum(weights)
        if sum_weights <= 0:
            raise ValueError("The weights must sum to a positive number!")
        if np.any(weights < 0):
            raise ValueError(
                "No entries of the weights vector are allowed to be negative!")
        weights = weights / np.sum(
            weights)  # Normalize weights so that they sum to 1.

        ### Check format of parameter_bounds input
        if parameter_bounds is None:
            parameter_bounds = {}
        for param_name, v in parameter_bounds.items():
            if param_name not in parameter_guesses.keys():
                raise ValueError(
                    f"A parameter name (key = \"{param_name}\") in parameter_bounds was not found in parameter_guesses."
                )
            if not np.length(v) == 2:
                raise ValueError(
                    "Every value in parameter_bounds must be a tuple in the format (lower_bound, upper_bound). "
                    "For one-sided bounds, use None for the unbounded side.")

        ### If putting residuals in logspace, check positivity
        if put_residuals_in_logspace:
            if not np.all(y_data > 0):
                raise ValueError(
                    "You can't fit a model with residuals in logspace if y_data is not entirely positive!"
                )

        ### Check dimensionality of inputs to fitting algorithm
        relevant_inputs = {
            "y_data": y_data,
            "weights": weights,
        }
        try:
            relevant_inputs.update(x_data)
        except TypeError:
            relevant_inputs.update({"x_data": x_data})

        for key, value in relevant_inputs.items():
            # Check that the length of the inputs are consistent
            series_length = np.length(value)
            if not series_length == n_datapoints:
                raise ValueError(
                    f"The supplied data series \"{key}\" has length {series_length}, but y_data has length {n_datapoints}."
                )

        ##### Formulate and solve the fitting optimization problem

        ### Initialize an optimization environment
        opti = Opti()

        ### Initialize the parameters as optimization variables
        params = {}
        for param_name, param_initial_guess in parameter_guesses.items():
            if param_name in parameter_bounds:
                params[param_name] = opti.variable(
                    init_guess=param_initial_guess,
                    lower_bound=parameter_bounds[param_name][0],
                    upper_bound=parameter_bounds[param_name][1],
                )
            else:
                params[param_name] = opti.variable(
                    init_guess=param_initial_guess, )

        ### Evaluate the model at the data points you're trying to fit
        x_data_original = copy.deepcopy(
            x_data
        )  # Make a copy of x_data so that you can determine if the model did in-place operations on x and tattle on the user.

        try:
            y_model = model(x_data, params)  # Evaluate the model
        except Exception:
            raise Exception("""
            There was an error when evaluating the model you supplied with the x_data you supplied.
            Likely possible causes:
                * Your model() does not have the call syntax model(x, p), where x is the x_data and p are parameters.
                * Your model should take in p as a dict of parameters, but it does not.
                * Your model assumes x is an array-like but you provided x_data as a dict, or vice versa.
            See the docstring of FittedModel() if you have other usage questions or would like to see examples.
            """)

        try:  ### If the model did in-place operations on x_data, throw an error
            x_data_is_unchanged = np.all(x_data == x_data_original)
        except ValueError:
            x_data_is_unchanged = np.all([
                x_series == x_series_original
                for x_series, x_series_original in zip(
                    x_data.values(), x_data_original.values())
            ])
        if not x_data_is_unchanged:
            raise TypeError(
                "model(x_data, parameter_guesses) did in-place operations on x, which is not allowed!"
            )
        if y_model is None:  # Make sure that y_model actually returned something sensible
            raise TypeError(
                "model(x_data, parameter_guesses) returned None, when it should've returned a 1D ndarray."
            )

        ### Compute how far off you are (error)
        if not put_residuals_in_logspace:
            error = y_model - y_data
        else:
            y_model = np.fmax(
                y_model, 1e-300
            )  # Keep y_model very slightly always positive, so that log() doesn't NaN.
            error = np.log(y_model) - np.log(y_data)

        ### Set up the optimization problem to minimize some norm(error), which looks different depending on the norm used:
        if residual_norm_type.lower() == "l1":  # Minimize the L1 norm
            abs_error = opti.variable(init_guess=0, n_vars=np.length(
                y_data))  # Make the abs() of each error entry an opt. var.
            opti.subject_to([
                abs_error >= error,
                abs_error >= -error,
            ])
            opti.minimize(np.sum(weights * abs_error))

        elif residual_norm_type.lower() == "l2":  # Minimize the L2 norm
            opti.minimize(np.sum(weights * error**2))

        elif residual_norm_type.lower() == "linf":  # Minimize the L-infinity norm
            linf_value = opti.variable(
                init_guess=0
            )  # Make the value of the L-infinity norm an optimization variable
            opti.subject_to([
                linf_value >= weights * error, linf_value >= -weights * error
            ])
            opti.minimize(linf_value)

        else:
            raise ValueError("Bad input for the 'residual_type' parameter.")

        ### Add in the constraints specified by fit_type, which force the model to stay above / below the data points.
        if fit_type == "best":
            pass
        elif fit_type == "upper bound":
            opti.subject_to(y_model >= y_data)
        elif fit_type == "lower bound":
            opti.subject_to(y_model <= y_data)
        else:
            raise ValueError("Bad input for the 'fit_type' parameter.")

        ### Solve
        sol = opti.solve(verbose=verbose)

        ##### Construct a FittedModel

        ### Create a vector of solved parameters
        params_solved = {}
        for param_name in params:
            try:
                params_solved[param_name] = sol.value(params[param_name])
            except:
                params_solved[param_name] = np.NaN

        ### Store all the data and inputs
        self.model = model
        self.x_data = x_data
        self.y_data = y_data
        self.parameters = params_solved
        self.parameter_guesses = parameter_guesses
        self.parameter_bounds = parameter_bounds
        self.residual_norm_type = residual_norm_type
        self.fit_type = fit_type
        self.weights = weights
        self.put_residuals_in_logspace = put_residuals_in_logspace
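
Per the docstring above, a minimal end-to-end usage sketch on synthetic data (the model and parameter names here are arbitrary):

import aerosandbox as asb
import aerosandbox.numpy as np

x_data = np.linspace(0, 10, 50)
y_data = 3 * x_data + 2 + 0.1 * np.sin(x_data)  # nearly linear synthetic data

fitted_model = asb.FittedModel(
    model=lambda x, p: p["m"] * x + p["b"],  # f(x, p); 1D case, so x is an array
    x_data=x_data,
    y_data=y_data,
    parameter_guesses={"m": 1, "b": 0},
)
print(fitted_model.parameters)  # optimized {"m": ..., "b": ...}
print(fitted_model.goodness_of_fit())  # R^2, as in Example #2 above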
Example #24
def test_sum():
    a = np.arange(101)

    assert np.sum(a) == 5050  # Gauss would be proud.
Example #25
    def constrain_derivative(
        self,
        derivative: cas.MX,
        variable: cas.MX,
        with_respect_to: Union[np.ndarray, cas.MX],
        method: str = "midpoint",
        regularize: bool = False,
    ) -> None:
        """
        Adds a constraint to the optimization problem such that:

            d(variable) / d(with_respect_to) == derivative

        Can be used directly; also called indirectly by opti.derivative_of() for implicit derivative creation.

        Args:
            derivative: The derivative that is to be constrained here.

            variable: The variable or quantity that you are taking the derivative of. The "numerator" of the
            derivative, in colloquial parlance.

            with_respect_to: The variable or quantity that you are taking the derivative with respect to. The
            "denominator" of the derivative, in colloquial parlance.

                In a typical example case, this `with_respect_to` parameter would be time. Please make sure that the
                value of this parameter is monotonically increasing, otherwise you may get nonsensical answers.

            method: The type of integrator to use to define this derivative. Options are:

                * "forward euler" - a first-order-accurate forward Euler method

                    Citation: https://en.wikipedia.org/wiki/Euler_method

                * "backwards euler" - a first-order-accurate backwards Euler method

                    Citation: https://en.wikipedia.org/wiki/Backward_Euler_method

                * "midpoint" or "trapezoid" - a second-order-accurate midpoint method

                    Citation: https://en.wikipedia.org/wiki/Midpoint_method

                * "simpson" - Simpson's rule for integration

                    Citation: https://en.wikipedia.org/wiki/Simpson%27s_rule

                * "runge-kutta" or "rk4" - a fourth-order-accurate Runge-Kutta method. I suppose that technically,
                "forward euler", "backward euler", and "midpoint" are all (lower-order) Runge-Kutta methods...

                    Citation: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#The_Runge%E2%80%93Kutta_method

                * "runge-kutta-3/8" - A modified version of the Runge-Kutta 4 proposed by Kutta in 1901. Also
                fourth-order-accurate, but all of the error coefficients are smaller than they are in the standard
                Runge-Kutta 4 method. The downside is that more floating point operations are required per timestep,
                as the Butcher tableau is more dense (i.e. not banded).

                    Citation: Kutta, Martin (1901), "Beitrag zur näherungsweisen Integration totaler
                    Differentialgleichungen", Zeitschrift für Mathematik und Physik, 46: 435–453

            Note that all methods are expressed as integrators rather than differentiators; this prevents
            singularities from forming in the limit of timestep approaching zero. (For those coming from the PDE
            world, this is analogous to using finite volume methods rather than finite difference methods to allow
            shock capturing.)

            regularize: Most of these integration methods result in N-1 constraints for a problem with N state
            variables. This makes the problem ill-posed, as there is an extra degree of freedom added to the problem.
            If the regularize flag is set True, we will automatically add one more constraint to make the problem
            well-posed. The specific constraint that is added depends on the integration method used.

        Returns: None (adds constraint in-place).

        """
        d_var = np.diff(variable)
        d_time = np.diff(with_respect_to)  # Calculate the timestep

        # TODO scale constraints by variable scale?
        # TODO make

        if method == "forward euler":
            raise NotImplementedError
            self.subject_to(d_var == derivative[:-1] * d_time)
            self.subject_to(
                derivative[-1] == derivative[-2]  # First-order constraint at last point
            )

        elif method == "backward euler":
            raise NotImplementedError
            self.subject_to(d_var == derivative[1:] * d_time)
            self.subject_to(
                derivative[0] == derivative[1]  # First-order constraint at first point
            )

        elif method == "midpoint" or method == "trapezoid":
            self.subject_to(d_var == np.trapz(derivative) * d_time, )
            if regularize:
                # Apply a second-order constraint at the first point
                coefficients = np.finite_difference_coefficients(
                    x=with_respect_to[:3],
                    x0=with_respect_to[0],
                    derivative_degree=1)
                derivative_value = np.sum(variable[:3] * coefficients)
                self.subject_to(derivative[0] == derivative_value)

        elif method == "simpson":
            raise NotImplementedError

        elif method == "runge-kutta" or method == "rk4":
            raise NotImplementedError

        elif method == "runge-kutta-3/8":
            raise NotImplementedError
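
A usage sketch of the default midpoint method above, integrating a constant acceleration into a velocity state as a pure feasibility problem (no objective); compare the `opti.derivative_of` / `opti.constrain_derivative` pairing in Example #30 below. A minimal sketch, assuming the method is called on an `asb.Opti` instance:

import aerosandbox as asb
import aerosandbox.numpy as np

opti = asb.Opti()
N = 100
time = np.linspace(0, 1, N)

velocity = opti.variable(init_guess=np.zeros(N))
opti.constrain_derivative(
    derivative=-9.81 * np.ones(N),  # dv/dt = -g at every timestep
    variable=velocity,
    with_respect_to=time,
)
opti.subject_to(velocity[0] == 0)

sol = opti.solve()
# sol.value(velocity)[-1] should be approximately -9.81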
Example #26

    Uf_each, Vf_each, Wf_each = calculate_induced_velocity_horseshoe(
        x_field=wide(Xf),
        y_field=wide(Yf),
        z_field=wide(Zf),
        x_left=tall(lefts[:, 0]),
        y_left=tall(lefts[:, 1]),
        z_left=tall(lefts[:, 2]),
        x_right=tall(rights[:, 0]),
        y_right=tall(rights[:, 1]),
        z_right=tall(rights[:, 2]),
        gamma=tall(strengths),
    )

    Uf = np.sum(Uf_each, axis=0)
    Vf = np.sum(Vf_each, axis=0)
    Wf = np.sum(Wf_each, axis=0)

    pos = np.stack((Xf, Yf, Zf)).T
    dir = np.stack((Uf, Vf, Wf)).T

    dir_norm = np.reshape(np.linalg.norm(dir, axis=1), (-1, 1))

    dir = dir / dir_norm * dir_norm ** 0.2

    import pyvista as pv

    pv.set_plot_theme('dark')
    plotter = pv.Plotter()
    plotter.add_arrows(
Example #27
import aerosandbox as asb
import aerosandbox.numpy as np

degree = 10

opti = asb.Opti()

coeffs = opti.variable(init_guess=np.zeros(degree + 1))

vandermonde = np.ones((len(x), degree + 1))
for j in range(1, degree + 1):
    vandermonde[:, j] = vandermonde[:, j - 1] * x

y_model = vandermonde @ coeffs

error = np.sum((y_model - y_data)**2)

abs_coeffs = opti.variable(init_guess=np.zeros(degree + 1))
opti.subject_to([abs_coeffs > coeffs, abs_coeffs > -coeffs])

opti.minimize(error + 1e-4 * np.sum(abs_coeffs))

sol = opti.solve(verbose=False)

if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import seaborn as sns

    sns.set(palette=sns.color_palette("husl"))

    fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)
Example #28
import aerosandbox as asb
import aerosandbox.numpy as np  # Whoa! What is this? Why are we writing this instead of `import numpy as np`? Don't worry, we'll talk about this in the next tutorial :)

N = 100  # Let's optimize in 100-dimensional space.

opti = asb.Opti()

# Define optimization variables
x = opti.variable(
    init_guess=np.ones(
        shape=N
    )  # Creates a variable with an initial guess that is [1, 1, 1, 1,...] with N entries.
)  # Note that the fact that we're declaring a vectorized variable was *inferred* automatically from the shape of our initial guess.

# Define objective
f = np.sum(x**2)
opti.minimize(f)

# Optimize
sol = opti.solve()

# Extract values at the optimum
x_opt = sol.value(x)

# Print values
print(f"x = {x_opt}")
"""
We find the solution of this optimization problem to be a vector of 100 zeroes - makes sense.

Note that because this is a special case called a "quadratic program" and we're using a modern second-order optimizer 
(IPOPT) as the backend, this solves in just one iteration.
"""
Example #29
def patch_nans(
        array):  # TODO remove modification on incoming values; only patch nans
    """
    Patches NaN values in a 2D array. Can patch holes or entire regions. Uses Laplacian smoothing.
    :param array:
    :return:
    """
    original_nans = np.isnan(array)

    nanfrac = lambda array: np.sum(np.isnan(array)) / len(array.flatten())

    def item(i, j):
        if i < 0 or j < 0:  # don't allow wrapping other than what's controlled here
            return np.nan
        try:
            return array[i, j % array.shape[1]]  # allow wrapping around day of year
        except IndexError:
            return np.nan

    print_title = lambda name: print(f"{name}\nIter | NaN Fraction")
    print_progress = lambda iter: print(f"{iter:4} | {nanfrac(array):.6f}")

    # Bridging
    print_title("Bridging")
    print_progress(0)
    iter = 1
    last_nanfrac = nanfrac(array)
    making_progress = True
    while making_progress:
        for i in range(array.shape[0]):
            for j in range(array.shape[1]):
                if not np.isnan(array[i, j]):
                    continue

                pairs = [
                    [item(i, j - 1), item(i, j + 1)],
                    [item(i - 1, j), item(i + 1, j)],
                    [item(i - 1, j + 1),
                     item(i + 1, j - 1)],
                    [item(i - 1, j - 1),
                     item(i + 1, j + 1)],
                ]

                for pair in pairs:
                    a = pair[0]
                    b = pair[1]

                    if not (np.isnan(a) or np.isnan(b)):
                        array[i, j] = (a + b) / 2
                        break  # stop after the first valid bridging pair
        print_progress(iter)
        making_progress = nanfrac(array) != last_nanfrac
        last_nanfrac = nanfrac(array)
        iter += 1

    # Spreading
    for neighbors_to_spread in [4, 3, 2, 1]:
        print_title(f"Spreading with {neighbors_to_spread} neighbors")
        print_progress(0)
        iter = 1
        last_nanfrac = nanfrac(array)
        making_progress = True
        while making_progress:
            for i in range(array.shape[0]):
                for j in range(array.shape[1]):
                    if not np.isnan(array[i, j]):
                        continue

                    neighbors = np.array([
                        item(i, j - 1),
                        item(i, j + 1),
                        item(i - 1, j),
                        item(i + 1, j),
                        item(i - 1, j + 1),
                        item(i + 1, j - 1),
                        item(i - 1, j - 1),
                        item(i + 1, j + 1),
                    ])

                    valid_neighbors = neighbors[np.logical_not(
                        np.isnan(neighbors))]

                    if len(valid_neighbors) > neighbors_to_spread:
                        array[i, j] = np.mean(valid_neighbors)
            print_progress(iter)
            making_progress = nanfrac(array) != last_nanfrac
            last_nanfrac = nanfrac(array)
            iter += 1
        if last_nanfrac == 0:
            break

    assert last_nanfrac == 0, "Could not patch all NaNs!"

    # Diffusing
    print_title(
        "Diffusing"
    )  # TODO Perhaps use skimage gaussian blur kernel or similar instead of "+" stencil?
    for iter in range(50):
        print(f"{iter + 1:4}")
        for i in range(array.shape[0]):
            for j in range(array.shape[1]):
                if original_nans[i, j]:
                    neighbors = np.array([
                        item(i, j - 1),
                        item(i, j + 1),
                        item(i - 1, j),
                        item(i + 1, j),
                    ])

                    valid_neighbors = neighbors[np.logical_not(
                        np.isnan(neighbors))]

                    array[i, j] = np.mean(valid_neighbors)

    return array
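
A usage sketch (note that, per the TODO above, `patch_nans` currently modifies its input in place, so pass a copy if you need the original):

import numpy as np

field = np.arange(25, dtype=float).reshape(5, 5)
field[2, 2] = np.nan  # punch a hole in the data

patched = patch_nans(field.copy())
assert not np.any(np.isnan(patched))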
Example #30
velocity = opti.derivative_of(
    position,
    with_respect_to=time,
    derivative_init_guess=1,
)

force = opti.variable(init_guess=np.linspace(1, -1, n_timesteps),
                      n_vars=n_timesteps)

opti.constrain_derivative(
    variable=velocity,
    with_respect_to=time,
    derivative=force / mass_block,
)

effort_expended = np.sum(np.trapz(force**2) * np.diff(time))

opti.minimize(effort_expended)

### Boundary conditions
opti.subject_to([
    position[0] == 0,
    position[-1] == 1,
    velocity[0] == 0,
    velocity[-1] == 0,
])

sol = opti.solve()

import matplotlib.pyplot as plt
import seaborn as sns