Example #1
    def goodness_of_fit(self, type="R^2"):
        """
        Returns a metric of the goodness of the fit.

        Args:

            type: Type of metric to use for goodness of fit. One of:

                * R^2: The coefficient of determination. Strictly speaking, this is only mathematically rigorous
                for linear fits.

                    https://en.wikipedia.org/wiki/Coefficient_of_determination

        Returns: The metric of the goodness of the fit.

        """
        if type == "R^2":

            y_mean = np.mean(self.y_data)

            SS_tot = np.sum((self.y_data - y_mean)**2)

            y_model = self(self.x_data)

            SS_res = np.sum((self.y_data - y_model)**2)

            R_squared = 1 - SS_res / SS_tot

            return R_squared

        else:
            raise ValueError("Bad value of `type`!")
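
As a quick cross-check of the formula above, the same R^2 computation can be reproduced with plain NumPy
on a simple linear fit. This is a minimal sketch (only NumPy is assumed; the data and fit are made up):

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 50)
y = 2 * x + 1 + rng.normal(scale=0.5, size=x.shape)  # noisy line

slope, intercept = np.polyfit(x, y, 1)  # least-squares linear fit
y_model = slope * x + intercept

# Same R^2 formula as goodness_of_fit() above
SS_tot = np.sum((y - np.mean(y)) ** 2)
SS_res = np.sum((y - y_model) ** 2)
print(1 - SS_res / SS_tot)  # close to 1 for a good fit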
Example #2
def test_block_move_minimum_time():
    opti = asb.Opti()

    n_timesteps = 300

    time = np.linspace(
        0,
        opti.variable(init_guess=1, lower_bound=0),
        n_timesteps,
    )

    dyn = asb.DynamicsPointMass1DHorizontal(
        mass_props=asb.MassProperties(mass=1),
        x_e=opti.variable(init_guess=np.linspace(0, 1, n_timesteps)),
        u_e=opti.variable(init_guess=1, n_vars=n_timesteps),
    )

    u = opti.variable(init_guess=np.linspace(1, -1, n_timesteps), lower_bound=-1, upper_bound=1)

    dyn.add_force(
        Fx=u
    )

    dyn.constrain_derivatives(
        opti=opti,
        time=time
    )

    opti.subject_to([
        dyn.x_e[0] == 0,
        dyn.x_e[-1] == 1,
        dyn.u_e[0] == 0,
        dyn.u_e[-1] == 0,
    ])

    opti.minimize(
        time[-1]
    )

    sol = opti.solve()

    dyn = dyn.substitute_solution(sol)

    assert dyn.x_e[0] == pytest.approx(0)
    assert dyn.x_e[-1] == pytest.approx(1)
    assert dyn.u_e[0] == pytest.approx(0)
    assert dyn.u_e[-1] == pytest.approx(0)
    assert np.max(dyn.u_e) == pytest.approx(1, abs=0.01)
    assert sol.value(u)[0] == pytest.approx(1, abs=0.05)
    assert sol.value(u)[-1] == pytest.approx(-1, abs=0.05)
    assert np.mean(np.abs(sol.value(u))) == pytest.approx(1, abs=0.01)
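
The test above has a known closed-form answer, which makes a handy sanity check: the minimum-time block
move is the classic bang-bang control problem. Here is that arithmetic as a companion test (a sketch, not
part of the original suite):

def test_block_move_minimum_time_analytic_check():
    # For a unit mass moving a unit distance with |u| <= 1, the optimal control
    # is bang-bang: full thrust for the first half, full braking for the second.
    # Each half covers d/2 = 0.5 m at |a| = 1 m/s^2, taking sqrt(2 * 0.5 / 1) = 1 s,
    # so the minimum total time is 2 s -- the solved `time[-1]` above should match it.
    m, d, u_max = 1, 1, 1
    assert 2 * np.sqrt(m * d / u_max) == pytest.approx(2)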
Example #3
def patch_nans(
        array):  # TODO remove modification on incoming values; only patch NaNs
    """
    Patches NaN values in a 2D array. Can patch holes or entire regions. Uses Laplacian smoothing.
    :param array:
    :return:
    """
    original_nans = np.isnan(array)

    nanfrac = lambda array: np.sum(np.isnan(array)) / len(array.flatten())

    def item(i, j):
        if i < 0 or j < 0:  # don't allow wrapping other than what's controlled here
            return np.nan
        try:
            return array[i, j %
                         array.shape[1]]  # allow wrapping around day of year
        except IndexError:
            return np.nan

    print_title = lambda name: print(f"{name}\nIter | NaN Fraction")
    print_progress = lambda iter: print(f"{iter:4} | {nanfrac(array):.6f}")

    # Bridging
    print_title("Bridging")
    print_progress(0)
    iter = 1
    last_nanfrac = nanfrac(array)
    making_progress = True
    while making_progress:
        for i in range(array.shape[0]):
            for j in range(array.shape[1]):
                if not np.isnan(array[i, j]):
                    continue

                pairs = [
                    [item(i, j - 1), item(i, j + 1)],
                    [item(i - 1, j), item(i + 1, j)],
                    [item(i - 1, j + 1),
                     item(i + 1, j - 1)],
                    [item(i - 1, j - 1),
                     item(i + 1, j + 1)],
                ]

                for pair in pairs:
                    a = pair[0]
                    b = pair[1]

                    if not (np.isnan(a) or np.isnan(b)):
                        array[i, j] = (a + b) / 2
                        break  # bridge using the first valid opposing pair
        print_progress(iter)
        making_progress = nanfrac(array) != last_nanfrac
        last_nanfrac = nanfrac(array)
        iter += 1

    # Spreading
    for neighbors_to_spread in [4, 3, 2, 1]:
        print_title(f"Spreading with {neighbors_to_spread} neighbors")
        print_progress(0)
        iter = 1
        last_nanfrac = nanfrac(array)
        making_progress = True
        while making_progress:
            for i in range(array.shape[0]):
                for j in range(array.shape[1]):
                    if not np.isnan(array[i, j]):
                        continue

                    neighbors = np.array([
                        item(i, j - 1),
                        item(i, j + 1),
                        item(i - 1, j),
                        item(i + 1, j),
                        item(i - 1, j + 1),
                        item(i + 1, j - 1),
                        item(i - 1, j - 1),
                        item(i + 1, j + 1),
                    ])

                    valid_neighbors = neighbors[np.logical_not(
                        np.isnan(neighbors))]

                    if len(valid_neighbors) > neighbors_to_spread:
                        array[i, j] = np.mean(valid_neighbors)
            print_progress(iter)
            making_progress = nanfrac(array) != last_nanfrac
            last_nanfrac = nanfrac(array)
            iter += 1
        if last_nanfrac == 0:
            break

    assert last_nanfrac == 0, "Could not patch all NaNs!"

    # Diffusing
    print_title(
        "Diffusing"
    )  # TODO Perhaps use skimage gaussian blur kernel or similar instead of "+" stencil?
    for iter in range(50):
        print(f"{iter + 1:4}")
        for i in range(array.shape[0]):
            for j in range(array.shape[1]):
                if original_nans[i, j]:
                    neighbors = np.array([
                        item(i, j - 1),
                        item(i, j + 1),
                        item(i - 1, j),
                        item(i + 1, j),
                    ])

                    valid_neighbors = neighbors[np.logical_not(
                        np.isnan(neighbors))]

                    array[i, j] = np.mean(valid_neighbors)

    return array
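
A minimal usage sketch for patch_nans (only NumPy and the function above are assumed): punch a hole in a
smooth field, patch it, and confirm no NaNs remain. Note that the input array is modified in-place.

if __name__ == "__main__":
    field = np.sin(np.linspace(0, 3, 40))[:, None] * np.cos(np.linspace(0, 3, 60))[None, :]
    field[10:20, 15:30] = np.nan  # rectangular hole of NaNs

    patched = patch_nans(field)

    assert not np.any(np.isnan(patched))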
Example #4
    def variable(self,
                 init_guess: Union[float, np.ndarray],
                 n_vars: int = None,
                 scale: float = None,
                 freeze: bool = False,
                 log_transform: bool = False,
                 category: str = "Uncategorized",
                 lower_bound: float = None,
                 upper_bound: float = None,
                 ) -> cas.MX:
        """
        Initializes a new decision variable (or vector of decision variables). You must pass an initial guess (
        `init_guess`) upon defining a new variable. Dimensionality is inferred from this initial guess, but it can be
        overridden; see below for syntax.

        It is highly, highly recommended that you provide a scale (`scale`) for each variable, especially for
        nonconvex problems, although this is not strictly required.

        Args:

            init_guess: Initial guess for the optimal value of the variable being initialized. This is where in the
            design space the optimizer will start looking.

                This can be either a float or a NumPy ndarray; the dimension of the variable (i.e. scalar,
                vector) that is created will be automatically inferred from the shape of the initial guess you
                provide here. (Although it can also be overridden using the `n_vars` parameter; see below.)

                For scalar variables, your initial guess should be a float:

                >>> opti = asb.Opti()
                >>> scalar_var = opti.variable(init_guess=5) # Initializes a scalar variable at a value of 5

                For vector variables, your initial guess should be either:

                    * a float, in which case you must pass the length of the vector as `n_vars`, otherwise a scalar
                    variable will be created:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
                    >>> # 10, with all 10 elements set to an initial guess of 5.

                    * a NumPy ndarray, in which case each element will be initialized to the corresponding value in
                    the given array:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=np.linspace(0, 5, 10)) # Initializes a vector variable of
                    >>> # length 10, with all 10 elements initialized to linearly vary between 0 and 5.

                In the case where the variable is to be log-transformed (see `log_transform`), the initial guess
                should not be log-transformed as well - just supply the initial guess as usual. (Log-transform of the
                initial guess happens under the hood.) The initial guess must, of course, be a positive number in
                this case.

            n_vars: [Optional] Used to manually override the dimensionality of the variable to create; if not
            provided, the dimensionality of the variable is inferred from the initial guess `init_guess`.

                The only real case where you need to use this argument would be if you are initializing a vector
                variable to a scalar value, but you don't feel like using `init_guess=value * np.ones(n_vars)`.
                For example:

                    >>> opti = asb.Opti()
                    >>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
                    >>> # 10, with all 10 elements set to an initial guess of 5.

            scale: [Optional] Approximate scale of the variable.

                For example, if you're optimizing the design of an automobile and setting the tire diameter as an
                optimization variable, you might choose `scale=0.5`, corresponding to 0.5 meters.

                Properly scaling your variables can have a huge impact on solution speed (or even if the optimizer
                converges at all). Although most modern second-order optimizers (such as IPOPT, used here) are
                theoretically scale-invariant, numerical precision issues due to floating-point arithmetic can make
                solving poorly-scaled problems really difficult or impossible. See here for more info:
                https://web.casadi.org/blog/nlp-scaling/

                If not specified, the code will try to pick a sensible value by defaulting to the `init_guess`.
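
                As an illustrative sketch, a tire-diameter variable expected to be on the order of half a meter
                might be declared as:

                >>> opti = asb.Opti()
                >>> tire_diameter = opti.variable(init_guess=0.6, scale=0.5)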

            freeze: [Optional] This boolean tells the optimizer to "freeze" the variable at a specific value. To
            determine the value to freeze the variable at, the optimizer uses the following logic:

                    * If you initialize a new variable with the parameter `freeze=True`: the optimizer will freeze
                    the variable at the value of initial guess.

                        >>> opti = Opti()
                        >>> my_var = opti.variable(init_guess=5, freeze=True) # This will freeze my_var at a value of 5.

                    * If the Opti instance is associated with a cache file, and you told it to freeze a category (or
                    categories) of variables that this variable belongs to, and you didn't manually specify that
                    this variable should be frozen: the variable will be frozen based on the value in the cache
                    file, and the `init_guess` will be ignored. Example:

                        >>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
                        >>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
                        >>> my_var = opti.variable(init_guess=5, category="Wheel Sizing")
                        >>> # This will freeze my_var at a value of 10 (from the cache file, not the init_guess)

                    * If the Opti instance is associated with a cache file, and you told it to freeze a category (or
                    categories) of variables that this variable belongs to, but you then manually specified that
                    the variable should be frozen: the variable will once again be frozen at the value of `init_guess`:

                        >>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
                        >>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
                        >>> my_var = opti.variable(init_guess=5, category="Wheel Sizing", freeze=True)
                        >>> # This will freeze my_var at a value of 5 (`freeze` overrides category loading.)

            Motivation for freezing variables:

                The ability to freeze variables is exceptionally useful when designing engineering systems. Let's say
                we're designing an airplane. In the beginning of the design process, we're doing "clean-sheet" design
                - any variable is up for grabs for us to optimize on, because the airplane doesn't exist yet!
                However, the farther we get into the design process, the more things get "locked in" - we may have
                ordered jigs, settled on a wingspan, chosen an engine, et cetera. So, if something changes later (
                let's say that we discover that one of our assumptions was too optimistic halfway through the design
                process), we have to make up for that lost margin using only the variables that are still free. To do
                this, we would freeze the variables that are already decided on.

                By categorizing variables, you can also freeze entire categories of variables. For example,
                you can freeze all of the wing design variables for an airplane but leave all of the fuselage
                variables free.

                This idea of freezing variables can also be used to look at off-design performance - freeze a
                design, but change the operating conditions.

            log_transform: [Optional] Advanced use only. A flag for whether to internally log-transform this variable
            before passing it to the optimizer. Good for known-positive engineering quantities that become nonsensical
            if negative (e.g., mass). Log-transforming these variables can also help maintain convexity.
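
                As an illustrative sketch, a known-positive mass variable might be declared as:

                >>> opti = asb.Opti()
                >>> mass = opti.variable(init_guess=100, log_transform=True)  # optimizer sees log(mass) internally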

            category: [Optional] The category of variables that this variable belongs to. Categories can be used to
            freeze entire groups of variables via cache files (see `freeze` above).

        Usage notes:

            When using vector variables, individual components of this vector of variables can be accessed via normal
            indexing. Example:
                >>> opti = asb.Opti()
                >>> my_var = opti.variable(init_guess=0, n_vars=5)
                >>> opti.subject_to(my_var[3] >= my_var[2])  # This is a valid way of indexing
                >>> my_sum = np.sum(my_var)  # This will sum up all elements of `my_var`

        Returns:
            The variable itself as a symbolic CasADi variable (MX type).

        """
        ### Set defaults
        if n_vars is None:  # Infer dimensionality from init_guess if it is not provided
            n_vars = np.length(init_guess)
        if scale is None:  # Infer a scale from init_guess if it is not provided
            if log_transform:
                scale = 1
            else:
                scale = np.mean(np.fabs(init_guess))  # Initialize the scale to a heuristic based on the init_guess
                if scale == 0:  # If that heuristic leads to a scale of 0, use a scale of 1 instead.
                    scale = 1

                # scale = np.fabs(
                #     np.where(
                #         init_guess != 0,
                #         init_guess,
                #         1
                #     ))

        # Validate the inputs
        if log_transform:
            if np.any(init_guess <= 0):
                raise ValueError(
                    "If you are initializing a log-transformed variable, the initial guess(es) must all be positive.")
        if np.any(scale <= 0):
            raise ValueError("The 'scale' argument must be a positive number.")

        # If the variable is in a category to be frozen, fix the variable at the initial guess.
        is_manually_frozen = freeze
        if category in self.variable_categories_to_freeze:
            freeze = True

        # If the variable is to be frozen, return the initial guess. Otherwise, define the variable using CasADi symbolics.
        if freeze:
            var = self.parameter(n_params=n_vars, value=init_guess)
        else:
            if not log_transform:
                var = scale * super().variable(n_vars)
                self.set_initial(var, init_guess)
            else:
                log_scale = scale / init_guess
                log_var = log_scale * super().variable(n_vars)
                var = np.exp(log_var)
                self.set_initial(log_var, np.log(init_guess))

        # Track the variable
        if category not in self.variables_categorized:  # Add a category if it does not exist
            self.variables_categorized[category] = []
        self.variables_categorized[category].append(var)
        var.is_manually_frozen = is_manually_frozen

        # Apply bounds
        if lower_bound is not None:
            self.subject_to(var >= lower_bound)
        if upper_bound is not None:
            self.subject_to(var <= upper_bound)

        return var
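
Tying the pieces of this docstring together, here is a minimal end-to-end sketch of declaring variables and
solving (assuming `aerosandbox` imported as `asb` and its NumPy wrapper as `np`; the toy problem is made up):

import aerosandbox as asb
import aerosandbox.numpy as np

opti = asb.Opti()
x = opti.variable(init_guess=1, scale=1, lower_bound=0)  # scalar, bounded below
y = opti.variable(init_guess=np.linspace(0, 1, 10))      # vector of length 10

opti.subject_to(np.sum(y) == 5)
opti.minimize((x - 3) ** 2 + np.sum((y - x) ** 2))

sol = opti.solve()
print(sol.value(x), sol.value(y))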
Example #5
def test_mean():
    a = np.linspace(0, 10, 50)

    assert np.mean(a) == pytest.approx(5)
Example #6
    D = 0.5 * rho * S * C_D * V ** 2

    opti.subject_to([
        W_f >= TSFC * T_flight * D,
    ])

    V_f = W_f / g / rho_f
    # Wing fuel volume: scales as b * c^2 * tau, since S^1.5 / AR^0.5 = b * c^2
    # (linear in span b and thickness ratio tau, quadratic in chord c)
    V_f_wing = 0.03 * S ** 1.5 / AR ** 0.5 * tau

    V_f_avail = V_f_wing + V_f_fuse

    opti.subject_to(
        V_f_avail >= V_f
    )

    opti.minimize(W_f)

    sol = opti.solve(verbose=False)

def timeit():
    start = time.time()
    solve()  # presumably defined in the omitted, earlier portion of this example
    end = time.time()
    return end - start

if __name__ == '__main__':
    times = np.array([
        timeit() for i in range(10)
    ])
    print(np.mean(times))
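
One note on the timing pattern above: time.perf_counter() is generally preferred over time.time() for
benchmarking, since it is monotonic and higher-resolution. A sketch of the same loop using it (`solve()` is
again assumed to be defined in the omitted portion of this example):

import time

def timeit_perf(n_runs=10):
    durations = []
    for _ in range(n_runs):
        start = time.perf_counter()
        solve()  # assumed defined in the omitted portion of this example
        durations.append(time.perf_counter() - start)
    return float(np.mean(durations))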
Example #7
# Reorder speeds from (alt, lat) to (lat, alt), then flatten to 1D
speeds = data["speeds"].reshape(len(alts_v), len(lats_v)).T.flatten()

# Build matching flattened (lat, alt) coordinate arrays
lats, alts = np.meshgrid(lats_v, alts_v, indexing="ij")
lats = lats.flatten()
alts = alts.flatten()

# %%

# Normalize inputs and outputs to roughly order-1 using dataset-specific reference values
lats_scaled = (lats - 37.5) / 11.5
alts_scaled = (alts - 24200) / 24200
speeds_scaled = (speeds - 7) / 56

# Trapezoidal weights: each altitude point is weighted by the width of the
# interval it owns (half of each adjacent spacing), normalized to mean 1.
alt_diff = np.diff(alts_v)
alt_diff_aug = np.hstack((alt_diff[0], alt_diff, alt_diff[-1]))
weights_1d = (alt_diff_aug[:-1] + alt_diff_aug[1:]) / 2
weights_1d = weights_1d / np.mean(weights_1d)
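
# Worked example of the weighting above (illustration only, not used below):
# for altitude spacings [1, 1, 2, 4], the augmented spacings are [1, 1, 1, 2, 4, 4],
# so the raw weights are ([1, 1, 1, 2, 4] + [1, 1, 2, 4, 4]) / 2 = [1, 1, 1.5, 3, 4]
# -- each point owns half of each adjacent interval -- before normalizing to mean 1.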
# region_of_interest = np.logical_and(
#     alts_v > 10000,
#     alts_v < 40000
# )
# true_weights = np.where(
#     region_of_interest,
#     2,
#     1
# )
weights = np.tile(weights_1d, (93, 1)).flatten()  # tile across latitude rows; 93 must equal len(lats_v) to match the flattened grid

# %%


def model(x, p):