def test_where_numpy():
    """np.where should pick elementwise from the first array where the mask is True, else from the second."""
    ones = np.ones(4)
    twos = 2 * np.ones(4)
    mask = np.array([True, False, True, False])
    selected = np.where(mask, ones, twos)
    expected = np.array([1, 2, 1, 2])
    assert np.all(selected == expected)
def types():
    """
    Build example values of each variable kind (scalar / vector / matrix), in
    every backing data type of interest: NumPy, CasADi (Opti variables), and
    dynamically-typed arrays (whose element type depends on their inputs).

    Returns a dict with keys "scalar", "vector", "matrix", and "all" (the
    concatenation of the first three lists).
    """
    ### NumPy-backed values
    scalar_np = np.array(1)
    vector_np = np.array([1, 1])
    matrix_np = np.ones((2, 2))

    ### CasADi-backed values
    opti = Opti()
    scalar_cas = opti.variable(init_guess=1)
    vector_cas = opti.variable(n_vars=2, init_guess=1)

    ### Dynamically-typed values (result type depends on the inputs)
    vector_dynamic = np.array([scalar_cas, scalar_cas])  # vector as a dynamic-typed array
    matrix_dynamic = np.array([  # matrix as a dynamic-typed array
        [scalar_cas, scalar_cas],
        [scalar_cas, scalar_cas],
    ])

    ### Collect the possible variable types for scalars, vectors, and matrices.
    scalar_options = [scalar_cas, scalar_np]
    vector_options = [vector_cas, vector_np, vector_dynamic]
    matrix_options = [matrix_np, matrix_dynamic]

    return {
        "scalar": scalar_options,
        "vector": vector_options,
        "matrix": matrix_options,
        "all": scalar_options + vector_options + matrix_options,
    }
def reshape(value):
    """
    Reshape `value` to `shape_for_reshaping` (read from the enclosing scope).

    Scalar-like values (plain Python numbers, 0-d arrays, or single-element
    arrays) that cannot be reshaped directly are instead broadcast by
    multiplying with an array of ones of the target shape.

    Raises:
        ValueError: if `value` is neither reshapeable nor scalar-like.
    """
    try:
        return np.reshape(value, shape_for_reshaping)
    except ValueError as e:
        # Broadcast scalar-likes: numbers, 0-d arrays, or arrays with exactly one element.
        if (
                isinstance(value, (int, float))
                or value.shape == tuple()
                or np.prod(value.shape) == 1  # np.product was removed in NumPy 2.0; np.prod is equivalent.
        ):
            return value * np.ones(shape_for_reshaping)
        raise ValueError("Could not reshape value of one of the inputs!") from e
def make_matrix(x):
    """
    Assemble the design matrix for the fit, one row per sample in `x`.

    Columns 0 through `degree - 3` hold successive powers of x (x^0, x^1, ...);
    the final three columns hold cos(x), sin(x), and exp(x). `degree` is read
    from the enclosing scope.
    """
    n_cols = degree + 1
    A = np.ones((len(x), n_cols))
    # Each polynomial column is the previous column times x.
    for col in range(1, degree - 2):
        A[:, col] = A[:, col - 1] * x
    A[:, degree - 2] = np.cos(x)
    A[:, degree - 1] = np.sin(x)
    A[:, degree] = np.exp(x)
    return A
def test_rocket_control_problem(plot=False):
    """
    Minimum-peak-acceleration 1D rocket trajectory problem, discretized over
    T timesteps, with fuel-consumption and jerk limits. The optimal peak
    acceleration is checked against a value solved externally with Julia JuMP.
    """
    ### Problem constants
    T = 100  # number of timesteps
    d = 50  # total distance to travel
    delta = 1e-3  # jerk limit
    f = 1000  # total fuel available
    c = np.ones(T)  # fuel cost per unit acceleration, at each timestep

    ### Set up an optimization environment and declare the decision variables
    opti = asb.Opti()
    x = opti.variable(init_guess=np.linspace(0, d, T))  # position
    v = opti.variable(init_guess=d / T, n_vars=T)  # velocity
    a = opti.variable(init_guess=0, n_vars=T)  # acceleration
    gamma = opti.variable(init_guess=0, n_vars=T)  # instantaneous fuel consumption
    a_max = opti.variable(init_guess=0)  # maximum acceleration

    opti.subject_to([
        cas.diff(x) == v[:-1],  # physics: velocity is the derivative of position
        cas.diff(v) == a[:-1],  # physics: acceleration is the derivative of velocity
        x[0] == 0,  # boundary condition: start at rest at the origin...
        v[0] == 0,
        x[-1] == d,  # ...and end at rest at distance d.
        v[-1] == 0,
        gamma >= c * a,  # gamma bounds |c * a| from above (epigraph form)
        gamma >= -c * a,
        cas.sum1(gamma) <= f,  # fuel consumption limit
        cas.diff(a) <= delta,  # jerk limits
        cas.diff(a) >= -delta,
        a_max >= a,  # a_max bounds |a| from above (epigraph form)
        a_max >= -a,
    ])

    opti.minimize(a_max)  # minimize the peak acceleration

    sol = opti.solve()

    # Reference optimum was solved externally with Julia JuMP.
    assert sol.value(a_max) == pytest.approx(0.02181991952, rel=1e-3)

    if plot:
        import matplotlib.pyplot as plt
        import seaborn as sns
        sns.set(palette=sns.color_palette("husl"))
        fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
        for i, val, lab in zip(np.arange(3), [x, v, a], ["$x$", "$v$", "$a$"]):
            plt.subplot(3, 1, i + 1)
            plt.plot(sol.value(val), label=lab)
            plt.xlabel(r"Time [s]")
            plt.ylabel(lab)
            plt.legend()
        plt.suptitle(r"Rocket Trajectory")
        plt.tight_layout()
        plt.show()
def test_length():
    """length() should report 1 for scalars and the leading dimension for array-likes."""
    cases = [
        (5, 1),
        (5., 1),
        ([1, 2, 3], 3),
        (np.array(5), 1),
        (np.array([5]), 1),
        (np.array([1, 2, 3]), 3),
        (np.ones((3, 2)), 3),
        (cas.GenMX_ones(5), 5),
    ]
    for value, expected in cases:
        assert length(value) == expected
# Fit a degree-10 polynomial to (x, y_data) by least squares, with an
# L1 (lasso-style) penalty on the coefficients to encourage sparsity.
from data import x, y_data
import aerosandbox as asb
import aerosandbox.numpy as np

degree = 10  # polynomial degree of the fit

opti = asb.Opti()
coeffs = opti.variable(init_guess=np.zeros(degree + 1))  # polynomial coefficients c_0 .. c_degree

# Build the Vandermonde matrix column-by-column: column j holds x**j.
vandermonde = np.ones((len(x), degree + 1))
for j in range(1, degree + 1):
    vandermonde[:, j] = vandermonde[:, j - 1] * x

y_model = vandermonde @ coeffs  # model predictions at the data points
error = np.sum((y_model - y_data)**2)  # sum-of-squares residual

# abs_coeffs >= |coeffs|, expressed as two linear constraints, so the L1
# penalty below stays smooth for the optimizer.
abs_coeffs = opti.variable(init_guess=np.zeros(degree + 1))
opti.subject_to([abs_coeffs > coeffs, abs_coeffs > -coeffs])

opti.minimize(error + 1e-4 * np.sum(abs_coeffs))  # least-squares objective + L1 regularization

sol = opti.solve(verbose=False)

if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import seaborn as sns
    sns.set(palette=sns.color_palette("husl"))
    # NOTE(review): plotting presumably continues beyond this excerpt — confirm against the full file.
contains n entries, each of which is a scalar variable. Let's demonstrate this by finding the minimum of the n-dimensional sphere problem. """
import aerosandbox as asb
import aerosandbox.numpy as np  # Whoa! What is this? Why are we writing this instead of `import numpy as np`? Don't worry, we'll talk about this in the next tutorial :)

N = 100  # Let's optimize in 100-dimensional space.

opti = asb.Opti()

# Define optimization variables
x = opti.variable(
    init_guess=np.ones(
        shape=N
    )  # Creates a variable with an initial guess that is [1, 1, 1, 1,...] with N entries.
)
# Note that the fact that we're declaring a vectorized variable was *inferred* automatically from the shape of our initial guess.

# Define objective: the n-dimensional sphere (sum-of-squares) function.
f = np.sum(x**2)
opti.minimize(f)

# Optimize
sol = opti.solve()

# Extract values at the optimum
x_opt = sol.value(x)

# Print values
print(f"x = {x_opt}")
def __init__(
        self,
        model: Callable[
            [Union[np.ndarray, Dict[str, np.ndarray]], Dict[str, float]],
            np.ndarray],
        x_data: Union[np.ndarray, Dict[str, np.ndarray]],
        y_data: np.ndarray,
        parameter_guesses: Dict[str, float],
        parameter_bounds: Dict[str, tuple] = None,
        residual_norm_type: str = "L2",
        fit_type: str = "best",
        weights: np.ndarray = None,
        put_residuals_in_logspace: bool = False,
        verbose=True,
):
    """
    Fits an analytical model to n-dimensional unstructured data using an automatic-differentiable optimization
    approach.

    Args:

        model: The model that you want to fit your dataset to. This is a callable with syntax f(x, p) where:

            * x is a dict of dependent variables. Same format as x_data [dict of 1D ndarrays of length n].

                * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead
                interpret x as a 1D ndarray. (If you do this, just give `x_data` as an array.)

            * p is a dict of parameters. Same format as param_guesses [dict with syntax param_name:param_value].

            Model should return a 1D ndarray of length n.

            Basically, if you've done it right:
            >>> model(x_data, parameter_guesses)
            should evaluate to a 1D ndarray where each x_data is mapped to something analogous to y_data. (The
            fit will likely be bad at this point, because we haven't yet optimized on param_guesses - but the
            types should be happy.)

            Model should use aerosandbox.numpy operators.

            The model is not allowed to make any in-place changes to the input `x`. The most common way this
            manifests itself is if someone writes something to the effect of `x += 3` or similar. Instead,
            write `x = x + 3`.

        x_data: Values of the dependent variable(s) in the dataset to be fitted. This is a dictionary; syntax is
        {var_name:var_data}.

            * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead supply
            x_data as a 1D ndarray. (If you do this, just treat `x` as an array in your model, not a dict.)

        y_data: Values of the independent variable in the dataset to be fitted. [1D ndarray of length n]

        parameter_guesses: a dict of fit parameters. Syntax is {param_name:param_initial_guess}.

            * Parameters will be initialized to the values set here; all parameters need an initial guess.

            * param_initial_guess is a float; note that only scalar parameters are allowed.

        parameter_bounds: Optional: a dict of bounds on fit parameters. Syntax is {"param_name":(min, max)}.

            * May contain only a subset of param_guesses if desired.

            * Use None to represent one-sided constraints (i.e. (None, 5)).

        residual_norm_type: What error norm should we minimize to optimize the fit parameters? Options:

            * "L1": minimize the L1 norm or sum(abs(error)). Less sensitive to outliers.

            * "L2": minimize the L2 norm, also known as the Euclidean norm, or sqrt(sum(error ** 2)). The default.

            * "Linf": minimize the L_infinity norm or max(abs(error)). More sensitive to outliers.

        fit_type: Should we find the model of best fit (i.e. the model that minimizes the specified residual
        norm), or should we look for a model that represents an upper/lower bound on the data (useful for
        robust surrogate modeling, so that you can put bounds on modeling error):

            * "best": finds the model of best fit. Usually, this is what you want.

            * "upper bound": finds a model that represents an upper bound on the data (while still trying to
            minimize the specified residual norm).

            * "lower bound": finds a model that represents a lower bound on the data (while still trying to
            minimize the specified residual norm).

        weights: Optional: weights for data points. If not supplied, weights are assumed to be uniform.

            * Weights are automatically normalized. [1D ndarray of length n]

        put_residuals_in_logspace: Whether to optimize using the logarithmic error as opposed to the absolute
        error (useful for minimizing percent error).

            Note: If any model outputs or data are negative, this will raise an error!

        verbose: Should the progress of the optimization solve that is part of the fitting be displayed? See
        `aerosandbox.Opti.solve(verbose=)` syntax for more details.

    Returns: A model in the form of a FittedModel object. Some things you can do:
        >>> y = FittedModel(x) # evaluate the FittedModel at new x points
        >>> FittedModel.parameters # directly examine the optimal values of the parameters that were found
        >>> FittedModel.plot() # plot the fit

    """
    super().__init__()

    ##### Prepare all inputs, check types/sizes.

    ### Flatten all inputs to 1D ndarrays
    def flatten(input):
        return np.array(input).flatten()

    try:
        x_data = {k: flatten(v) for k, v in x_data.items()}
        x_data_is_dict = True
    except AttributeError:  # If it's not a dict or dict-like, assume it's a 1D ndarray dataset
        x_data = flatten(x_data)
        x_data_is_dict = False
    y_data = flatten(y_data)
    n_datapoints = np.length(y_data)

    ### Handle weighting
    if weights is None:
        weights = np.ones(n_datapoints)
    else:
        weights = flatten(weights)
    sum_weights = np.sum(weights)
    if sum_weights <= 0:
        raise ValueError("The weights must sum to a positive number!")
    if np.any(weights < 0):
        raise ValueError(
            "No entries of the weights vector are allowed to be negative!")
    weights = weights / sum_weights  # Normalize weights so that they sum to 1.

    ### Check format of parameter_bounds input
    if parameter_bounds is None:
        parameter_bounds = {}
    for param_name, v in parameter_bounds.items():
        if param_name not in parameter_guesses.keys():
            raise ValueError(
                f"A parameter name (key = \"{param_name}\") in parameter_bounds was not found in parameter_guesses."
            )
        if not np.length(v) == 2:
            raise ValueError(
                "Every value in parameter_bounds must be a tuple in the format (lower_bound, upper_bound). "
                "For one-sided bounds, use None for the unbounded side.")

    ### If putting residuals in logspace, check positivity
    if put_residuals_in_logspace:
        if not np.all(y_data > 0):
            raise ValueError(
                "You can't fit a model with residuals in logspace if y_data is not entirely positive!"
            )

    ### Check dimensionality of inputs to fitting algorithm
    relevant_inputs = {
        "y_data": y_data,
        "weights": weights,
    }
    try:
        relevant_inputs.update(x_data)
    except TypeError:
        relevant_inputs.update({"x_data": x_data})

    for key, value in relevant_inputs.items():
        # Check that the length of the inputs are consistent
        series_length = np.length(value)
        if not series_length == n_datapoints:
            raise ValueError(
                f"The supplied data series \"{key}\" has length {series_length}, but y_data has length {n_datapoints}."
            )

    ##### Formulate and solve the fitting optimization problem

    ### Initialize an optimization environment
    opti = Opti()

    ### Initialize the parameters as optimization variables
    params = {}
    for param_name, param_initial_guess in parameter_guesses.items():
        if param_name in parameter_bounds:
            params[param_name] = opti.variable(
                init_guess=param_initial_guess,
                lower_bound=parameter_bounds[param_name][0],
                upper_bound=parameter_bounds[param_name][1],
            )
        else:
            params[param_name] = opti.variable(
                init_guess=param_initial_guess,
            )

    ### Evaluate the model at the data points you're trying to fit
    x_data_original = copy.deepcopy(
        x_data
    )  # Make a copy of x_data so that you can determine if the model did in-place operations on x and tattle on the user.

    try:
        y_model = model(x_data, params)  # Evaluate the model
    except Exception as e:
        # Chain the original exception (`from e`) so the user sees the real cause, not just this summary.
        raise Exception("""
        There was an error when evaluating the model you supplied with the x_data you supplied.
        Likely possible causes:
            * Your model() does not have the call syntax model(x, p), where x is the x_data and p are parameters.
            * Your model should take in p as a dict of parameters, but it does not.
            * Your model assumes x is an array-like but you provided x_data as a dict, or vice versa.
        See the docstring of FittedModel() if you have other usage questions or would like to see examples.
        """) from e

    try:  ### If the model did in-place operations on x_data, throw an error
        x_data_is_unchanged = np.all(x_data == x_data_original)
    except ValueError:
        # Dict-of-arrays equality raises (ambiguous array truth value); compare the data series
        # themselves. (Previously this zipped the dicts directly, which compared *keys*, so in-place
        # modifications of dict-valued x_data were never detected.)
        x_data_is_unchanged = np.all([
            np.all(x_series == x_series_original)
            for x_series, x_series_original in zip(
                x_data.values() if x_data_is_dict else x_data,
                x_data_original.values() if x_data_is_dict else x_data_original,
            )
        ])
    if not x_data_is_unchanged:
        raise TypeError(
            "model(x_data, parameter_guesses) did in-place operations on x, which is not allowed!"
        )
    if y_model is None:  # Make sure that y_model actually returned something sensible
        raise TypeError(
            "model(x_data, parameter_guesses) returned None, when it should've returned a 1D ndarray."
        )

    ### Compute how far off you are (error)
    if not put_residuals_in_logspace:
        error = y_model - y_data
    else:
        y_model = np.fmax(
            y_model, 1e-300
        )  # Keep y_model very slightly always positive, so that log() doesn't NaN.
        error = np.log(y_model) - np.log(y_data)

    ### Set up the optimization problem to minimize some norm(error), which looks different depending on the norm used:
    if residual_norm_type.lower() == "l1":  # Minimize the L1 norm
        abs_error = opti.variable(
            init_guess=0,
            n_vars=n_datapoints
        )  # Make the abs() of each error entry an opt. var.
        opti.subject_to([
            abs_error >= error,
            abs_error >= -error,
        ])
        opti.minimize(np.sum(weights * abs_error))

    elif residual_norm_type.lower() == "l2":  # Minimize the L2 norm
        opti.minimize(np.sum(weights * error ** 2))

    elif residual_norm_type.lower() == "linf":  # Minimize the L-infinity norm
        linf_value = opti.variable(
            init_guess=0
        )  # Make the value of the L-infinity norm an optimization variable
        opti.subject_to([
            linf_value >= weights * error,
            linf_value >= -weights * error
        ])
        opti.minimize(linf_value)

    else:
        # Fixed: this message previously named a nonexistent 'residual_type' parameter.
        raise ValueError("Bad input for the 'residual_norm_type' parameter.")

    ### Add in the constraints specified by fit_type, which force the model to stay above / below the data points.
    if fit_type == "best":
        pass
    elif fit_type == "upper bound":
        opti.subject_to(y_model >= y_data)
    elif fit_type == "lower bound":
        opti.subject_to(y_model <= y_data)
    else:
        raise ValueError("Bad input for the 'fit_type' parameter.")

    ### Solve
    sol = opti.solve(verbose=verbose)

    ##### Construct a FittedModel

    ### Create a vector of solved parameters
    params_solved = {}
    for param_name in params:
        try:
            params_solved[param_name] = sol.value(params[param_name])
        except Exception:  # was a bare `except:`, which would also swallow KeyboardInterrupt / SystemExit
            params_solved[param_name] = np.nan  # np.NaN was removed in NumPy 2.0; np.nan is the supported spelling

    ### Store all the data and inputs
    self.model = model
    self.x_data = x_data
    self.y_data = y_data
    self.parameters = params_solved
    self.parameter_guesses = parameter_guesses
    self.parameter_bounds = parameter_bounds
    self.residual_norm_type = residual_norm_type
    self.fit_type = fit_type
    self.weights = weights
    self.put_residuals_in_logspace = put_residuals_in_logspace
def get_kulfan_coordinates(
        lower_weights=-0.2 * np.ones(5),  # type: np.ndarray
        upper_weights=0.2 * np.ones(5),  # type: np.ndarray
        enforce_continuous_LE_radius=True,
        TE_thickness=0.,  # type: float
        n_points_per_side=_default_n_points_per_side,  # type: int
        N1=0.5,  # type: float
        N2=1.0,  # type: float
) -> np.ndarray:
    """
    Calculates the coordinates of a Kulfan (CST) airfoil.

    To make a Kulfan (CST) airfoil, use the following syntax:

    asb.Airfoil("My Airfoil Name", coordinates = asb.kulfan_coordinates(*args))

    More on Kulfan (CST) airfoils: http://brendakulfan.com/docs/CST2.pdf

    Notes on N1, N2 (shape factor) combinations:
        * 0.5, 1: Conventional airfoil
        * 0.5, 0.5: Elliptic airfoil
        * 1, 1: Biconvex airfoil
        * 0.75, 0.75: Sears-Haack body (radius distribution)
        * 0.75, 0.25: Low-drag projectile
        * 1, 0.001: Cone or wedge airfoil
        * 0.001, 0.001: Rectangle, circular duct, or circular rod.

    :param lower_weights: Kulfan weights for the lower surface.
    :param upper_weights: Kulfan weights for the upper surface.
    :param enforce_continuous_LE_radius: Enforces a continuous leading-edge radius by throwing out the first lower weight.
    :param TE_thickness: Trailing-edge thickness; applied as a linear wedge from LE to TE.
    :param n_points_per_side: Number of coordinate points per airfoil side.
    :param N1: LE shape factor
    :param N2: TE shape factor
    :return: Airfoil coordinates as an (n, 2) ndarray, upper TE -> LE -> lower TE.
    """
    if enforce_continuous_LE_radius:
        # Copy first: previously this assigned into `lower_weights` in place, which mutated the
        # caller's array AND the shared mutable default argument, corrupting later calls.
        lower_weights = np.copy(lower_weights)
        lower_weights[0] = -1 * upper_weights[0]

    x_lower = np.cosspace(0, 1, n_points_per_side)
    x_upper = x_lower[::-1]
    x_lower = x_lower[1:]  # Trim off the nose coordinate so there are no duplicates

    def shape(w, x):
        """Evaluates the CST class function times the Bernstein-polynomial shape function at points x."""
        # Class function
        C = x ** N1 * (1 - x) ** N2

        # Shape function (Bernstein polynomials)
        n = len(w) - 1  # Order of Bernstein polynomials

        K = comb(n, np.arange(n + 1))  # Bernstein polynomial coefficients

        S_matrix = (
                w * K * np.expand_dims(x, 1) ** np.arange(n + 1) *
                np.expand_dims(1 - x, 1) ** (n - np.arange(n + 1))
        )  # Polynomial coefficient * weight matrix
        # Summed row-by-row rather than via np.sum(S_matrix, axis=1) —
        # presumably for compatibility with dynamically-typed (CasADi) inputs; TODO confirm.
        S = np.array(
            [np.sum(S_matrix[i, :]) for i in range(S_matrix.shape[0])])

        # Calculate y output
        y = C * S
        return y

    y_lower = shape(lower_weights, x_lower)
    y_upper = shape(upper_weights, x_upper)

    # TE thickness
    y_lower -= x_lower * TE_thickness / 2
    y_upper += x_upper * TE_thickness / 2

    x = np.concatenate([x_upper, x_lower])
    y = np.concatenate([y_upper, y_lower])
    coordinates = np.vstack((x, y)).T

    return coordinates
def draw(
        self,
        vehicle_model: Airplane = None,
        backend: str = "pyvista",
        draw_axes: bool = True,
        scale_vehicle_model: Union[float, None] = None,
        n_vehicles_to_draw: int = 10,
        cg_axes: str = "geometry",
        show: bool = True,
):
    """
    Draws the trajectory of this Dynamics object in 3D, with copies of the
    vehicle model placed at evenly-spaced points along it.

    Args:
        vehicle_model: The vehicle to draw: an Airplane, a pyvista PolyData
            mesh, or a filepath string readable by pyvista (e.g. an .stl).
            Defaults to a bundled YF-23 model.
        backend: Rendering backend; only "pyvista" is handled here.
        draw_axes: Whether to draw body-axes triads (r/g/b) at each vehicle.
        scale_vehicle_model: Scale factor for the vehicle mesh. If None, it is
            auto-chosen so the vehicle is ~10% of the trajectory's extent.
        n_vehicles_to_draw: How many copies of the vehicle to place along the
            trajectory.
        cg_axes: Axes convention in which mass_props CG is expressed.
        show: Whether to call plotter.show() before returning.

    Returns: the pyvista Plotter (shown if `show` is True).
    """
    if backend == "pyvista":
        import pyvista as pv
        import aerosandbox.tools.pretty_plots as p

        ### Resolve `vehicle_model` into a pyvista PolyData mesh
        if vehicle_model is None:
            default_vehicle_stl = _asb_root / "dynamics/visualization/default_assets/yf23.stl"
            vehicle_model = pv.read(str(default_vehicle_stl))
        elif isinstance(vehicle_model, pv.PolyData):
            pass
        elif isinstance(vehicle_model, Airplane):
            vehicle_model = vehicle_model.draw(backend="pyvista", show=False)
            vehicle_model.rotate_y(180)  # Rotate from geometry axes to body axes.
        elif isinstance(vehicle_model, str):  # Interpret the string as a filepath to a .stl or similar
            try:
                # Fixed: the read mesh was previously discarded, leaving `vehicle_model` a str.
                vehicle_model = pv.read(filename=vehicle_model)
            except Exception as e:
                raise ValueError("Could not parse `vehicle_model`!") from e
        else:
            raise TypeError(
                "`vehicle_model` should be an Airplane or PolyData object."
            )

        ### Broadcast any scalar trajectory components to the full trajectory length
        x_e = np.array(self.x_e)
        y_e = np.array(self.y_e)
        z_e = np.array(self.z_e)
        if np.length(x_e) == 1:
            x_e = x_e * np.ones(len(self))
        if np.length(y_e) == 1:
            y_e = y_e * np.ones(len(self))
        if np.length(z_e) == 1:
            z_e = z_e * np.ones(len(self))

        ### Auto-scale the vehicle to ~10% of the trajectory's largest extent
        if scale_vehicle_model is None:
            trajectory_bounds = np.array([
                [x_e.min(), x_e.max()],
                [y_e.min(), y_e.max()],
                [z_e.min(), z_e.max()],
            ])
            trajectory_size = np.max(np.diff(trajectory_bounds, axis=1))
            vehicle_bounds = np.array(vehicle_model.bounds).reshape((3, 2))
            vehicle_size = np.max(np.diff(vehicle_bounds, axis=1))
            scale_vehicle_model = 0.1 * trajectory_size / vehicle_size

        ### Initialize the plotter
        plotter = pv.Plotter()

        # Set the window title
        title = "ASB Dynamics"
        addenda = []
        if scale_vehicle_model != 1:
            addenda.append(f"Vehicle drawn at {scale_vehicle_model:.2g}x scale")
        addenda.append(f"{self.__class__.__name__} Engine")
        if len(addenda) != 0:
            title = title + f" ({'; '.join(addenda)})"
        plotter.title = title

        # Draw axes and grid
        plotter.add_axes()
        plotter.show_grid(color='gray')

        ### Draw the vehicle at evenly-spaced points along the trajectory
        for i in np.unique(
                np.round(np.linspace(0, len(self) - 1, n_vehicles_to_draw))
        ).astype(int):
            dyn = self[i]
            # Fall back to pseudo-Euler angles for dynamics types that don't define true Euler angles.
            try:
                phi = dyn.phi
            except AttributeError:
                phi = dyn.bank
            try:
                theta = dyn.theta
            except AttributeError:
                theta = dyn.gamma
            try:
                psi = dyn.psi
            except AttributeError:
                psi = dyn.track

            x_cg_b, y_cg_b, z_cg_b = dyn.convert_axes(
                dyn.mass_props.x_cg,
                dyn.mass_props.y_cg,
                dyn.mass_props.z_cg,
                from_axes=cg_axes,
                to_axes="body"
            )

            # Center the mesh on the CG, scale it, orient it, then place it on the trajectory.
            this_vehicle = copy.deepcopy(vehicle_model)
            this_vehicle.translate([
                -x_cg_b,
                -y_cg_b,
                -z_cg_b,
            ], inplace=True)
            this_vehicle.points *= scale_vehicle_model
            this_vehicle.rotate_x(np.degrees(phi), inplace=True)
            this_vehicle.rotate_y(np.degrees(theta), inplace=True)
            this_vehicle.rotate_z(np.degrees(psi), inplace=True)
            this_vehicle.translate([
                dyn.x_e,
                dyn.y_e,
                dyn.z_e,
            ], inplace=True)
            plotter.add_mesh(this_vehicle, )

            if draw_axes:
                rot = np.rotation_matrix_from_euler_angles(phi, theta, psi)
                axes_scale = 0.5 * np.max(
                    np.diff(np.array(this_vehicle.bounds).reshape((3, -1)),
                            axis=1))
                origin = np.array([
                    dyn.x_e,
                    dyn.y_e,
                    dyn.z_e,
                ])
                # Renamed from `i`/`c` to avoid shadowing the outer trajectory-index loop variable.
                for axis_index, color in enumerate(["r", "g", "b"]):
                    plotter.add_mesh(
                        pv.Spline(
                            np.array([origin,
                                      origin + rot[:, axis_index] * axes_scale])),
                        color=color,
                        line_width=2.5,
                    )

        ### Draw the trajectory line.
        # Fixed: this was previously inside `for i in range(len(self))`, re-adding the same
        # full-length polyline len(self) times.
        polyline = pv.Spline(np.array([x_e, y_e, z_e]).T)
        plotter.add_mesh(
            polyline,
            color=p.adjust_lightness(p.palettes["categorical"][0], 1.2),
            line_width=3,
        )

        ### Finalize the plotter
        plotter.camera.up = (0, 0, -1)
        plotter.camera.Azimuth(90)
        plotter.camera.Elevation(60)
        if show:
            plotter.show()
        return plotter
Using this model because it satisfies some things that should be true in asymptotic limits: As the fineness ratio goes to infinity, the drag-divergent Mach should go to 1. As the fineness ratio goes to 0, the drag-divergent Mach should go to some reasonable value in the range of 0 to 1, probably around 0.5? Certainly no more than 0.6, I imagine. (intuition) """ return 1 - (p["a"] / (fr + p["b"]))**p["c"] fit = asb.FittedModel(model=model, x_data=np.concatenate([sub[:, 0], sup[:, 0]]), y_data=np.concatenate([sub[:, 1], sup[:, 1]]), weights=np.concatenate([ np.ones(len(sub)) / len(sub), np.ones(len(sup)) / len(sup), ]), parameter_guesses={ "a": 0.5, "b": 3, "c": 1, }, parameter_bounds={ "a": (0, None), "b": (0, None), "c": (0, None) }, residual_norm_type="L2") plt.plot(fr, fit(fr), "-k", label="Fit")
def variable(
        self,
        init_guess: Union[float, np.ndarray] = None,
        n_vars: int = None,
        scale: float = None,
        freeze: bool = False,
        log_transform: bool = False,
        category: str = "Uncategorized",
        lower_bound: float = None,
        upper_bound: float = None,
) -> cas.MX:
    """
    Initializes a new decision variable (or vector of decision variables). You should pass an initial guess (
    `init_guess`) upon defining a new variable. Dimensionality is inferred from this initial guess, but it can
    be overridden; see below for syntax.

    It is highly, highly recommended that you provide a scale (`scale`) for each variable, especially for
    nonconvex problems, although this is not strictly required.

    Args:

        init_guess: Initial guess for the optimal value of the variable being initialized. This is where in
        the design space the optimizer will start looking.

            This can be either a float or a NumPy ndarray; the dimension of the variable (i.e. scalar,
            vector) that is created will be automatically inferred from the shape of the initial guess you
            provide here. (Although it can also be overridden using the `n_vars` parameter; see below.)

            For scalar variables, your initial guess should be a float:

            >>> opti = asb.Opti()
            >>> scalar_var = opti.variable(init_guess=5) # Initializes a scalar variable at a value of 5

            For vector variables, your initial guess should be either:

                * a float, in which case you must pass the length of the vector as `n_vars`, otherwise a
                scalar variable will be created:

                >>> opti = asb.Opti()
                >>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
                >>> # 10, with all 10 elements set to an initial guess of 5.

                * a NumPy ndarray, in which case each element will be initialized to the corresponding value in
                the given array:

                >>> opti = asb.Opti()
                >>> vector_var = opti.variable(init_guess=np.linspace(0, 5, 10)) # Initializes a vector variable of
                >>> # length 10, with all 10 elements initialized to linearly vary between 0 and 5.

            In the case where the variable is to be log-transformed (see `log_transform`), the initial guess
            should not be log-transformed as well - just supply the initial guess as usual. (Log-transform of
            the initial guess happens under the hood.) The initial guess must, of course, be a positive number
            in this case.

        n_vars: [Optional] Used to manually override the dimensionality of the variable to create; if not
        provided, the dimensionality of the variable is inferred from the initial guess `init_guess`.

            The only real case where you need to use this argument would be if you are initializing a vector
            variable to a scalar value, but you don't feel like using `init_guess=value * np.ones(n_vars)`.
            For example:

                >>> opti = asb.Opti()
                >>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
                >>> # 10, with all 10 elements set to an initial guess of 5.

        scale: [Optional] Approximate scale of the variable.

            For example, if you're optimizing the design of a automobile and setting the tire diameter as an
            optimization variable, you might choose `scale=0.5`, corresponding to 0.5 meters.

            Properly scaling your variables can have a huge impact on solution speed (or even if the optimizer
            converges at all). Although most modern second-order optimizers (such as IPOPT, used here) are
            theoretically scale-invariant, numerical precision issues due to floating-point arithmetic can make
            solving poorly-scaled problems really difficult or impossible. See here for more info:
            https://web.casadi.org/blog/nlp-scaling/

            If not specified, the code will try to pick a sensible value by defaulting to the `init_guess`.

        freeze: [Optional] This boolean tells the optimizer to "freeze" the variable at a specific value. In
        order to determine the value to freeze the variable at, the optimizer will use the following logic:

            * If you initialize a new variable with the parameter `freeze=True`: the optimizer will freeze
            the variable at the value of initial guess.

                >>> opti = Opti()
                >>> my_var = opti.variable(init_guess=5, freeze=True) # This will freeze my_var at a value of 5.

            * If the Opti instance is associated with a cache file, and you told it to freeze a specific
            category(s) of variables that your variable is a member of, and you didn't manually specify to
            freeze the variable: the variable will be frozen based on the value in the cache file (and ignore
            the `init_guess`). Example:

                >>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
                >>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
                >>> my_var = opti.variable(init_guess=5, category="Wheel Sizing")
                >>> # This will freeze my_var at a value of 10 (from the cache file, not the init_guess)

            * If the Opti instance is associated with a cache file, and you told it to freeze a specific
            category(s) of variables that your variable is a member of, but you then manually specified that
            the variable should be frozen: the variable will once again be frozen at the value of `init_guess`:

                >>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
                >>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
                >>> my_var = opti.variable(init_guess=5, category="Wheel Sizing", freeze=True)
                >>> # This will freeze my_var at a value of 5 (`freeze` overrides category loading.)

            Motivation for freezing variables:

                The ability to freeze variables is exceptionally useful when designing engineering systems.
                Let's say we're designing an airplane. In the beginning of the design process, we're doing
                "clean-sheet" design - any variable is up for grabs for us to optimize on, because the airplane
                doesn't exist yet! However, the farther we get into the design process, the more things get
                "locked in" - we may have ordered jigs, settled on a wingspan, chosen an engine, et cetera. So,
                if something changes later (let's say that we discover that one of our assumptions was too
                optimistic halfway through the design process), we have to make up for that lost margin using
                only the variables that are still free. To do this, we would freeze the variables that are
                already decided on.

                By categorizing variables, you can also freeze entire categories of variables. For example,
                you can freeze all of the wing design variables for an airplane but leave all of the fuselage
                variables free.

                This idea of freezing variables can also be used to look at off-design performance - freeze a
                design, but change the operating conditions.

        log_transform: [Optional] Advanced use only. A flag of whether to internally-log-transform this
        variable before passing it to the optimizer. Good for known positive engineering quantities that
        become nonsensical if negative (e.g. mass). Log-transforming these variables can also help maintain
        convexity.

        category: [Optional] What category of variables does this belong to?

        lower_bound: [Optional] If given, a constraint `var >= lower_bound` is added (internally scaled by
        `scale`, or by the log-scale for log-transformed variables).

        upper_bound: [Optional] If given, a constraint `var <= upper_bound` is added (internally scaled,
        same as `lower_bound`).

    Usage notes:

        When using vector variables, individual components of this vector of variables can be accessed via
        normal indexing. Example:

            >>> opti = asb.Opti()
            >>> my_var = opti.variable(n_vars = 5)
            >>> opti.subject_to(my_var[3] >= my_var[2]) # This is a valid way of indexing
            >>> my_sum = asb.sum(my_var) # This will sum up all elements of `my_var`

    Returns:
        The variable itself as a symbolic CasADi variable (MX type).

    """
    ### Set defaults
    if init_guess is None:
        import warnings
        if log_transform:
            init_guess = 1
            warnings.warn(
                "No initial guess set for Opti.variable(). Defaulting to 1 (log-transformed variable).",
                stacklevel=2)
        else:
            init_guess = 0
            warnings.warn(
                "No initial guess set for Opti.variable(). Defaulting to 0.",
                stacklevel=2)
    if n_vars is None:  # Infer dimensionality from init_guess if it is not provided
        n_vars = np.length(init_guess)
    if scale is None:  # Infer a scale from init_guess if it is not provided
        if log_transform:
            scale = 1
        else:
            scale = np.mean(
                np.fabs(init_guess)
            )  # Initialize the scale to a heuristic based on the init_guess
            if isinstance(
                    scale, cas.MX
            ) or scale == 0:  # If that heuristic leads to a scale of 0, use a scale of 1 instead.
                scale = 1

            # scale = np.fabs(
            #     np.where(
            #         init_guess != 0,
            #         init_guess,
            #         1
            #     ))

    # Try to convert init_guess to a float or np.ndarray if it is an Opti parameter.
    try:
        init_guess = self.value(init_guess)
    except RuntimeError as e:
        # self.value() raises RuntimeError when init_guess depends on a symbolic Opti variable.
        raise TypeError(
            "The `init_guess` for a new Opti variable must not be a function of an existing Opti variable."
        )

    # Validate the inputs
    if log_transform:
        if np.any(init_guess <= 0):
            raise ValueError(
                "If you are initializing a log-transformed variable, the initial guess(es) must all be positive."
            )
    if np.any(scale <= 0):
        raise ValueError("The 'scale' argument must be a positive number.")

    # If the variable is in a category to be frozen, fix the variable at the initial guess.
    is_manually_frozen = freeze
    # `variable_categories_to_freeze` may be a collection of category names or a single string
    # (including the special value "all"); handle each case.
    if (category in self.variable_categories_to_freeze
            or category == self.variable_categories_to_freeze
            or self.variable_categories_to_freeze == "all"):
        freeze = True

    # If the variable is to be frozen, return the initial guess. Otherwise, define the variable using CasADi symbolics.
    if freeze:
        if self.freeze_style == "parameter":
            var = self.parameter(n_params=n_vars, value=init_guess)
        elif self.freeze_style == "float":
            if n_vars == 1:
                var = init_guess
            else:
                var = init_guess * np.ones(n_vars)
        else:
            raise ValueError("Bad value of `Opti.freeze_style`!")
    else:
        if not log_transform:
            # The raw CasADi variable is unit-scale; multiplying by `scale` normalizes the problem.
            var = scale * super().variable(n_vars)
            self.set_initial(var, init_guess)
        else:
            # Optimize over log(var), so `var` itself is always positive.
            log_scale = scale / init_guess
            log_var = log_scale * super().variable(n_vars)
            var = np.exp(log_var)
            self.set_initial(log_var, np.log(init_guess))

    # Track the variable
    if category not in self.variables_categorized:  # Add a category if it does not exist
        self.variables_categorized[category] = []
    self.variables_categorized[category].append(var)
    try:
        var.is_manually_frozen = is_manually_frozen
    except AttributeError:
        # Frozen "float"-style variables are plain numbers/ndarrays and can't take this attribute.
        pass

    # Apply bounds
    if not (freeze and self.ignore_violated_parametric_constraints):
        if not log_transform:
            # Constraints are divided by `scale` so the solver sees well-scaled residuals.
            if lower_bound is not None:
                self.subject_to(var / scale >= lower_bound / scale)
            if upper_bound is not None:
                self.subject_to(var / scale <= upper_bound / scale)
        else:
            # For log-transformed variables, bound the log of the variable instead.
            if lower_bound is not None:
                self.subject_to(
                    log_var / log_scale >= np.log(lower_bound) / log_scale)
            if upper_bound is not None:
                self.subject_to(
                    log_var / log_scale <= np.log(upper_bound) / log_scale)

    return var