Example #1
    def __call__(self, x):
        if isinstance(self.x_data_coordinates, dict):
            def get_shape(value):
                try:
                    return value.shape
                except AttributeError:
                    return tuple()

            shape = np.broadcast_shapes(
                *[get_shape(v) for v in x.values()]
            )
            shape_for_reshaping = (int(np.product(shape)),)

            def reshape(value):
                try:
                    return np.reshape(value, shape_for_reshaping)
                except ValueError:
                    # Scalars and single-element arrays can't be reshaped directly, but can be broadcast.
                    if isinstance(value, (int, float)) or np.product(value.shape) == 1:
                        return value * np.ones(shape_for_reshaping)
                raise ValueError("Could not reshape value of one of the inputs!")

            x = np.stack(tuple(
                reshape(x[k])
                for k in self.x_data_coordinates.keys()
            ))

        output = np.interpn(
            points=self.x_data_coordinates_values,
            values=self.y_data_structured,
            xi=x,
            method=self.method,
            bounds_error=False,  # Can't be set true if general MX-type inputs are to be expected.
            fill_value=self.fill_value
        )
        try:
            return np.reshape(output, shape)
        except UnboundLocalError:
            # `shape` is only defined in the dict-input branch above; otherwise, return the output as-is.
            return output
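The dict-input branch above broadcasts every coordinate input to a common shape, flattens each one, stacks them into a single query array, interpolates, and finally restores the broadcast shape. The following is a minimal standalone sketch of that same pattern using plain NumPy and scipy.interpolate.interpn; the grid, data, and dict keys are invented for illustration and are not part of the original class.

import numpy as np
from scipy.interpolate import interpn

# Hypothetical structured data: z sampled on a 10 x 20 rectangular (x, y) grid.
x_grid = np.linspace(0, 1, 10)
y_grid = np.linspace(0, 2, 20)
z_data = np.sin(x_grid[:, None]) * np.cos(y_grid[None, :])

def interpolate_dict(query: dict) -> np.ndarray:
    # Broadcast all query values (scalars or arrays) to one common shape...
    shape = np.broadcast_shapes(*[np.shape(v) for v in query.values()])
    # ...flatten each one and stack the coordinates along the last axis, as interpn expects...
    xi = np.stack(
        [np.broadcast_to(query[k], shape).reshape(-1) for k in ("x", "y")],
        axis=-1,
    )
    # ...then interpolate and restore the broadcast shape.
    output = interpn(
        points=(x_grid, y_grid),
        values=z_data,
        xi=xi,
        method="linear",
        bounds_error=False,
        fill_value=None,
    )
    return np.reshape(output, shape)

print(interpolate_dict({"x": 0.5, "y": np.array([0.1, 0.7, 1.3])}))  # shape (3,)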
Example #2
    def solve(self,
              parameter_mapping: Dict[cas.MX, float] = None,
              max_iter: int = 1000,
              max_runtime: float = 1e20,
              callback: Callable = None,
              verbose: bool = True,
              jit: bool = False,  # TODO add unit tests for jit
              options: Dict = None,
              ) -> cas.OptiSol:
        """
        Solve the optimization problem using CasADi with IPOPT backend.

        Args:
            parameter_mapping: [Optional] Allows you to specify values for parameters.
                Dictionary where the key is the parameter and the value is the value to be set to.

                Example: # TODO update syntax for required init_guess
                    >>> opti = asb.Opti()
                    >>> x = opti.variable()
                    >>> p = opti.parameter()
                    >>> opti.minimize(x ** 2)
                    >>> opti.subject_to(x >= p)
                    >>> sol = opti.solve(
                    >>>     {
                    >>>         p: 5 # Sets the value of parameter p to 5, then solves.
                    >>>     }
                    >>> )

            max_iter: [Optional] The maximum number of iterations allowed before giving up.

            max_runtime: [Optional] Gives the maximum allowable runtime before giving up.

            callback: [Optional] A function to be called at each iteration of the optimization algorithm.
                Useful for printing progress or displaying intermediate results.

                The callback function `func` should have the syntax `func(iteration_number)`, where iteration_number
                is an integer corresponding to the current iteration number. In order to access intermediate
                quantities of optimization variables (e.g. for plotting), use the `Opti.debug.value(x)` syntax for
                each variable `x`.

            verbose: Should we print the output of IPOPT?
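
            jit: [Optional] Experimental. If True, turns on CasADi just-in-time compilation by setting the
                "jit" and "jit_options" solver options (see the `if jit:` block in the body below).

            options: [Optional] A dictionary of additional solver options passed through to CasADi's
                `solver()` call (e.g., IPOPT options prefixed with "ipopt."). Keys supplied here take
                precedence over the `max_iter`, `max_runtime`, and "mu_strategy" defaults set below.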

        Returns: An OptiSol object that contains the solved optimization problem. To extract values, use
            OptiSol.value(variable).

            Example:
                >>> sol = opti.solve()
                >>> x_opt = sol.value(x) # Get the value of variable x at the optimum.

        """
        if parameter_mapping is None:
            parameter_mapping = {}

        ### If you're loading frozen variables from cache, do it here:
        if self.load_frozen_variables_from_cache:
            solution_dict = self.get_solution_dict_from_cache()
            for category in self.variable_categories_to_freeze:
                category_variables = self.variables_categorized[category]
                category_values = solution_dict[category]

                if len(category_variables) != len(category_values):
                    raise RuntimeError("""Problem with loading cached solution: it looks like new variables have been
                    defined since the cached solution was saved (or variables were defined in a different order). 
                    Because of this, the cache cannot be loaded. 
                    Re-run the original optimization study to regenerate the cached solution.""")

                for var, val in zip(category_variables, category_values):
                    if not var.is_manually_frozen:
                        parameter_mapping = {
                            **parameter_mapping,
                            var: val
                        }

        ### Map any parameters to needed values
        for k, v in parameter_mapping.items():
            size_k = np.product(k.shape)
            try:
                size_v = np.product(v.shape)
            except AttributeError:
                size_v = 1
            if size_k != size_v:
                raise RuntimeError("""Problem with loading cached solution: it looks like the length of a vectorized 
                variable has changed since the cached solution was saved (or variables were defined in a different order). 
                Because of this, the cache cannot be loaded. 
                Re-run the original optimization study to regenerate the cached solution.""")

            self.set_value(k, v)

        ### Set solver settings.
        if options is None:
            options = {}

        if jit:
            options["jit"] = True
            # options["compiler"] = "shell"  # Recommended by CasADi devs, but doesn't work on my machine
            options["jit_options"] = {
                "flags": ["-O3"],
                # "verbose": True
            }

        options["ipopt.sb"] = 'yes'  # Hide the IPOPT banner.

        if verbose:
            options["ipopt.print_level"] = 5  # Verbose, per-iteration printing.
        else:
            options["print_time"] = False  # No time printing
            options["ipopt.print_level"] = 0  # No printing from IPOPT

        # Set defaults, if not set
        if "ipopt.max_iter" not in options:
            options["ipopt.max_iter"] = max_iter
        if "ipopt.max_cpu_time" not in options:
            options["ipopt.max_cpu_time"] = max_runtime
        if "ipopt.mu_strategy" not in options:
            options["ipopt.mu_strategy"] = "adaptive"

        self.solver('ipopt', options)

        # Set the callback
        if callback is not None:
            self.callback(callback)

        # Do the actual solve
        sol = super().solve()

        if self.save_to_cache_on_solve:
            self.save_solution()

        return sol
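The docstring above describes the callback interface only in prose; here is a brief usage sketch. It uses the underlying casadi.Opti interface directly (opti.callback and opti.debug.value are standard CasADi API), and the toy problem is invented for illustration.

import casadi

opti = casadi.Opti()
x = opti.variable()
opti.minimize((x - 3) ** 2)

history = []

def record_progress(iteration_number: int) -> None:
    # Called once per iteration; opti.debug.value(x) reads the current iterate of x.
    history.append(float(opti.debug.value(x)))
    print(f"iter {iteration_number}: x = {history[-1]:.6f}")

opti.callback(record_progress)
opti.solver("ipopt")
sol = opti.solve()
print("optimum:", sol.value(x))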
Example #3
def reshape(value):
    # Note: `shape_for_reshaping` is a variable captured from the enclosing scope (see Example #1).
    try:
        return np.reshape(value, shape_for_reshaping)
    except ValueError:
        # Scalars and single-element arrays can't be reshaped directly, but can be broadcast.
        if isinstance(value, (int, float)) or np.product(value.shape) == 1:
            return value * np.ones(shape_for_reshaping)
    raise ValueError("Could not reshape value of one of the inputs!")
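For reference, here is a self-contained demonstration of the same fallback behavior with plain NumPy; the target shape is arbitrary. Values that already match the target size are reshaped directly, while scalars and single-element arrays (which np.reshape refuses to grow) are broadcast by multiplying with a ones array.

import numpy as np

shape_for_reshaping = (6,)  # arbitrary target shape, for illustration only

def reshape(value):
    try:
        return np.reshape(value, shape_for_reshaping)
    except ValueError:
        # Scalars and single-element arrays can't be reshaped to length 6, but can be broadcast to it.
        if isinstance(value, (int, float)) or np.prod(np.shape(value)) == 1:
            return value * np.ones(shape_for_reshaping)
    raise ValueError("Could not reshape value of one of the inputs!")

print(reshape(np.arange(6)))       # already the right size: reshaped as-is
print(reshape(2.0))                # scalar: broadcast to six copies of 2.0
print(reshape(np.array([5.0])))    # single-element array: broadcast likewise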
Example #4
    def solve(self,
              parameter_mapping: Dict[cas.MX, float] = None,
              max_iter: int = 3000,
              callback: Callable = None,
              solver: str = 'ipopt'
              ) -> cas.OptiSol:
        """
        Solve the optimization problem.

        Args:
            parameter_mapping: [Optional] Allows you to specify values for parameters.
                Dictionary where the key is the parameter and the value is the value to be set to.

                Example:
                    >>> opti = asb.Opti()
                    >>> x = opti.variable()
                    >>> p = opti.parameter()
                    >>> opti.minimize(x ** 2)
                    >>> opti.subject_to(x >= p)
                    >>> sol = opti.solve(
                    >>>     {
                    >>>         p: 5 # Sets the value of parameter p to 5, then solves.
                    >>>     }
                    >>> )

            max_iter: [Optional] The maximum number of iterations allowed before giving up.

            callback: [Optional] A function to be called at each iteration of the optimization algorithm.
                Useful for printing progress or displaying intermediate results.

                The callback function `func` should have the syntax `func(iteration_number)`, where iteration_number
                is an integer corresponding to the current iteration number. In order to access intermediate quantities
                of optimization variables, use the `Opti.debug.value(x)` syntax for each variable `x`.

            solver: [Optional] Which optimization backend do you wish to use? [str] Only tested with "ipopt".

        Returns: An OptiSol object that contains the solved optimization problem. To extract values, use
            OptiSol.value(variable).

            Example:
                >>> sol = opti.solve()
                >>> x_opt = sol.value(x) # Get the value of variable x at the optimum.

        """
        if parameter_mapping is None:
            parameter_mapping = {}

        # If you're loading frozen variables from cache, do it here:
        if self.load_frozen_variables_from_cache:
            solution_dict = self.get_solution_dict_from_cache()
            for category in self.variable_categories_to_freeze:
                category_variables = self.variables_categorized[category]
                category_values = solution_dict[category]

                if len(category_variables) != len(category_values):
                    raise RuntimeError("""Problem with loading cached solution: it looks like new variables have been
                    defined since the cached solution was saved (or variables were defined in a different order). 
                    Because of this, the cache cannot be loaded. 
                    Re-run the original optimization study to regenerate the cached solution.""")

                for var, val in zip(category_variables, category_values):
                    if not var.is_manually_frozen:
                        parameter_mapping = {
                            **parameter_mapping,
                            var: val
                        }

        # Map any parameters to needed values
        for k, v in parameter_mapping.items():
            size_k = np.product(k.shape)
            try:
                size_v = np.product(v.shape)
            except AttributeError:  # Plain Python scalars (e.g. float values) have no .shape attribute.
                size_v = 1
            if size_k != size_v:
                raise RuntimeError("""Problem with loading cached solution: it looks like the length of a vectorized 
                variable has changed since the cached solution was saved (or variables were defined in a different order). 
                Because of this, the cache cannot be loaded. 
                Re-run the original optimization study to regenerate the cached solution.""")

            self.set_value(k, v)

        # Set solver settings.
        p_opts = {}
        s_opts = {}
        s_opts["max_iter"] = max_iter
        s_opts["mu_strategy"] = "adaptive"
        self.solver(solver, p_opts, s_opts)  # Default to IPOPT solver

        # Set the callback
        if callback is not None:
            self.callback(callback)

        # Do the actual solve
        sol = super().solve()

        if self.save_to_cache_on_solve:
            self.save_solution()

        return sol
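This older variant passes solver settings as two separate dictionaries, following CasADi's convention of plugin options (p_opts, interpreted by CasADi's nlpsol wrapper) versus solver options (s_opts, forwarded to IPOPT itself). Below is a short usage sketch of that convention; the specific option values are illustrative only.

import casadi

opti = casadi.Opti()
x = opti.variable()
opti.minimize((x - 1) ** 2)

p_opts = {"print_time": False}      # plugin-level option, handled by CasADi
s_opts = {"max_iter": 3000,         # solver-level options, forwarded to IPOPT
          "mu_strategy": "adaptive",
          "print_level": 0}
opti.solver("ipopt", p_opts, s_opts)

sol = opti.solve()
print(sol.value(x))  # approximately 1.0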