def test_uniform_forward_difference_first_degree():
    # Two-point stencil on {0, 1}: the classic first-order forward difference.
    two_point = np.finite_difference_coefficients(
        x=np.arange(2),
        x0=0,
        derivative_degree=1,
    )
    assert two_point == pytest.approx(np.array([-1, 1]))

    # Nine-point one-sided stencil on {0..8}: high-order forward-difference weights.
    nine_point = np.finite_difference_coefficients(
        x=np.arange(9),
        x0=0,
        derivative_degree=1,
    )
    expected = np.array([
        -761 / 280, 8, -14, 56 / 3, -35 / 2, 56 / 5, -14 / 3, 8 / 7, -1 / 8,
    ])
    assert nine_point == pytest.approx(expected)
def test_uniform_central_difference():
    # (stencil, derivative degree, expected weights) for grids symmetric about 0.
    cases = [
        ([-1, 0, 1], 1, [-0.5, 0, 0.5]),
        ([-1, 0, 1], 2, [1, -2, 1]),
        ([-2, -1, 0, 1, 2], 2, [-1 / 12, 4 / 3, -5 / 2, 4 / 3, -1 / 12]),
    ]
    for stencil, degree, expected in cases:
        coefficients = np.finite_difference_coefficients(
            x=stencil,
            x0=0,
            derivative_degree=degree,
        )
        assert coefficients == pytest.approx(np.array(expected))
def test_nonuniform_difference():
    # Asymmetric two-point stencil {-1, 2}: first-derivative weights at x0=0.
    weights = np.finite_difference_coefficients(
        x=[-1, 2],
        x0=0,
        derivative_degree=1,
    )
    assert weights == pytest.approx(np.array([-1 / 3, 1 / 3]))
def test_uniform_forward_difference_higher_order():
    # Five-point forward stencil on {0..4} for the third derivative at the left edge.
    weights = np.finite_difference_coefficients(
        x=np.arange(5),
        x0=0,
        derivative_degree=3,
    )
    assert weights == pytest.approx(np.array([-5 / 2, 9, -12, 7, -3 / 2]))
# ---- Example no. 5 ----
    def constrain_derivative(
        self,
        derivative: cas.MX,
        variable: cas.MX,
        with_respect_to: Union[np.ndarray, cas.MX],
        method: str = "midpoint",
        regularize: bool = False,
    ) -> None:
        """
        Adds a constraint to the optimization problem such that:

            d(variable) / d(with_respect_to) == derivative

        Can be used directly; also called indirectly by opti.derivative_of() for implicit derivative creation.

        Args:
            derivative: The derivative that is to be constrained here.

            variable: The variable or quantity that you are taking the derivative of. The "numerator" of the
            derivative, in colloquial parlance.

            with_respect_to: The variable or quantity that you are taking the derivative with respect to. The
            "denominator" of the derivative, in colloquial parlance.

                In a typical example case, this `with_respect_to` parameter would be time. Please make sure that the
                value of this parameter is monotonically increasing, otherwise you may get nonsensical answers.

            method: The type of integrator to use to define this derivative. Options are:

                * "forward euler" - a first-order-accurate forward Euler method (not yet implemented)

                    Citation: https://en.wikipedia.org/wiki/Euler_method

                * "backward euler" - a first-order-accurate backward Euler method (not yet implemented)

                    Citation: https://en.wikipedia.org/wiki/Backward_Euler_method

                * "midpoint" or "trapezoid" - a second-order-accurate midpoint method

                    Citation: https://en.wikipedia.org/wiki/Midpoint_method

                * "simpson" - Simpson's rule for integration (not yet implemented)

                    Citation: https://en.wikipedia.org/wiki/Simpson%27s_rule

                * "runge-kutta" or "rk4" - a fourth-order-accurate Runge-Kutta method (not yet implemented). I
                suppose that technically, "forward euler", "backward euler", and "midpoint" are all (lower-order)
                Runge-Kutta methods...

                    Citation: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#The_Runge%E2%80%93Kutta_method

                * "runge-kutta-3/8" - A modified version of the Runge-Kutta 4 proposed by Kutta in 1901 (not yet
                implemented). Also fourth-order-accurate, but all of the error coefficients are smaller than they are
                in the standard Runge-Kutta 4 method. The downside is that more floating point operations are
                required per timestep, as the Butcher tableau is more dense (i.e. not banded).

                    Citation: Kutta, Martin (1901), "Beitrag zur näherungsweisen Integration totaler
                    Differentialgleichungen", Zeitschrift für Mathematik und Physik, 46: 435–453

            Note that all methods are expressed as integrators rather than differentiators; this prevents
            singularities from forming in the limit of timestep approaching zero. (For those coming from the PDE
            world, this is analogous to using finite volume methods rather than finite difference methods to allow
            shock capturing.)

            regularize: Most of these integration methods result in N-1 constraints for a problem with N state
            variables. This makes the problem ill-posed, as there is an extra degree of freedom added to the problem.
            If the regularize flag is set True, we will automatically add one more constraint to make the problem
            well-posed. The specific constraint that is added depends on the integration method used.

        Returns: None (adds constraint in-place).

        Raises:
            NotImplementedError: If `method` names an integrator that is documented but not yet implemented.
            ValueError: If `method` is not one of the recognized options.
        """
        d_var = np.diff(variable)  # Change in the state between adjacent collocation points
        d_time = np.diff(with_respect_to)  # Calculate the timestep

        # TODO scale constraints by variable scale?

        if method == "forward euler":
            # Sketch of the intended (unvalidated) implementation:
            #   self.subject_to(d_var == derivative[:-1] * d_time)
            #   self.subject_to(derivative[-1] == derivative[-2])  # First-order constraint at last point
            raise NotImplementedError

        elif method == "backward euler":
            # Sketch of the intended (unvalidated) implementation:
            #   self.subject_to(d_var == derivative[1:] * d_time)
            #   self.subject_to(derivative[0] == derivative[1])  # First-order constraint at first point
            raise NotImplementedError

        elif method in ("midpoint", "trapezoid"):
            # NOTE(review): `np.trapz` here is presumably the project's overloaded version that
            # returns the midpoint average of adjacent entries (not NumPy's scalar integral),
            # so this enforces d_var == mean(adjacent derivatives) * d_time — confirm against
            # the `np` module actually imported by this file.
            self.subject_to(
                d_var == np.trapz(derivative) * d_time,
            )
            if regularize:
                # The N-1 midpoint constraints leave one degree of freedom; pin down the
                # derivative at the first point with a second-order-accurate one-sided
                # finite-difference estimate built from the first three samples.
                coefficients = np.finite_difference_coefficients(
                    x=with_respect_to[:3],
                    x0=with_respect_to[0],
                    derivative_degree=1,
                )
                derivative_value = np.sum(variable[:3] * coefficients)
                self.subject_to(derivative[0] == derivative_value)

        elif method == "simpson":
            raise NotImplementedError

        elif method in ("runge-kutta", "rk4"):
            raise NotImplementedError

        elif method == "runge-kutta-3/8":
            raise NotImplementedError

        else:
            # Previously, an unrecognized method string fell through silently, adding no
            # constraint at all and leaving the problem silently unconstrained. Fail loudly.
            raise ValueError(f"Invalid value of `method`: {method!r}")