Example #1
def __new__(cls,
            name,
            rho,
            *args,
            steps=None,
            constant=False,
            ar_order=None,
            **kwargs):
    rhos = at.atleast_1d(at.as_tensor_variable(floatX(rho)))
    ar_order = cls._get_ar_order(rhos=rhos,
                                 constant=constant,
                                 ar_order=ar_order)
    steps = get_steps(
        steps=steps,
        shape=None,  # Shape will be checked in `cls.dist`
        dims=kwargs.get("dims", None),
        observed=kwargs.get("observed", None),
        step_shape_offset=ar_order,
    )
    return super().__new__(cls,
                           name,
                           rhos,
                           *args,
                           steps=steps,
                           constant=constant,
                           ar_order=ar_order,
                           **kwargs)
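
The `rho`/`ar_order`/`steps` handling above matches the `__new__` of PyMC's `AR` time-series distribution. Assuming that context, a minimal usage sketch; the model and shapes below are illustrative, not from the original source:

import pymc as pm

with pm.Model():
    # Two lag coefficients, so ar_order is inferred as 2; with shape=10,
    # get_steps derives steps = 10 - ar_order = 8.
    y = pm.AR("y", rho=[0.5, 0.3], sigma=1.0, shape=10)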
Example #2
    def dist(
        cls,
        rho,
        sigma=None,
        tau=None,
        *,
        init_dist=None,
        steps=None,
        constant=False,
        ar_order=None,
        **kwargs,
    ):
        _, sigma = get_tau_sigma(tau=tau, sigma=sigma)
        sigma = at.as_tensor_variable(floatX(sigma))
        rhos = at.atleast_1d(at.as_tensor_variable(floatX(rho)))

        if "init" in kwargs:
            warnings.warn(
                "init parameter is now called init_dist. Using init will raise an error in a future release.",
                FutureWarning,
            )
            init_dist = kwargs.pop("init")

        ar_order = cls._get_ar_order(rhos=rhos, constant=constant, ar_order=ar_order)
        steps = get_steps(steps=steps, shape=kwargs.get("shape", None), step_shape_offset=ar_order)
        if steps is None:
            raise ValueError("Must specify steps or shape parameter")
        steps = at.as_tensor_variable(intX(steps), ndim=0)

        if init_dist is not None:
            if not isinstance(init_dist, TensorVariable) or not isinstance(
                init_dist.owner.op, RandomVariable
            ):
                raise ValueError(
                    f"Init dist must be a distribution created via the `.dist()` API, "
                    f"got {type(init_dist)}"
                )
            check_dist_not_registered(init_dist)
            if init_dist.owner.op.ndim_supp > 1:
                raise ValueError(
                    "Init distribution must have a scalar or vector support dimension, "
                    f"got ndim_supp={init_dist.owner.op.ndim_supp}."
                )
        else:
            warnings.warn(
                "Initial distribution not specified, defaulting to "
                "`Normal.dist(0, 100, shape=...)`. You can specify an init_dist "
                "manually to suppress this warning.",
                UserWarning,
            )
            init_dist = Normal.dist(0, 100, shape=(*sigma.shape, ar_order))

        # Tell Aeppl to ignore init_dist, as it will be accounted for in the logp term
        init_dist = ignore_logprob(init_dist)

        return super().dist([rhos, sigma, init_dist, steps, ar_order, constant], **kwargs)
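
For comparison with `__new__` above, a hedged sketch of calling the `.dist()` API this classmethod implements (again assuming PyMC's `AR`; the values are illustrative):

import pymc as pm

# Two lag coefficients imply ar_order = 2, so init_dist supplies the first
# two values of the series; passing it explicitly avoids the UserWarning
# about the Normal(0, 100) default.
init = pm.Normal.dist(0.0, 1.0, shape=(2,))
ar = pm.AR.dist(rho=[0.5, 0.3], sigma=1.0, init_dist=init, steps=100)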
Example #3
import aesara
import aesara.tensor as at
import numpy as np

# `make_sens_ic` (the flattened initial sensitivity values used below) is
# defined alongside this function in PyMC's ODE utilities.


def augment_system(ode_func, n_states, n_theta):
    """
    Function to create augmented system.

    Take a function which specifies a set of differential equations and return
    a compiled function which allows for computation of gradients of the
    differential equation's solition with repsect to the parameters.

    Uses float64 even if floatX=float32, because the scipy integrator always uses float64.

    Parameters
    ----------
    ode_func: function
        Differential equation.  Returns array-like.
    n_states: int
        Number of rows of the sensitivity matrix. (n_states)
    n_theta: int
        Number of ODE parameters

    Returns
    -------
    system: function
        Augemted system of differential equations.
    """

    # Present state of the system
    t_y = at.vector("y", dtype="float64")
    t_y.tag.test_value = np.ones((n_states, ), dtype="float64")
    # Parameter(s). Should be a vector to allow for generalization to multiparameter
    # systems of ODEs. It is m-dimensional because it includes all initial conditions
    # as well as the ODE parameters.
    t_p = at.vector("p", dtype="float64")
    t_p.tag.test_value = np.ones((n_states + n_theta, ), dtype="float64")
    # Time. Allows non-autonomous systems of ODEs to be analyzed.
    t_t = at.scalar("t", dtype="float64")
    t_t.tag.test_value = 2.459

    # Present state of the gradients:
    # Will always be 0 unless the parameter is the initial condition.
    # Entry (i, j) is the partial derivative of y[i] with respect to p[j]
    dydp_vec = at.vector("dydp", dtype="float64")
    dydp_vec.tag.test_value = make_sens_ic(n_states, n_theta, "float64")

    dydp = dydp_vec.reshape((n_states, n_states + n_theta))

    # Get symbolic representation of the ODEs by passing tensors for y, t and theta
    yhat = ode_func(t_y, t_t, t_p[n_states:])
    if isinstance(yhat, at.TensorVariable):
        t_yhat = at.atleast_1d(yhat)
    else:
        # Stack the results of the ode_func into a single tensor variable
        if not isinstance(yhat, (list, tuple)):
            raise TypeError(
                f"Unexpected type, {type(yhat)}, returned by ode_func. TensorVariable, list or tuple is expected."
            )
        t_yhat = at.stack(yhat, axis=0)
    if t_yhat.ndim > 1:
        raise ValueError(
            f"The odefunc returned a {t_yhat.ndim}-dimensional tensor, but 0 or 1 dimensions were expected."
        )

    # Now compute gradients via the forward sensitivity equations:
    # d/dt(dy/dp) = J @ (dy/dp) + df/dp, where J = df/dy
    J = at.jacobian(t_yhat, t_y)

    Jdfdy = at.dot(J, dydp)

    grad_f = at.jacobian(t_yhat, t_p)

    # This is the time derivative of dydp
    ddt_dydp = (Jdfdy + grad_f).flatten()

    system = aesara.function(inputs=[t_y, t_t, t_p, dydp_vec],
                             outputs=[t_yhat, ddt_dydp],
                             on_unused_input="ignore")

    return system
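
To illustrate what the compiled function computes, a sketch wiring `augment_system` to a one-state, one-parameter exponential-decay ODE; the call signature follows the `inputs` list above, and `decay` and its values are made up for the example:

def decay(y, t, p):
    # dy/dt = -p0 * y: one state, one parameter
    return -p[0] * y[0]

system = augment_system(decay, n_states=1, n_theta=1)

y0 = np.array([1.0])                    # current state
p = np.array([1.0, 0.5])                # [initial condition, theta]
dydp0 = make_sens_ic(1, 1, "float64")   # flattened initial sensitivities
yhat, ddt_dydp = system(y0, 2.459, p, dydp0)
# yhat is dy/dt at the current state; ddt_dydp is the flattened time
# derivative of the sensitivity matrix dy/dp.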