Example #1
    def test_dot_not_output(self):
        # Test the case where the vector input to the dot is not already an
        # output of the inner function.

        v = tt.vector()
        m = tt.matrix()
        output = tt.dot(v, m)

        # Compile the function twice, once with the optimization and once
        # without
        opt_mode = mode.including("scan")
        f_opt = aesara.function([v, m], tt.jacobian(output, v), mode=opt_mode)

        no_opt_mode = mode.excluding("scanOp_pushout_output")
        f_no_opt = aesara.function([v, m], tt.jacobian(output, v), mode=no_opt_mode)

        # Ensure that the optimization was performed correctly in f_opt
        # The inner function of scan should have only one output and it should
        # not be the result of a Dot
        scan_node = [
            node for node in f_opt.maker.fgraph.toposort() if isinstance(node.op, Scan)
        ][0]
        assert len(scan_node.op.outputs) == 1
        # Check the op that produced the inner output, not the output variable itself
        assert not isinstance(scan_node.op.outputs[0].owner.op, tt.Dot)

        # Ensure that the function compiled with the optimization produces
        # the same results as the function compiled without
        v_value = np.random.random(4).astype(config.floatX)
        m_value = np.random.random((4, 5)).astype(config.floatX)

        output_opt = f_opt(v_value, m_value)
        output_no_opt = f_no_opt(v_value, m_value)

        utt.assert_allclose(output_opt, output_no_opt)
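
Example #1's snippet is a method from the Scan test class and leans on names defined elsewhere in the test module. A minimal preamble along these lines would put them in scope (a sketch only; the exact module paths vary across Theano/Aesara releases):

import numpy as np

import aesara
import aesara.tensor as tt
from aesara import config
from aesara.compile.mode import get_default_mode
from aesara.scan.op import Scan

from tests import unittest_tools as utt  # Aesara's in-repo test helpers

mode = get_default_mode()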
Example #2
def test_flow_det(flow_spec):
    z0 = at.arange(0, 20).astype("float32")
    flow = flow_spec(dim=20, z0=z0.dimshuffle("x", 0))
    with aesara.config.change_flags(compute_test_value="off"):
        z1 = flow.forward.flatten()
        J = at.jacobian(z1, z0)
        logJdet = at.log(at.abs_(at.nlinalg.det(J)))
        det = flow.logdet[0]
    np.testing.assert_allclose(logJdet.eval(), det.eval(), atol=0.0001)
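
The pattern above — materializing the full Jacobian with at.jacobian and comparing log|det J| against the flow's analytic logdet — can be checked on a plain linear map. The snippet below is an illustrative stand-in, not part of the test suite:

import numpy as np
import aesara
import aesara.tensor as at

z0 = at.vector("z0")
W = np.diag(np.arange(1.0, 5.0))  # a fixed 4x4 linear "flow"
z1 = at.dot(W, z0)
J = at.jacobian(z1, z0)  # brute-force 4x4 Jacobian
logJdet = at.log(abs(at.nlinalg.det(J)))
f = aesara.function([z0], logJdet)
# For a linear map, log|det J| = log|det W| independent of z0
np.testing.assert_allclose(f(np.ones(4, dtype=z0.dtype)), np.log(abs(np.linalg.det(W))))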
Example #3
def test_jacobian_matrix():
    x = tensor.matrix()
    y = 2 * x.sum(axis=0)
    rng = np.random.RandomState(seed=utt.fetch_seed())
    ev = np.zeros((10, 10, 10))
    for dx in range(10):
        ev[dx, :, dx] = 2.0

    # test when the jacobian is called with a tensor as wrt
    Jx = tensor.jacobian(y, x)
    f = aesara.function([x], Jx)
    vx = rng.uniform(size=(10, 10)).astype(aesara.config.floatX)
    assert np.allclose(f(vx), ev)

    # test when the jacobian is called with a tuple as wrt
    Jx = tensor.jacobian(y, (x,))
    assert isinstance(Jx, tuple)
    f = aesara.function([x], Jx[0])
    vx = rng.uniform(size=(10, 10)).astype(aesara.config.floatX)
    assert np.allclose(f(vx), ev)

    # test when the jacobian is called with a list as wrt
    Jx = tensor.jacobian(y, [x])
    assert isinstance(Jx, list)
    f = aesara.function([x], Jx[0])
    vx = rng.uniform(size=(10, 10)).astype(aesara.config.floatX)
    assert np.allclose(f(vx), ev)

    # test when the jacobian is called with a list of two elements
    z = tensor.matrix()
    y = (x * z).sum(axis=1)
    Js = tensor.jacobian(y, [x, z])
    f = aesara.function([x, z], Js)
    vx = rng.uniform(size=(10, 10)).astype(aesara.config.floatX)
    vz = rng.uniform(size=(10, 10)).astype(aesara.config.floatX)
    vJs = f(vx, vz)
    evx = np.zeros((10, 10, 10))
    evz = np.zeros((10, 10, 10))
    for dx in range(10):
        evx[dx, dx, :] = vx[dx, :]
        evz[dx, dx, :] = vz[dx, :]
    assert np.allclose(vJs[0], evz)
    assert np.allclose(vJs[1], evx)
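
tensor.jacobian stacks one gradient per output entry, so for a vector y and a matrix x the result has shape (len(y),) + x.shape with J[i, j, k] = dy[i]/dx[j, k]; the ev arrays above are built to exactly that convention. A standalone shape check (illustrative only):

import numpy as np
import aesara
import aesara.tensor as tensor

x = tensor.matrix()
y = 2 * x.sum(axis=0)  # y[i] = 2 * sum_j x[j, i]
J = tensor.jacobian(y, x)
vx = np.ones((10, 10), dtype=x.dtype)
assert aesara.function([x], J)(vx).shape == (10, 10, 10)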
Example #4
def test_jacobian_vector():
    x = tensor.vector()
    y = x * 2
    rng = np.random.RandomState(seed=utt.fetch_seed())

    # test when the jacobian is called with a tensor as wrt
    Jx = tensor.jacobian(y, x)
    f = aesara.function([x], Jx)
    vx = rng.uniform(size=(10,)).astype(aesara.config.floatX)
    assert np.allclose(f(vx), np.eye(10) * 2)

    # test when the jacobian is called with a tuple as wrt
    Jx = tensor.jacobian(y, (x,))
    assert isinstance(Jx, tuple)
    f = aesara.function([x], Jx[0])
    vx = rng.uniform(size=(10,)).astype(aesara.config.floatX)
    assert np.allclose(f(vx), np.eye(10) * 2)

    # test when the jacobian is called with a list as wrt
    Jx = tensor.jacobian(y, [x])
    assert isinstance(Jx, list)
    f = aesara.function([x], Jx[0])
    vx = rng.uniform(size=(10,)).astype(aesara.config.floatX)
    assert np.allclose(f(vx), np.eye(10) * 2)

    # test when the jacobian is called with a list of two elements
    z = tensor.vector()
    y = x * z
    Js = tensor.jacobian(y, [x, z])
    f = aesara.function([x, z], Js)
    vx = rng.uniform(size=(10,)).astype(aesara.config.floatX)
    vz = rng.uniform(size=(10,)).astype(aesara.config.floatX)
    vJs = f(vx, vz)
    evx = np.zeros((10, 10))
    evz = np.zeros((10, 10))
    np.fill_diagonal(evx, vx)
    np.fill_diagonal(evz, vz)
    assert np.allclose(vJs[0], evz)
    assert np.allclose(vJs[1], evx)
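
Elementwise products of vectors have diagonal Jacobians, which is what the fill_diagonal construction encodes: for y = x * z, dy[i]/dx[j] is z[i] when i == j and 0 otherwise. A direct check of that identity (illustrative only):

import numpy as np
import aesara
import aesara.tensor as tensor

x = tensor.vector()
z = tensor.vector()
J = tensor.jacobian(x * z, x)  # expect diag(z)
f = aesara.function([x, z], J)
vz = np.arange(1.0, 4.0).astype(x.dtype)
assert np.allclose(f(np.ones(3, dtype=x.dtype), vz), np.diag(vz))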
Example #5
def test_flow_det_local(flow_spec):
    z0 = at.arange(0, 12).astype("float32")
    spec = flow_spec.cls.get_param_spec_for(d=12)
    params = dict()
    for k, shp in spec.items():
        params[k] = np.random.randn(1, *shp).astype("float32")
    flow = flow_spec(dim=12, z0=z0.reshape((1, 1, 12)), **params)
    assert flow.batched
    with aesara.config.change_flags(compute_test_value="off"):
        z1 = flow.forward.flatten()
        J = at.jacobian(z1, z0)
        logJdet = at.log(at.abs_(at.nlinalg.det(J)))
        det = flow.logdet[0]
    np.testing.assert_allclose(logJdet.eval(), det.eval(), atol=0.0001)
Example #6
def test_jacobian_scalar():
    x = tensor.scalar()
    y = x * 2
    rng = np.random.RandomState(seed=utt.fetch_seed())

    # test when the jacobian is called with a tensor as wrt
    Jx = tensor.jacobian(y, x)
    f = aesara.function([x], Jx)
    vx = np.cast[aesara.config.floatX](rng.uniform())
    assert np.allclose(f(vx), 2)

    # test when the jacobian is called with a tuple as wrt
    Jx = tensor.jacobian(y, (x,))
    assert isinstance(Jx, tuple)
    f = aesara.function([x], Jx[0])
    vx = np.cast[aesara.config.floatX](rng.uniform())
    assert np.allclose(f(vx), 2)

    # test when the jacobian is called with a list as wrt
    Jx = tensor.jacobian(y, [x])
    assert isinstance(Jx, list)
    f = aesara.function([x], Jx[0])
    vx = np.cast[aesara.config.floatX](rng.uniform())
    assert np.allclose(f(vx), 2)

    # test when the jacobian is called with a list of two elements
    z = tensor.scalar()
    y = x * z
    Jx = tensor.jacobian(y, [x, z])
    f = aesara.function([x, z], Jx)
    vx = np.cast[aesara.config.floatX](rng.uniform())
    vz = np.cast[aesara.config.floatX](rng.uniform())
    vJx = f(vx, vz)

    assert np.allclose(vJx[0], vz)
    assert np.allclose(vJx[1], vx)
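
For a 0-d expression, tensor.jacobian reduces to the ordinary gradient, so the scalar assertions above should agree with aesara.grad; a quick sketch of that equivalence (assuming the default floatX):

import numpy as np
import aesara
import aesara.tensor as tensor

x = tensor.scalar()
vx = np.asarray(0.3, dtype=x.dtype)
jac = aesara.function([x], tensor.jacobian(x * 2, x))
grd = aesara.function([x], aesara.grad(x * 2, x))
assert np.allclose(jac(vx), grd(vx))  # both evaluate to the constant 2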
Example #7
def augment_system(ode_func, n_states, n_theta):
    """
    Function to create augmented system.

    Take a function which specifies a set of differential equations and return
    a compiled function which allows for computation of gradients of the
    differential equation's solution with respect to the parameters.

    Uses float64 even if floatX=float32, because the scipy integrator always uses float64.

    Parameters
    ----------
    ode_func: function
        Differential equation.  Returns array-like.
    n_states: int
        Number of rows of the sensitivity matrix (n_states).
    n_theta: int
        Number of ODE parameters

    Returns
    -------
    system: function
        Augmented system of differential equations.
    """

    # Present state of the system
    t_y = aet.vector("y", dtype="float64")
    t_y.tag.test_value = np.ones((n_states, ), dtype="float64")
    # Parameter(s).  Should be a vector to allow for generalization to
    # multiparameter systems of ODEs.  It has length n_states + n_theta because
    # it includes all initial conditions as well as the ODE parameters.
    t_p = aet.vector("p", dtype="float64")
    t_p.tag.test_value = np.ones((n_states + n_theta, ), dtype="float64")
    # Time.  Allows non-autonomous systems of ODEs to be analyzed.
    t_t = aet.scalar("t", dtype="float64")
    t_t.tag.test_value = 2.459

    # Present state of the gradients:
    # Will always be 0 unless the parameter is the initial condition.
    # Entry (i, j) is the partial derivative of y[i] with respect to p[j].
    dydp_vec = aet.vector("dydp", dtype="float64")
    dydp_vec.tag.test_value = make_sens_ic(n_states, n_theta, "float64")

    dydp = dydp_vec.reshape((n_states, n_states + n_theta))

    # Get symbolic representation of the ODEs by passing tensors for y, t and theta
    yhat = ode_func(t_y, t_t, t_p[n_states:])
    # Stack the results of the ode_func into a single tensor variable
    if not isinstance(yhat, (list, tuple)):
        yhat = (yhat, )
    t_yhat = aet.stack(yhat, axis=0)

    # Now compute gradients via the forward sensitivity equations:
    #   d/dt (dy/dp) = (df/dy) @ (dy/dp) + df/dp
    J = aet.jacobian(t_yhat, t_y)  # df/dy

    Jdfdy = aet.dot(J, dydp)  # (df/dy) @ (dy/dp)

    grad_f = aet.jacobian(t_yhat, t_p)  # df/dp

    # This is the time derivative of dydp
    ddt_dydp = (Jdfdy + grad_f).flatten()

    system = aesara.function(inputs=[t_y, t_t, t_p, dydp_vec],
                             outputs=[t_yhat, ddt_dydp],
                             on_unused_input="ignore")

    return system
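
A hedged usage sketch of the compiled system; the decay ODE and all values below are made up for illustration and are not part of the original module:

import numpy as np

def decay(y, t, p):
    # dy/dt = -k * y with one state (y[0]) and one parameter (k = p[0])
    return -p[0] * y[0]

system = augment_system(decay, n_states=1, n_theta=1)

y = np.array([2.0])
t = 0.0
p = np.array([2.0, 0.5])  # [initial condition, k]
# Initial sensitivities: dy/dy0 = 1, dy/dk = 0, flattened row-major
dydp0 = np.array([1.0, 0.0])
yhat, ddt_dydp = system(y, t, p, dydp0)
# yhat == [-1.0] (i.e. -k * y); ddt_dydp == [-0.5, -2.0]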