Example #1
import numpy as np

import aesara
from aesara.gradient import jacobian
from aesara.tensor import matrix

# `utt` is aesara's test-helper module (tests/unittest_tools.py in the source
# tree, not part of the installed package); it provides fetch_seed() and
# assert_allclose().
import tests.unittest_tools as utt


def test_jacobian_matrix():
    x = matrix()
    y = 2 * x.sum(axis=0)
    rng = np.random.RandomState(seed=utt.fetch_seed())
    ev = np.zeros((10, 10, 10))
    for dx in range(10):
        ev[dx, :, dx] = 2.0

    # test when the jacobian is called with a tensor as wrt
    Jx = jacobian(y, x)
    f = aesara.function([x], Jx)
    vx = rng.uniform(size=(10, 10)).astype(aesara.config.floatX)
    assert np.allclose(f(vx), ev)

    # test when the jacobian is called with a tuple as wrt
    Jx = jacobian(y, (x, ))
    assert isinstance(Jx, tuple)
    f = aesara.function([x], Jx[0])
    vx = rng.uniform(size=(10, 10)).astype(aesara.config.floatX)
    assert np.allclose(f(vx), ev)

    # test when the jacobian is called with a list as wrt
    Jx = jacobian(y, [x])
    assert isinstance(Jx, list)
    f = aesara.function([x], Jx[0])
    vx = rng.uniform(size=(10, 10)).astype(aesara.config.floatX)
    assert np.allclose(f(vx), ev)

    # test when the jacobian is called with a list of two elements
    z = matrix()
    y = (x * z).sum(axis=1)
    Js = jacobian(y, [x, z])
    f = aesara.function([x, z], Js)
    vx = rng.uniform(size=(10, 10)).astype(aesara.config.floatX)
    vz = rng.uniform(size=(10, 10)).astype(aesara.config.floatX)
    vJs = f(vx, vz)
    evx = np.zeros((10, 10, 10))
    evz = np.zeros((10, 10, 10))
    for dx in range(10):
        evx[dx, dx, :] = vx[dx, :]
        evz[dx, dx, :] = vz[dx, :]
    assert np.allclose(vJs[0], evz)
    assert np.allclose(vJs[1], evx)
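
For reference, the expected tensor `ev` above encodes d y_i / d x[j, k] = 2 when k == i and 0 otherwise: summing over axis 0 makes output i depend only on column i of x. A minimal finite-difference sketch (the helper `numeric_jacobian` is hypothetical, not part of aesara) that reproduces the same structure:

import numpy as np

def numeric_jacobian(f, x, eps=1e-6):
    # Forward-difference Jacobian of f at x, shaped y.shape + x.shape.
    y = f(x)
    J = np.zeros(y.shape + x.shape)
    for idx in np.ndindex(*x.shape):
        dx = np.zeros_like(x)
        dx[idx] = eps
        J[(Ellipsis,) + idx] = (f(x + dx) - y) / eps
    return J

x0 = np.random.rand(10, 10)
J = numeric_jacobian(lambda x: 2 * x.sum(axis=0), x0)
ev = np.zeros((10, 10, 10))
for i in range(10):
    ev[i, :, i] = 2.0
assert np.allclose(J, ev)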
Example #2
# Imports as in Example #1, with `vector` in place of `matrix`.
from aesara.tensor import vector


def test_jacobian_vector():
    x = vector()
    y = x * 2
    rng = np.random.RandomState(seed=utt.fetch_seed())

    # test when the jacobian is called with a tensor as wrt
    Jx = jacobian(y, x)
    f = aesara.function([x], Jx)
    vx = rng.uniform(size=(10, )).astype(aesara.config.floatX)
    assert np.allclose(f(vx), np.eye(10) * 2)

    # test when the jacobian is called with a tuple as wrt
    Jx = jacobian(y, (x, ))
    assert isinstance(Jx, tuple)
    f = aesara.function([x], Jx[0])
    vx = rng.uniform(size=(10, )).astype(aesara.config.floatX)
    assert np.allclose(f(vx), np.eye(10) * 2)

    # test when the jacobian is called with a list as wrt
    Jx = jacobian(y, [x])
    assert isinstance(Jx, list)
    f = aesara.function([x], Jx[0])
    vx = rng.uniform(size=(10, )).astype(aesara.config.floatX)
    assert np.allclose(f(vx), np.eye(10) * 2)

    # test when the jacobian is called with a list of two elements
    z = vector()
    y = x * z
    Js = jacobian(y, [x, z])
    f = aesara.function([x, z], Js)
    vx = rng.uniform(size=(10, )).astype(aesara.config.floatX)
    vz = rng.uniform(size=(10, )).astype(aesara.config.floatX)
    vJs = f(vx, vz)
    evx = np.zeros((10, 10))
    evz = np.zeros((10, 10))
    np.fill_diagonal(evx, vx)
    np.fill_diagonal(evz, vz)
    assert np.allclose(vJs[0], evz)
    assert np.allclose(vJs[1], evx)
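
The expected matrices built with `np.fill_diagonal` above are just diagonal Jacobians: for y = x * z, dy_i/dx_j equals z_i when i == j and 0 otherwise, so the Jacobian wrt x is diag(z) and the Jacobian wrt z is diag(x). A quick NumPy check of that equivalence:

import numpy as np

z0 = np.random.rand(10)
evz = np.zeros((10, 10))
np.fill_diagonal(evz, z0)
assert np.allclose(evz, np.diag(z0))  # same matrix, two constructions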
Example #3
    def test_dot_not_output(self):
        # Test the case where the vector input to the dot is not already an
        # output of the inner function. (This is a method from a test class;
        # `mode`, `dot`, `Scan`, `Dot`, `config`, and `utt` come from the
        # surrounding test module's imports.)

        v = vector()
        m = matrix()
        output = dot(v, m)

        # Compile the function twice, once with the optimization and once
        # without
        opt_mode = mode.including("scan")
        f_opt = aesara.function([v, m], jacobian(output, v), mode=opt_mode)

        no_opt_mode = mode.excluding("scanOp_pushout_output")
        f_no_opt = aesara.function([v, m],
                                   jacobian(output, v),
                                   mode=no_opt_mode)

        # Ensure that the optimization was performed correctly in f_opt
        # The inner function of scan should have only one output and it should
        # not be the result of a Dot
        scan_node = [
            node for node in f_opt.maker.fgraph.toposort()
            if isinstance(node.op, Scan)
        ][0]
        assert len(scan_node.op.outputs) == 1
        # outputs[0] is a Variable, never an instance of the Dot op itself,
        # so inspect the op of the node that produced it instead.
        inner_out = scan_node.op.outputs[0]
        assert inner_out.owner is None or not isinstance(inner_out.owner.op, Dot)

        # Ensure that the function compiled with the optimization produces
        # the same results as the function compiled without
        v_value = np.random.random(4).astype(config.floatX)
        m_value = np.random.random((4, 5)).astype(config.floatX)

        output_opt = f_opt(v_value, m_value)
        output_no_opt = f_no_opt(v_value, m_value)

        utt.assert_allclose(output_opt, output_no_opt)
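
The inspection pattern used above generalizes: any compiled aesara function exposes its optimized graph through `f.maker.fgraph`, so Scan nodes can be located and their inner graphs examined. A small sketch of that pattern (the helper name `scan_nodes` is ours, not aesara's):

from aesara.scan.op import Scan

def scan_nodes(f):
    # All Scan apply nodes in a compiled function's optimized graph.
    return [n for n in f.maker.fgraph.toposort() if isinstance(n.op, Scan)]

# For a compiled function f containing a scan, the inner graph can then be
# printed with aesara.printing.debugprint(scan_nodes(f)[0]).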
Example #4
# Imports as in Example #1, with `scalar` in place of `matrix`.
from aesara.tensor import scalar


def test_jacobian_scalar():
    x = scalar()
    y = x * 2
    rng = np.random.RandomState(seed=utt.fetch_seed())

    # test when the jacobian is called with a tensor as wrt
    Jx = jacobian(y, x)
    f = aesara.function([x], Jx)
    # np.cast was removed in NumPy 2.0; np.asarray with an explicit dtype is
    # the portable equivalent.
    vx = np.asarray(rng.uniform(), dtype=aesara.config.floatX)
    assert np.allclose(f(vx), 2)

    # test when the jacobian is called with a tuple as wrt
    Jx = jacobian(y, (x, ))
    assert isinstance(Jx, tuple)
    f = aesara.function([x], Jx[0])
    vx = np.asarray(rng.uniform(), dtype=aesara.config.floatX)
    assert np.allclose(f(vx), 2)

    # test when the jacobian is called with a list as wrt
    Jx = jacobian(y, [x])
    assert isinstance(Jx, list)
    f = aesara.function([x], Jx[0])
    vx = np.asarray(rng.uniform(), dtype=aesara.config.floatX)
    assert np.allclose(f(vx), 2)

    # test when the jacobian is called with a list of two elements
    z = scalar()
    y = x * z
    Jx = jacobian(y, [x, z])
    f = aesara.function([x, z], Jx)
    vx = np.asarray(rng.uniform(), dtype=aesara.config.floatX)
    vz = np.asarray(rng.uniform(), dtype=aesara.config.floatX)
    vJx = f(vx, vz)

    assert np.allclose(vJx[0], vz)
    assert np.allclose(vJx[1], vx)
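
For a 0-d expression like the one above, aesara's `jacobian` falls back to a plain gradient, so the scalar case is equivalent to calling `grad` directly. A minimal sketch of that equivalence, assuming the same imports as the test:

import numpy as np
import aesara
from aesara import grad
from aesara.gradient import jacobian
from aesara.tensor import scalar

x = scalar()
y = x * 2
g = aesara.function([x], grad(y, x))
J = aesara.function([x], jacobian(y, x))
assert np.allclose(g(0.5), J(0.5))  # both evaluate to 2.0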
Example #5
    # Method from a scan-optimization test class; assumes the surrounding
    # module's imports, e.g.:
    #   import aesara.tensor as at
    #   from aesara import function, grad, scan, shared
    #   from aesara.gradient import jacobian
    #   from aesara.tensor import dmatrix, dvector
    #   from aesara.tensor.math import sum as at_sum
    #   from aesara.tensor.shape import shape
    def test_pushout_seqs(self):
        def init_predictive_output(inputs, targets, hyp, x_star, s_star):
            E = hyp.shape[0]

            def init_K(i, X, Y):
                XX = X.sum(1).reshape((X.shape[0], 1))
                K = XX + XX.T
                return K.sum()

            beta, K_updts = scan(
                init_K, sequences=at.arange(E), non_sequences=[inputs, targets]
            )

            # mean
            def predict_mean_i(i, x_star, s_star, X, beta, h):
                n, D = shape(X)
                # rescale every dimension by the corresponding inverse lengthscale
                iL = at.diag(h[i, :D])
                inp = (X - x_star).dot(iL)

                # compute the mean
                B = iL.dot(s_star).dot(iL)
                t = inp.dot(B)

                lb = (inp * t).sum() + beta.sum()

                Mi = at_sum(lb) * h[i, D]
                return Mi

            M, M_updts = scan(
                predict_mean_i,
                sequences=at.arange(E),
                non_sequences=[x_star, s_star, inputs, beta, hyp],
            )
            return M

        # some initializations
        hypx = np.log(np.tile([1, 1, 1, 1, 1, 1, 0.01], (3, 1)))

        # variables used in the following expressions
        hyp = shared(hypx)
        inputs = dmatrix("X")
        targets = dmatrix("Y")
        x_star = dvector("x_star")
        s_star = dmatrix("s_star")

        M = init_predictive_output(inputs, targets, hyp, x_star, s_star)

        X = np.random.default_rng(utt.fetch_seed()).random((10, 4))
        Y = np.random.default_rng(utt.fetch_seed()).random((10, 3))
        test_m = np.random.default_rng(utt.fetch_seed()).random((4,))
        test_s = np.eye(4)

        # Compute expected outputs (jacobian of M wrt x_star)
        dfdm = function(
            [inputs, targets, x_star, s_star],
            [
                grad(M[0], x_star),
                grad(M[1], x_star),
                grad(M[2], x_star),
            ],
        )
        expected_output = dfdm(X, Y, test_m, test_s)

        # equivalent code for the jacobian using scan
        dMdm, dMdm_updts = scan(
            lambda i, M, x: grad(M[i], x),
            sequences=at.arange(M.shape[0]),
            non_sequences=[M, x_star],
        )
        dfdm = function([inputs, targets, x_star, s_star], [dMdm[0], dMdm[1], dMdm[2]])
        scan_output = dfdm(X, Y, test_m, test_s)

        dMdm_j = jacobian(M, x_star)
        dfdm_j = function(
            [inputs, targets, x_star, s_star], [dMdm_j[0], dMdm_j[1], dMdm_j[2]]
        )
        jacobian_outputs = dfdm_j(X, Y, test_m, test_s)

        utt.assert_allclose(expected_output, scan_output)
        utt.assert_allclose(expected_output, jacobian_outputs)
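
The test compares three equivalent formulations of the same Jacobian: per-row `grad` calls, a symbolic loop with `scan`, and the `jacobian` helper. A condensed, standalone sketch of that pattern (independent of the GP-style model above):

import numpy as np
import aesara.tensor as at
from aesara import function, grad, scan
from aesara.gradient import jacobian

x = at.dvector("x")
y = x ** 2  # vector-valued expression; true Jacobian is diag(2 * x)

rows = at.stack([grad(y[i], x) for i in range(3)])  # one grad per row
J_scan, _ = scan(                                   # symbolic loop over rows
    lambda i, y, x: grad(y[i], x),
    sequences=at.arange(y.shape[0]),
    non_sequences=[y, x],
)
J = jacobian(y, x)                                  # the helper

f = function([x], [rows, J_scan, J])
a, b, c = f(np.arange(3.0))
assert np.allclose(a, b) and np.allclose(b, c)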