    def test_grad(self, cls_ofg):
        x, y, z = tt.matrices("xyz")
        e = x + y * z
        op = cls_ofg([x, y, z], [e])
        f = op(x, y, z)
        f = f - tt.grad(tt.sum(f), y)
        fn = function([x, y, z], f)
        xv = np.ones((2, 2), dtype=config.floatX)
        yv = np.ones((2, 2), dtype=config.floatX) * 3
        zv = np.ones((2, 2), dtype=config.floatX) * 5
        # f = (x + y*z) - d(sum(f))/dy = (1 + 3*5) - z = 16 - 5 = array of 11
        assert np.all(11.0 == fn(xv, yv, zv))
    def test_connection_pattern(self, cls_ofg):
        # Basic case
        x, y, z = tt.matrices("xyz")
        out1 = x * y
        out2 = y * z

        op1 = cls_ofg([x, y, z], [out1, out2])
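        # connection_pattern(node) returns one list per input, each holding
        # one boolean per output: entry [i][j] is True iff output j depends
        # on input i.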
        results = op1.connection_pattern(None)
        expect_result = [[True, False], [True, True], [False, True]]
        assert results == expect_result

        # Graph with ops that don't have a 'full' connection pattern
        # and with ops that have multiple outputs
        m, n, p, q = tt.matrices("mnpq")
        o1, o2 = op1(m, n, p)
        out1, out2 = op1(o1, q, o2)
        op2 = cls_ofg([m, n, p, q], [out1, out2])

        results = op2.connection_pattern(None)
        expect_result = [[True, False], [True, True], [False, True],
                         [True, True]]
        assert results == expect_result

        # Inner graph where some computation doesn't rely on explicit inputs
        srng = RandomStreams(seed=234)
        rv_u = srng.uniform((2, 2))
        x, y = tt.matrices("xy")
        out1 = x + rv_u
        out2 = y + 3
        out3 = 3 + rv_u
        op3 = cls_ofg([x, y], [out1, out2, out3])

        # op3 also has an implicit third input: the shared random-state
        # variable behind rv_u, which feeds out1 and out3 (the third row
        # in expect_result below).
        results = op3.connection_pattern(None)
        expect_result = [
            [True, False, False],
            [False, True, False],
            [True, False, True],
        ]
        assert results == expect_result
    def test_size_changes(self, cls_ofg):
        x, y, z = tt.matrices("xyz")
        e = tt.dot(x, y)
        op = cls_ofg([x, y], [e])
        f = op(x, op(y, z))
        fn = function([x, y, z], f)
        xv = np.ones((2, 3), dtype=config.floatX)
        yv = np.ones((3, 4), dtype=config.floatX) * 3
        zv = np.ones((4, 5), dtype=config.floatX) * 5
        # inner dot (3,4).(4,5) gives entries 3*5*4 = 60; outer dot
        # (2,3).(3,5) gives entries 60*3 = 180, with shape (2, 5)
        res = fn(xv, yv, zv)
        assert res.shape == (2, 5)
        assert np.all(180.0 == res)
        # a second call of the compiled function must give the same result
        res = fn(xv, yv, zv)
        assert res.shape == (2, 5)
        assert np.all(180.0 == res)
    def test_shared(self, cls_ofg):
        x, y, z = tt.matrices("xyz")
        s = shared(np.random.rand(2, 2).astype(config.floatX))
        e = x + y * z + s
        op = cls_ofg([x, y, z], [e])
        # (1 + 3*5 + s) - (3 + 5*1 + s) = 16 - 8 = array of 8; s cancels out
        f = op(x, y, z) - op(y, z, x)

        fn = function([x, y, z], f)
        xv = np.ones((2, 2), dtype=config.floatX)
        yv = np.ones((2, 2), dtype=config.floatX) * 3
        zv = np.ones((2, 2), dtype=config.floatX) * 5
        assert np.allclose(8.0, fn(xv, yv, zv))
        assert np.allclose(8.0, fn(xv, yv, zv))
    def test_straightforward(self, cls_ofg):
        x, y, z = tt.matrices("xyz")
        e = x + y * z
        op = cls_ofg([x, y, z], [e])
        # (1 + 3*5) - (3 + 5*1) = 16 - 8 = array of 8
        f = op(x, y, z) - op(y, z, x)

        fn = function([x, y, z], f)
        xv = np.ones((2, 2), dtype=config.floatX)
        yv = np.ones((2, 2), dtype=config.floatX) * 3
        zv = np.ones((2, 2), dtype=config.floatX) * 5
        # first call executes normally; repeated calls must agree
        fn(xv, yv, zv)
        assert np.all(8.0 == fn(xv, yv, zv))
        assert np.all(8.0 == fn(xv, yv, zv))
    def test_shared_grad(self, cls_ofg):
        x, y, z = tt.matrices("xyz")
        s = shared(np.random.rand(2, 2).astype(config.floatX))
        e = x + y * z + s
        op = cls_ofg([x, y, z], [e])
        f = op(x, y, z)
        f = f - tt.grad(tt.sum(f), y)
        fn = function([x, y, z], f)
        xv = np.ones((2, 2), dtype=config.floatX)
        yv = np.ones((2, 2), dtype=config.floatX) * 3
        zv = np.ones((2, 2), dtype=config.floatX) * 5
        assert np.allclose(11.0 + s.get_value(), fn(xv, yv, zv))

        # gradient with respect to the shared variable
        f = op(x, y, z)
        f = f - tt.grad(tt.sum(f), s)
        fn = function([x, y, z], f)
        assert np.allclose(15.0 + s.get_value(), fn(xv, yv, zv))
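
# A minimal standalone sketch of the same idea, assuming Aesara's
# aesara.compile.builders.OpFromGraph (the tests above receive the class
# under test via the cls_ofg fixture, so the exact class may differ):
import numpy as np
import aesara
from aesara import tensor as tt
from aesara.compile.builders import OpFromGraph

x, y = tt.matrices("x", "y")
op = OpFromGraph([x, y], [x + 2 * y])  # wrap the subgraph as a reusable Op
f = aesara.function([x, y], op(x, y))
xv = np.ones((2, 2), dtype=aesara.config.floatX)
print(f(xv, 2 * xv))  # each entry is 1 + 2*2 = 5.0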
Example #7
import time

import numpy as np

import aesara
from aesara import tensor as tt
from aesara.ifelse import ifelse


a, b = tt.scalars("a", "b")
x, y = tt.matrices("x", "y")

z_switch = tt.switch(tt.lt(a, b), tt.mean(x), tt.mean(y))
z_lazy = ifelse(tt.lt(a, b), tt.mean(x), tt.mean(y))
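
# Note: tt.switch is an elementwise Op and evaluates both branches, while
# ifelse takes a scalar condition and lazily evaluates only the selected
# branch, which is what the timing below demonstrates.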

f_switch = aesara.function([a, b, x, y], z_switch)
f_lazyifelse = aesara.function([a, b, x, y], z_lazy)

val1 = 0.0
val2 = 1.0
big_mat1 = np.ones((10000, 1000))
big_mat2 = np.ones((10000, 1000))

n_times = 10

tic = time.perf_counter()
for i in range(n_times):
    f_switch(val1, val2, big_mat1, big_mat2)
print("time spent evaluating both values %f sec" % (time.perf_counter() - tic))

tic = time.perf_counter()
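# Continuation mirroring the switch timing above (assumed from the example's
# symmetric structure): the lazy version evaluates only the selected branch.
for i in range(n_times):
    f_lazyifelse(val1, val2, big_mat1, big_mat2)
print("time spent evaluating one value %f sec" % (time.perf_counter() - tic))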