Example #1
    def test_default_updates_multiple(self):
        x = shared(0)
        y = shared(1)

        x.default_update = x - 1
        y.default_update = y + 1

        f1 = pfunc([], [x, y])
        f1()
        assert x.get_value() == -1
        assert y.get_value() == 2

        f2 = pfunc([], [x, y], updates=[(x, (x - 2))], no_default_updates=[y])
        f2()
        assert x.get_value() == -3
        assert y.get_value() == 2

        f3 = pfunc([], [x, y], updates=[(x, (x - 2))], no_default_updates=True)
        f3()
        assert x.get_value() == -5
        assert y.get_value() == 2

        f4 = pfunc([], [x, y], updates=[(y, (y - 2))])
        f4()
        assert x.get_value() == -6
        assert y.get_value() == 0
Example #2
    def test_ctors(self):

        if PYTHON_INT_BITWIDTH == 32:
            assert shared(7).type == iscalar, shared(7).type
        else:
            assert shared(7).type == lscalar, shared(7).type
        assert shared(7.0).type == dscalar
        assert shared(np.float32(7)).type == fscalar

        # test tensor constructor
        b = shared(np.zeros((5, 5), dtype="int32"))
        assert b.type == TensorType("int32", broadcastable=[False, False])
        b = shared(np.random.rand(4, 5))
        assert b.type == TensorType("float64", broadcastable=[False, False])
        b = shared(np.random.rand(5, 1, 2))
        assert b.type == TensorType("float64",
                                    broadcastable=[False, False, False])

        assert shared([]).type == generic

        def badfunc():
            shared(7, bad_kw=False)

        with pytest.raises(TypeError):
            badfunc()
Example #3
    def test_doc(self):
        # Ensure the code given in pfunc.txt works as expected

        # Example #1.
        a = lscalar()
        b = shared(1)
        f1 = pfunc([a], (a + b))
        f2 = pfunc([In(a, value=44)], a + b, updates={b: b + 1})
        assert b.get_value() == 1
        assert f1(3) == 4
        assert f2(3) == 4
        assert b.get_value() == 2
        assert f1(3) == 5
        b.set_value(0)
        assert f1(3) == 3

        # Example #2.
        a = lscalar()
        b = shared(7)
        f1 = pfunc([a], a + b)
        f2 = pfunc([a], a * b)
        assert f1(5) == 12
        b.set_value(8)
        assert f1(5) == 13
        assert f2(4) == 32
Example #4
    def test_strict_generic(self):

        # This should work because a generic container can hold
        # anything, even when strict=True.

        u = shared("asdf", strict=False)
        v = shared("asdf", strict=True)

        u.set_value(88)
        v.set_value(88)
Example #5
    def test_clone0(self):
        x = shared(np.asarray([4, 4, 4]))
        y = shared(np.asarray([4, 4, 4]))
        z = shared(np.asarray([2, 2, 2]))
        up = pfunc(
            [], [], updates={x: (x * 5), y: ((x * 5) + y), z: (((x * 5) + y) ** z)}
        )

        up()
        assert np.all(x.get_value() == 20)
        assert np.all(y.get_value() == 24)
        assert np.all(z.get_value() == (24 ** 2))
Example #6
    def test_scalar_floatX(self):

        # This test ensures that floatX is not used in the shared
        # constructor for scalars. Shared values can change, and since we
        # don't know the range they might take, we should keep the same
        # bit width / precision as the original value used to create the
        # shared variable.

        # Since downcasting of a value now raises an exception, every
        # shared() call below passes allow_downcast=True explicitly.

        def f(var, val):
            var.set_value(val)

        b = shared(np.int64(7), allow_downcast=True)
        assert b.type == aesara.tensor.lscalar
        f(b, 8.23)
        assert b.get_value() == 8

        b = shared(np.int32(7), allow_downcast=True)
        assert b.type == aesara.tensor.iscalar
        f(b, 8.23)
        assert b.get_value() == 8

        b = shared(np.int16(7), allow_downcast=True)
        assert b.type == aesara.tensor.wscalar
        f(b, 8.23)
        assert b.get_value() == 8

        b = shared(np.int8(7), allow_downcast=True)
        assert b.type == aesara.tensor.bscalar
        f(b, 8.23)
        assert b.get_value() == 8

        b = shared(np.float64(7.234), allow_downcast=True)
        assert b.type == aesara.tensor.dscalar
        f(b, 8)
        assert b.get_value() == 8

        b = shared(np.float32(7.234), allow_downcast=True)
        assert b.type == aesara.tensor.fscalar
        f(b, 8)
        assert b.get_value() == 8

        # np.float was an alias for the builtin float and has been removed
        # from recent NumPy releases; the builtin is used here instead.
        b = shared(float(7.234), allow_downcast=True)
        assert b.type == aesara.tensor.dscalar
        f(b, 8)
        assert b.get_value() == 8

        b = shared(7.234, allow_downcast=True)
        assert b.type == aesara.tensor.dscalar
        f(b, 8)
        assert b.get_value() == 8

        b = shared(np.zeros((5, 5), dtype="float32"))
        with pytest.raises(TypeError):
            f(b, np.random.rand(5, 5))
Example #7
    def test_shared(self):

        # CHECK: two functions (f1 and f2) can share w
        w = shared(np.random.rand(2, 2), "w")
        wval = w.get_value(borrow=False)

        x = dmatrix()
        out1 = w + x
        out2 = w * x
        f1 = pfunc([x], [out1])
        f2 = pfunc([x], [out2])
        xval = np.random.rand(2, 2)
        assert np.all(f1(xval) == xval + wval)
        assert np.all(f2(xval) == xval * wval)

        # CHECK: updating a shared value
        f3 = pfunc([x], out1, updates=[(w, (w - 1))])
        # f3 changes the value of w
        assert np.all(f3(xval) == xval + wval)
        # this same value is read by f1
        assert np.all(f1(xval) == xval + (wval - 1))

        w.set_value(w.get_value(borrow=True) * 10, borrow=True)
        # this same value is read by f1
        assert np.all(f1(xval) == xval + w.get_value(borrow=True))
Example #8
    def gen(self, op, *args, **kwargs):
        """
        Create a new random stream in this container.

        Parameters
        ----------
        op
            A `RandomFunction` instance that performs the random draw.
        args
            Interpreted by `op`.
        kwargs
            Interpreted by `op`.

        Returns
        -------
        TensorVariable
            The symbolic random draw part of `op`'s return value.
            This function stores the updated `RandomStateType` variable
            for use at `build` time.

        """
        seed = int(self.gen_seedgen.randint(2**30))
        random_state_variable = shared(np.random.RandomState(seed))
        # Add a reference to distinguish from other shared variables
        random_state_variable.tag.is_rng = True
        new_r, out = op(random_state_variable, *args, **kwargs)
        out.rng = random_state_variable
        out.update = (random_state_variable, new_r)
        self.state_updates.append(out.update)
        random_state_variable.default_update = new_r
        return out
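
The gen() helper above wraps a freshly seeded shared RandomState and registers its update (in state_updates and as a default_update), so any compiled function that uses the draw advances the state automatically. A minimal, hedged usage sketch of that behaviour with MRG_RandomStream, the stream class used by later examples on this page (the import path is an assumption about the aesara version these tests target):

import numpy as np
from aesara import function
from aesara.sandbox.rng_mrg import MRG_RandomStream  # assumed import path

srng = MRG_RandomStream(seed=1234)
u = srng.uniform((2,), dtype="float32")  # symbolic draw; its state update is registered

f = function([], u)  # the registered updates advance the RNG state on each call
assert not np.allclose(f(), f())  # successive calls yield different samples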
Example #9
    def test_shared_mutable(self):
        bval = np.arange(5)
        b = shared(bval)
        b_out = b * 2

        # shared vars copy args.
        assert b.get_value(borrow=True) is not bval
        # so we do this to get at the underlying data
        bval = data_of(b)

        # by default, shared are not mutable unless doing an explicit update
        f = pfunc([], [b_out], mode="FAST_RUN")
        assert (f() == np.arange(5) * 2).all()
        assert np.all(b.get_value(borrow=True) == np.arange(5))

        # using updates, b is now a mutable parameter
        f = pfunc([], [b_out], updates=[(b, b_out)], mode="FAST_RUN")
        assert (f() == (np.arange(5) * 2)).all()
        # because of the update
        assert (b.get_value(borrow=True) == (np.arange(5) * 2)).all()
        assert (bval == (np.arange(5) * 2)).all()  # because of mutable=True

        # do not depend on updates being in-place though!
        bval = np.arange(5)
        b.set_value(bval, borrow=True)
        bval = data_of(b)
        f = pfunc([], [b_out], updates=[(b, (b_out + 3))], mode="FAST_RUN")
        assert (f() == (np.arange(5) * 2)).all()
        # because of the update
        assert (b.get_value(borrow=True) == ((np.arange(5) * 2) + 3)).all()
        # bval got modified to something...
        assert not (bval == np.arange(5)).all()
        # ... but not to b.value !
        assert not (bval == b.get_value(borrow=True)).all()
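
The last two asserts above lean on get_value's borrow semantics: borrow=False (the default) returns an independent copy, while borrow=True may expose the container's internal buffer, which is why data_of() is used to watch for in-place updates. A small hedged sketch of the copy guarantee (illustrative only):

import numpy as np
from aesara import shared

s = shared(np.arange(3))
c = s.get_value()  # borrow=False by default: an independent copy
c[0] = 99
assert s.get_value()[0] == 0  # the shared value is untouched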
Example #10
    def test_givens_replaces_shared_variable2(self):
        a = shared(1.0, "a")
        a.default_update = a + 3
        c = a + 10
        f = pfunc([], c, givens={a: (a + 10)})

        assert f() == 21
        assert f() == 34
Example #11
    def test_no_shared_as_input(self):
        # Test that shared variables cannot be used as function inputs.
        w_init = np.random.rand(2, 2)
        w = shared(w_init.copy(), "w")
        with pytest.raises(
            TypeError, match=r"^Cannot use a shared variable \(w\) as explicit input"
        ):
            pfunc([w], aet_sum(w * w))
Example #12
    def test_update_equiv(self):
        # Like test_update_same, but the update expression is simplified until
        # it is found to be equal to the original variable
        a = shared(1.0, "a")
        b = shared(np.ones((2, 3)), "b")

        # See comment in test_update_same about why we try both
        # shared variables.
        f = function([], [], updates=[(a, a), (b, (2 * b - b))])
        g = function([], [], updates=[(a, (a * 2 - a)), (b, b)])

        f()
        assert a.get_value(borrow=True).shape == (), a.get_value()
        assert b.get_value(borrow=True).shape == (2, 3), b.get_value()
        g()
        assert a.get_value(borrow=True).shape == (), a.get_value()
        assert b.get_value(borrow=True).shape == (2, 3), b.get_value()
Example #13
    def test_default_scalar_container(self):
        # Similar in spirit to test_default_container, but updating a scalar
        # variable. This is a sanity check for non-mutable types.
        x = shared(0.0, "x")
        f = pfunc([], x)
        assert f() == 0
        x.set_value(x.get_value(borrow=True) + 1, borrow=True)
        assert f() == 1
Example #14
    def test_default_updates_partial_graph(self):
        a = shared(0)
        a.default_update = a + 1  # Increment a each time it is used
        b = 2 * a
        # Use only the tip of the graph; a is not used
        f = pfunc([b], b)
        assert a.get_value() == 0
        f(21)
        assert a.get_value() == 0
Example #15
    def test_givens_replaces_shared_variable(self):
        a = shared(1.0, "a")
        a.default_update = a + 3.0
        b = dscalar("b")
        c = a + 10
        f = pfunc([b], c, givens={a: b})

        assert len(f.maker.fgraph.inputs) == 1
        assert len(f.maker.fgraph.outputs) == 1
Example #16
    def test_tensor_floatX(self):
        def f(var, val):
            var.set_value(val)

        b = shared(np.int64([7]), allow_downcast=True)
        assert b.type == lvector
        f(b, [8.23])
        assert b.get_value() == 8

        b = shared(np.int32([7]), allow_downcast=True)
        assert b.type == ivector
        f(b, [8.23])
        assert b.get_value() == 8

        b = shared(np.int16([7]), allow_downcast=True)
        assert b.type == wvector
        f(b, [8.23])
        assert b.get_value() == 8

        b = shared(np.int8([7]), allow_downcast=True)
        assert b.type == bvector
        f(b, [8.23])
        assert b.get_value() == 8

        b = shared(np.float64([7.234]), allow_downcast=True)
        assert b.type == dvector
        f(b, [8])
        assert b.get_value() == 8

        b = shared(np.float32([7.234]), allow_downcast=True)
        assert b.type == fvector
        f(b, [8])
        assert b.get_value() == 8

        # np.float([7.234]) doesn't work
        #        b = shared(np.float([7.234]))
        #        assert b.type == dvector
        #        f(b,[8])

        # This generates a generic type. Should we cast? I don't think so.
        #        b = shared([7.234])
        #        assert b.type == dvector
        #        f(b,[8])

        b = shared(np.asarray([7.234], dtype=config.floatX),
                   allow_downcast=True)
        assert b.dtype == config.floatX
        f(b, [8])
        assert b.get_value() == 8

        b = shared(np.zeros((5, 5), dtype="float32"))
        with pytest.raises(TypeError):
            f(b, np.random.rand(5, 5))
Example #17
    def test_default_updates_expressions(self):
        x = shared(0)
        y = shared(1)
        a = lscalar("a")

        z = a * x
        x.default_update = x + y

        f1 = pfunc([a], z)
        f1(12)
        assert x.get_value() == 1

        f2 = pfunc([a], z, no_default_updates=True)
        assert f2(7) == 7
        assert x.get_value() == 1

        f3 = pfunc([a], z, no_default_updates=[x])
        assert f3(9) == 9
        assert x.get_value() == 1
Example #18
    def test_scalar_strict(self):
        def f(var, val):
            var.set_value(val)

        b = shared(np.int64(7), strict=True)
        assert b.type == aesara.tensor.lscalar
        with pytest.raises(TypeError):
            f(b, 8.23)

        b = shared(np.int32(7), strict=True)
        assert b.type == aesara.tensor.iscalar
        with pytest.raises(TypeError):
            f(b, 8.23)

        b = shared(np.int16(7), strict=True)
        assert b.type == aesara.tensor.wscalar
        with pytest.raises(TypeError):
            f(b, 8.23)

        b = shared(np.int8(7), strict=True)
        assert b.type == aesara.tensor.bscalar
        with pytest.raises(TypeError):
            f(b, 8.23)

        b = shared(np.float64(7.234), strict=True)
        assert b.type == aesara.tensor.dscalar
        with pytest.raises(TypeError):
            f(b, 8)

        b = shared(np.float32(7.234), strict=True)
        assert b.type == aesara.tensor.fscalar
        with pytest.raises(TypeError):
            f(b, 8)

        # np.float was an alias for the builtin float and has been removed
        # from recent NumPy releases; the builtin is used here instead.
        b = shared(float(7.234), strict=True)
        assert b.type == aesara.tensor.dscalar
        with pytest.raises(TypeError):
            f(b, 8)

        b = shared(7.234, strict=True)
        assert b.type == aesara.tensor.dscalar
        with pytest.raises(TypeError):
            f(b, 8)

        b = shared(np.zeros((5, 5), dtype="float32"))
        with pytest.raises(TypeError):
            f(b, np.random.rand(5, 5))
Example #19
    def __init__(
        self,
        input=None,
        target=None,
        n_input=1,
        n_hidden=1,
        n_output=1,
        lr=1e-3,
        **kw,
    ):
        super().__init__(**kw)

        if input is None:
            input = dvector("input")
        if target is None:
            target = dvector("target")

        self.input = input
        self.target = target
        self.lr = shared(lr, "learning_rate")
        self.w1 = shared(np.zeros((n_hidden, n_input)), "w1")
        self.w2 = shared(np.zeros((n_output, n_hidden)), "w2")
        # print self.lr.type

        self.hidden = sigmoid(dot(self.w1, self.input))
        self.output = dot(self.w2, self.hidden)
        self.cost = aet_sum((self.output - self.target)**2)

        self.sgd_updates = {
            self.w1: self.w1 - self.lr * grad(self.cost, self.w1),
            self.w2: self.w2 - self.lr * grad(self.cost, self.w2),
        }

        self.sgd_step = pfunc(
            params=[self.input, self.target],
            outputs=[self.output, self.cost],
            updates=self.sgd_updates,
        )

        self.compute_output = pfunc([self.input], self.output)

        self.output_from_hidden = pfunc([self.hidden], self.output)
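
The sgd_updates dictionary above expresses gradient descent as ordinary shared-variable updates that pfunc applies after each call. A minimal hedged sketch of the same pattern on a single weight vector, using the shared/function/grad APIs seen in the surrounding examples (the `at` alias for aesara.tensor is an assumption of this sketch):

import numpy as np
import aesara
import aesara.tensor as at
from aesara import shared

x = at.dvector("x")
t = at.dvector("t")
w = shared(np.zeros(3), "w")
lr = shared(0.1, "learning_rate")

cost = at.sum((w * x - t) ** 2)
step = aesara.function([x, t], cost, updates={w: w - lr * aesara.grad(cost, w)})

step(np.ones(3), np.ones(3))  # one SGD step: w moves toward the target
assert np.all(w.get_value() != 0.0)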
Example #20
    def test_update(self):
        # Test update mechanism in different settings.

        # Simple value assignment.
        x = shared(0)
        assign = pfunc([], [], updates={x: 3})
        assign()
        assert x.get_value() == 3

        # Basic increment function.
        x.set_value(0)
        inc = pfunc([], [], updates={x: x + 1})
        inc()
        assert x.get_value() == 1

        # Increment by a constant value.
        x.set_value(-1)
        y = shared(2)
        inc_by_y = pfunc([], [], updates={x: x + y})
        inc_by_y()
        assert x.get_value() == 1
Example #21
    def test_default_updates_input(self):
        x = shared(0)
        y = shared(1)
        if PYTHON_INT_BITWIDTH == 32:
            a = iscalar("a")
        else:
            a = lscalar("a")

        x.default_update = y
        y.default_update = y + a

        f1 = pfunc([], x, no_default_updates=True)
        f1()
        assert x.get_value() == 0
        assert y.get_value() == 1

        f2 = pfunc([], x, no_default_updates=[x])
        f2()
        assert x.get_value() == 0
        assert y.get_value() == 1

        f3 = pfunc([], x, no_default_updates=[y])
        f3()
        assert x.get_value() == 1
        assert y.get_value() == 1

        f4 = pfunc([a], x)
        f4(2)
        assert x.get_value() == 1
        assert y.get_value() == 3

        f5 = pfunc([], x, updates={y: (y - 1)})
        f5()
        assert x.get_value() == 3
        assert y.get_value() == 2

        # a is needed as input if y.default_update is used
        with pytest.raises(MissingInputError):
            pfunc([], x)
Example #22
    def test_update_err_broadcast(self):
        # Test that mismatched broadcastable dimensions raise an error.
        data = np.random.rand(10, 10).astype("float32")
        output_var = shared(name="output", value=data)

        # output_var has type matrix (not broadcastable in any dimension),
        # while the update expression is a broadcasted scalar; the mismatched
        # broadcastable pattern should raise a TypeError.
        with pytest.raises(TypeError):
            function(
                inputs=[],
                outputs=[],
                updates={output_var: output_var.sum().dimshuffle("x", "x")},
            )
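
A hedged sketch of why the TypeError above fires: the update expression's broadcastable pattern does not match the shared matrix's, so the update cannot be stored back into the variable's type (illustrative only):

import numpy as np
from aesara import shared

m = shared(np.zeros((3, 3), dtype="float32"), name="m")
upd = m.sum().dimshuffle("x", "x")
print(m.broadcastable, upd.broadcastable)  # (False, False) vs (True, True)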
Example #23
    def test_default_updates_chained(self):
        x = shared(2)
        y = shared(1)
        z = shared(-1)

        x.default_update = x - y
        y.default_update = z
        z.default_update = z - 1

        f1 = pfunc([], [x])
        f1()
        assert x.get_value() == 1
        assert y.get_value() == -1
        assert z.get_value() == -2

        f2 = pfunc([], [x, y])
        f2()
        assert x.get_value() == 2
        assert y.get_value() == -2
        assert z.get_value() == -3

        f3 = pfunc([], [y])
        f3()
        assert x.get_value() == 2
        assert y.get_value() == -3
        assert z.get_value() == -4

        f4 = pfunc([], [x, y], no_default_updates=[x])
        f4()
        assert x.get_value() == 2
        assert y.get_value() == -4
        assert z.get_value() == -5

        f5 = pfunc([], [x, y, z], no_default_updates=[z])
        f5()
        assert x.get_value() == 6
        assert y.get_value() == -5
        assert z.get_value() == -5
Example #24
def test_gradient_scan():
    # Test for a crash when using MRG inside scan and taking the gradient
    # See https://groups.google.com/d/msg/theano-dev/UbcYyU5m-M8/UO9UgXqnQP0J
    aesara_rng = MRG_RandomStream(10)
    w = shared(np.ones(1, dtype="float32"))

    def one_step(x):
        return x + aesara_rng.uniform((1, ), dtype="float32") * w

    x = vector(dtype="float32")
    values, updates = scan(one_step, outputs_info=x, n_steps=10)
    gw = grad(aet_sum(values[-1]), w)
    f = function([x], gw)
    f(np.arange(1, dtype="float32"))
Example #25
    def test_default_container(self):
        # Ensure it is possible to (implicitly) use a shared variable in a
        # function, as a 'state' that can be updated at will.

        rng = np.random.RandomState(1827)
        w_init = rng.rand(5)
        w = shared(w_init.copy(), "w")
        reg = aet_sum(w * w)
        f = pfunc([], reg)

        assert f() == np.sum(w_init * w_init)
        # Change the value of w and ensure the output changes accordingly.
        w.set_value(w.get_value(borrow=True) + 1.0, borrow=True)
        assert f() == np.sum((w_init + 1) ** 2)
Example #26
    def test_ctors(self):

        if aesara.configdefaults.python_int_bitwidth() == 32:
            assert shared(7).type == aesara.tensor.iscalar, shared(7).type
        else:
            assert shared(7).type == aesara.tensor.lscalar, shared(7).type
        assert shared(7.0).type == aesara.tensor.dscalar
        assert shared(np.float32(7)).type == aesara.tensor.fscalar

        # test tensor constructor
        b = shared(np.zeros((5, 5), dtype="int32"))
        assert b.type == TensorType("int32", broadcastable=[False, False])
        b = shared(np.random.rand(4, 5))
        assert b.type == TensorType("float64", broadcastable=[False, False])
        b = shared(np.random.rand(5, 1, 2))
        assert b.type == TensorType("float64", broadcastable=[False, False, False])

        assert shared([]).type == generic

        def badfunc():
            shared(7, bad_kw=False)

        with pytest.raises(TypeError):
            badfunc()
Example #27
    def test_update_same(self):
        # There was a bug in CVM, triggered when a shared variable
        # was its own update expression.
        a = shared(1.0, "a")
        b = shared(np.ones((2, 3)), "b")

        # The order of the variables is not determined, so we try
        # both shared variables.
        # TODO: explain the above comment. By "not determined" does
        # this mean "not deterministic"?
        # This test originally wrote the updates using dictionaries,
        # and iterating over the dictionary was not deterministic.
        # Is that all the comment above meant, or is the CVM intended
        # to add extra non-determinism? Or is the CVM meant to
        # deterministically but arbitrarily pick an order for the updates?
        f = function([], [], updates=[(a, a), (b, (2 * b))])
        g = function([], [], updates=[(a, (a * 2)), (b, b)])

        f()
        assert a.get_value(borrow=True).shape == (), a.get_value()
        assert b.get_value(borrow=True).shape == (2, 3), b.get_value()
        g()
        assert a.get_value(borrow=True).shape == (), a.get_value()
        assert b.get_value(borrow=True).shape == (2, 3), b.get_value()
Example #28
def test_undefined_grad_opt():
    # Make sure that undefined grad get removed in optimized graph.
    random = MRG_RandomStream(np.random.randint(1, 2147462579))
    pvals = shared(np.random.rand(10, 20).astype(config.floatX))
    pvals = pvals / pvals.sum(axis=1)
    pvals = zero_grad(pvals)
    samples = random.multinomial(pvals=pvals, n=1)
    samples = cast(samples, pvals.dtype)
    samples = zero_grad(samples)
    cost = tt_sum(samples + pvals)
    grad_out = grad(cost, samples)
    f = function([], grad_out)
    assert not any(
        [isinstance(node.op, UndefinedGrad) for node in f.maker.fgraph.apply_nodes]
    )
Example #29
    def test_default_updates(self):
        x = shared(0)
        x.default_update = x + 1

        f = pfunc([], [x])
        f()
        assert x.get_value() == 1

        del x.default_update
        f()
        assert x.get_value() == 2

        g = pfunc([], [x])
        g()
        assert x.get_value() == 2
Example #30
    def test_givens(self):
        x = shared(0)
        assign = pfunc([], x, givens={x: 3})
        assert assign() == 3
        assert x.get_value(borrow=True) == 0

        y = ivector()
        f = pfunc([y], (y * x), givens={x: 6})
        assert np.all(f([1, 1, 1]) == [6, 6, 6])
        assert x.get_value() == 0

        z = ivector()
        c = z * y
        f = pfunc([y], (c + 7), givens={z: _asarray([4, 4, 4], dtype="int32")})
        assert np.all(f([1, 1, 1]) == [11, 11, 11])
        assert x.get_value() == 0