Example no. 1
def check_shape_lifted_rv(rv, params, size, rng):
    tt_params = []
    for p in params:
        p_tt = tt.as_tensor(p)
        p_tt = p_tt.type()
        p_tt.tag.test_value = p
        tt_params.append(p_tt)

    tt_size = []
    for s in size:
        s_tt = tt.as_tensor(s)
        s_tt = s_tt.type()
        s_tt.tag.test_value = s
        tt_size.append(s_tt)

    rv = rv(*tt_params, size=tt_size, rng=rng)
    rv_lifted = lift_rv_shapes(rv.owner)

    # Make sure the size input is empty
    assert np.array_equal(rv_lifted.inputs[1].data, [])

    f_ref = function(
        tt_params + tt_size,
        rv,
        mode=no_mode,
    )
    f_lifted = function(
        tt_params + tt_size,
        rv_lifted.outputs[1],
        mode=no_mode,
    )
    f_ref_val = f_ref(*(params + size))
    f_lifted_val = f_lifted(*(params + size))
    assert np.array_equal(f_ref_val, f_lifted_val)
Example no. 2
    def test_binomial_vector(self):
        rng_R = random_state_type()
        n = tensor.lvector()
        prob = tensor.vector()
        post_r, out = binomial(rng_R, n=n, p=prob)
        assert out.ndim == 1
        f = function([rng_R, n, prob], [post_r, out], accept_inplace=True)

        n_val = [1, 2, 3]
        prob_val = np.asarray([0.1, 0.2, 0.3], dtype=config.floatX)
        rng = np.random.RandomState(utt.fetch_seed())
        numpy_rng = np.random.RandomState(utt.fetch_seed())

        # Arguments of size (3,)
        rng0, val0 = f(rng, n_val, prob_val)
        numpy_val0 = numpy_rng.binomial(n=n_val, p=prob_val)
        assert np.all(val0 == numpy_val0)

        # Arguments of size (2,)
        rng1, val1 = f(rng0, n_val[:-1], prob_val[:-1])
        numpy_val1 = numpy_rng.binomial(n=n_val[:-1], p=prob_val[:-1])
        assert np.all(val1 == numpy_val1)

        # Specifying the size explicitly
        g = function(
            [rng_R, n, prob],
            binomial(rng_R, n=n, p=prob, size=(3, )),
            accept_inplace=True,
        )
        rng2, val2 = g(rng1, n_val, prob_val)
        numpy_val2 = numpy_rng.binomial(n=n_val, p=prob_val, size=(3, ))
        assert np.all(val2 == numpy_val2)
        with pytest.raises(ValueError):
            g(rng2, n_val[:-1], prob_val[:-1])
Example no. 3
    def test_givens_input_var(self):
        # Ensure error is raised when trying to replace an input variable.

        x = tt.scalar("x")
        y = x * 2
        with pytest.raises(RuntimeError):
            function([x], y, givens={x: x + 1})
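A minimal usage sketch (not part of the original test; it assumes the same `tt` and `function` imports used by the surrounding examples): `givens` is meant to substitute variables that are not inputs of the compiled function, such as an intermediate node of the graph.

x = tt.scalar("x")
y = x * 2  # intermediate node, not a function input
z = y + 1
# Replace `y` with `x + 1` at compile time, so `z` computes (x + 1) + 1
f = function([x], z, givens={y: x + 1})
assert f(3.0) == 5.0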
Example no. 4
    def __init__(self):
        a = tt.scalar()  # the a is for 'anonymous' (un-named).
        x, s = tt.scalars("xs")
        v = tt.vector("v")

        self.s = s
        self.x = x
        self.v = v

        self.e = a * x + s

        self.f1 = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=0.0, update=s + a * x, mutable=True),
            ],
            s + a * x,
        )

        self.f2 = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=self.f1.container[s], update=s + a * x, mutable=True),
            ],
            s + a * x,
        )
Example no. 5
    def test_no_inplace(self):
        # Test that when not running inplace, the RandomState is not updated
        rf = RandomFunction("uniform", tensor.dvector)
        rng_R = random_state_type()

        post_r, out = rf(rng_R, (3, ), 0.0, 1.0)
        f = function([rng_R], [post_r, out])
        rng = np.random.RandomState(utt.fetch_seed())

        rng0, val0 = f(rng)
        rng_ = np.random.RandomState(utt.fetch_seed())
        # rng should still be in a fresh state
        assert rng_R.type.values_eq(rng, rng_)
        # rng0 should be in an updated state
        assert not rng_R.type.values_eq(rng, rng0)

        f2 = function([In(rng_R, value=rng, update=post_r, mutable=False)],
                      [post_r, out])
        rng2, val2 = f2()
        # rng should be in a fresh state
        assert rng_R.type.values_eq(rng, rng_)
        # rng2 should be in an updated state
        assert not rng_R.type.values_eq(rng, rng2)
        # The updated state should be the same for both functions
        assert rng_R.type.values_eq(rng2, rng0)

        rng3, val3 = f2()
        # rng2 should not have changed
        assert rng_R.type.values_eq(rng2, rng0)
        # rng3 should be an updated again version of rng2
        assert not rng_R.type.values_eq(rng3, rng2)
        assert not rng_R.type.values_eq(rng3, rng)
Example no. 6
    def test_shared_state0(self):
        a = tt.scalar()  # the a is for 'anonymous' (un-named).
        x, s = tt.scalars("xs")

        f = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=0.0, update=s + a * x, mutable=True),
            ],
            s + a * x,
        )
        g = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=f.container[s], update=s - a * x, mutable=True),
            ],
            s + a * x,
        )

        f(1, 2)
        assert f[s] == 2
        assert g[s] == 2
        g(1, 2)
        assert f[s] == 0
        assert g[s] == 0
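The same state sharing is more commonly expressed with a shared variable instead of wiring the two functions together through `f.container[s]`. A minimal sketch (assuming the `theano`, `tt` and `function` names used by the surrounding examples):

s = theano.shared(0.0)
x = tt.scalar("x")
f = function([x], s + x, updates=[(s, s + x)])
g = function([x], s + x, updates=[(s, s - x)])
f(2.0)  # returns 2.0 and stores 2.0 in s
g(2.0)  # returns 4.0 and stores 0.0 back in s; both functions see the same state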
Example no. 7
    def test_shared_state2(self):
        a = tt.scalar()  # the a is for 'anonymous' (un-named).
        x, s = tt.scalars("xs")

        f = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=0.0, update=s + a * x, mutable=False),
            ],
            s + a * x,
        )
        g = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=f.container[s]),
            ],
            s + a * x,
        )

        f(1, 2)
        assert f[s] == 2
        assert g[s] == 2
        f(1, 2)
        assert f[s] == 4
        assert g[s] == 4
        g(1, 2)  # has no effect on state
        assert f[s] == 4
        assert g[s] == 4
Example no. 8
 def test_in_shared_variable(self):
     # Ensure that an error is raised if `In` is used to wrap
     # a shared variable
     a = theano.shared(1.0)
     a_wrapped = In(a, update=a + 1)
     with pytest.raises(TypeError):
         function([a_wrapped])
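A minimal sketch of the supported alternative (assuming the same `theano`, `function` and `np` imports as the surrounding examples): the state of a shared variable is advanced through the `updates` argument of `function` rather than by wrapping it in `In`.

a = theano.shared(1.0)
step = function([], a, updates=[(a, a + 1)])
step()  # returns 1.0, then stores 2.0 in the shared variable
assert np.allclose(a.get_value(), 2.0)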
Example no. 9
    def test_borrow_output(self):
        a = tt.dmatrix()
        f = function([a], Out(a, borrow=False))
        o = np.ones((3, 3))
        # function no longer permits aliasing outputs to inputs
        assert o is not f(o)

        f = function([a], Out(a * 4, borrow=False))
        o = np.ones((3, 3))
        four = f(o)
        assert np.all(four == 4)
        f(o + 0.1)  # should not clobber the memory used to store four
        assert np.all(four == 4)

        f = function([a],
                     Out(a * 4, borrow=True),
                     mode=theano.Mode("c|py_nogc", "fast_run"))
        o = np.ones((3, 3))
        four = f(o)
        assert np.all(four == 4)
        f(o + 0.1)  # should clobber the memory used to store four
        if theano.config.cxx:
            assert not np.all(four == 4)
        else:
            # The Elemwise.perform method doesn't reuse memory,
            # as some numpy versions don't support that correctly.
            assert np.all(four == 4)
Example no. 10
 def test_masked_input(self):
     m = tt.matrix("m")
     mt = m.T
     mt.name = "m.T"
     with pytest.raises(UnusedInputError):
         function([m, mt], mt * 2)
     function([m, mt], mt * 2, on_unused_input="ignore")
Example no. 11
    def test_disconnected_input(self):
        a = tt.scalar("a")
        v = tt.vector("v")
        with pytest.raises(UnusedInputError):
            function([a, v], v * 2)

        function([a, v], v * 2, on_unused_input="ignore")
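For completeness, `on_unused_input` also accepts "warn" (the default is "raise", unless theano.config.on_unused_input says otherwise). A one-line sketch reusing the variables of the test above:

function([a, v], v * 2, on_unused_input="warn")  # compiles, but warns about the unused input `a`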
Example no. 12
    def test_vector_arguments(self):
        rng_R = random_state_type()
        low = tensor.vector()
        post_r, out = uniform(rng_R, low=low, high=1)
        assert out.ndim == 1
        f = function([rng_R, low], [post_r, out], accept_inplace=True)

        def as_floatX(thing):
            return np.asarray(thing, dtype=theano.config.floatX)

        rng_state0 = np.random.RandomState(utt.fetch_seed())
        numpy_rng = np.random.RandomState(utt.fetch_seed())
        post0, val0 = f(rng_state0, [-5, 0.5, 0, 1])
        post1, val1 = f(post0, as_floatX([0.9]))
        numpy_val0 = as_floatX(numpy_rng.uniform(low=[-5, 0.5, 0, 1], high=1))
        numpy_val1 = as_floatX(numpy_rng.uniform(low=as_floatX([0.9]), high=1))

        assert np.all(val0 == numpy_val0)
        assert np.all(val1 == numpy_val1)

        high = tensor.vector()
        post_rb, outb = uniform(rng_R, low=low, high=high)
        assert outb.ndim == 1
        fb = function([rng_R, low, high], [post_rb, outb], accept_inplace=True)

        post0b, val0b = fb(post1, [-4.0, -2], [-1, 0])
        post1b, val1b = fb(post0b, [-4.0], [-1])
        numpy_val0b = as_floatX(numpy_rng.uniform(low=[-4.0, -2], high=[-1, 0]))
        numpy_val1b = as_floatX(numpy_rng.uniform(low=[-4.0], high=[-1]))
        assert np.all(val0b == numpy_val0b)
        assert np.all(val1b == numpy_val1b)
        with pytest.raises(ValueError):
            fb(post1b, [-4.0, -2], [-1, 0, 1])
        # TODO: do we want that?
        # with pytest.raises(ValueError):
        #     fb(post1b, [-4., -2], [-1])

        size = tensor.lvector()
        post_rc, outc = uniform(rng_R, low=low, high=high, size=size, ndim=1)
        fc = function([rng_R, low, high, size], [post_rc, outc],
                      accept_inplace=True)
        post0c, val0c = fc(post1b, [-4.0, -2], [-1, 0], [2])
        post1c, val1c = fc(post0c, [-4.0], [-1], [1])
        numpy_val0c = as_floatX(numpy_rng.uniform(low=[-4.0, -2], high=[-1, 0]))
        numpy_val1c = as_floatX(numpy_rng.uniform(low=[-4.0], high=[-1]))
        assert np.all(val0c == numpy_val0c)
        assert np.all(val1c == numpy_val1c)

        with pytest.raises(ValueError):
            fc(post1c, [-4.0, -2], [-1, 0], [1, 2])
        with pytest.raises(ValueError):
            fc(post1c, [-4.0, -2], [-1, 0], [2, 1])
        with pytest.raises(ValueError):
            fc(post1c, [-4.0, -2], [-1, 0], [1])
        with pytest.raises(ValueError):
            fc(post1c, [-4.0, -2], [-1], [1])
Example no. 13
def test_empty_givens_updates():
    # Regression test for bug fixed in 8625e03.

    # Empty givens / updates dictionaries were not properly detected before,
    # triggering useless crashes at compile time.
    x = tt.scalar()
    y = x * 2
    function([theano.In(x)], y, givens={})
    function([theano.In(x)], y, updates={})
Example no. 14
def test_DimShuffle_lift(ds_order, lifted, dist_op, dist_params, size, rtol):

    rng = shared(np.random.RandomState(1233532), borrow=False)

    dist_params_tt = []
    for p in dist_params:
        p_tt = tt.as_tensor(p).type()
        p_tt.tag.test_value = p
        dist_params_tt.append(p_tt)

    size_tt = []
    for s in size:
        s_tt = tt.iscalar()
        s_tt.tag.test_value = s
        size_tt.append(s_tt)

    dist_st = dist_op(*dist_params_tt, size=size_tt,
                      rng=rng).dimshuffle(ds_order)

    f_inputs = [
        p for p in dist_params_tt + size_tt
        if not isinstance(p, (slice, Constant))
    ]

    mode = Mode(
        "py",
        EquilibriumOptimizer([local_dimshuffle_rv_lift], max_use_ratio=100))

    f_opt = function(
        f_inputs,
        dist_st,
        mode=mode,
    )

    (new_out, ) = f_opt.maker.fgraph.outputs

    if lifted:
        assert new_out.owner.op == dist_op
        assert all(
            isinstance(i.owner.op, DimShuffle)
            for i in new_out.owner.inputs[3:] if i.owner)
    else:
        assert isinstance(new_out.owner.op, DimShuffle)
        return

    f_base = function(
        f_inputs,
        dist_st,
        mode=no_mode,
    )

    arg_values = [p.get_test_value() for p in f_inputs]
    res_base = f_base(*arg_values)
    res_opt = f_opt(*arg_values)

    np.testing.assert_allclose(res_base, res_opt, rtol=rtol)
Example no. 15
    def test_lop_override(self, cls_ofg):
        x = tt.vector()
        y = 1.0 / (1.0 + tt.exp(-x))

        def lop_ov(inps, outs, grads):
            (y_,) = outs
            (dedy_,) = grads
            return [2.0 * y_ * (1.0 - y_) * dedy_]

        y_, dedy = tt.vector(), tt.vector()
        op_lop_ov = cls_ofg([x, y_, dedy], [2.0 * y_ * (1.0 - y_) * dedy])

        xx = tt.vector()
        yy1 = tt.sum(tt.nnet.sigmoid(xx))
        gyy1 = 2.0 * tt.grad(yy1, xx)

        for ov in [lop_ov, op_lop_ov]:
            op = cls_ofg([x], [y], lop_overrides=ov)
            yy2 = tt.sum(op(xx))
            gyy2 = tt.grad(yy2, xx)
            fn = function([xx], [gyy1, gyy2])

            xval = np.random.rand(32).astype(config.floatX)
            y1val, y2val = fn(xval)
            assert np.allclose(y1val, y2val)
Example no. 16
    def test_in_allow_downcast_vector_floatX(self):
        a = theano.tensor.fvector("a")
        b = theano.tensor.fvector("b")
        c = theano.tensor.fvector("c")

        f = function(
            [
                In(a, allow_downcast=True),
                In(b, allow_downcast=False),
                In(c, allow_downcast=None),
            ],
            (a + b + c),
        )

        # If the values can be accurately represented, everything is OK
        z = [0]
        assert np.all(f(z, z, z) == 0)

        # If allow_downcast is True, idem
        assert np.allclose(f([0.1], z, z), 0.1)

        # If allow_downcast is False, nope
        with pytest.raises(TypeError):
            f(z, [0.1], z)

        # If allow_downcast is None, like False
        with pytest.raises(TypeError):
            f(z, z, [0.1])
Example no. 17
    def test_inplace_optimization(self):
        # Test that FAST_RUN includes the random_make_inplace optimization
        # inplace = False
        rf2 = RandomFunction(np.random.RandomState.uniform, tensor.dvector)
        rng_R = random_state_type()

        # If calling RandomFunction directly, all args have to be specified,
        # because shape will have to be moved to the end
        post_r2, out2 = rf2(rng_R, (4, ), 0.0, 1.0)

        f = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=post_r2,
                    mutable=True,
                )
            ],
            out2,
            mode="FAST_RUN",
        )  # DEBUG_MODE can't pass the id-based test below

        # test that the RandomState object stays the same from function call to
        # function call, but that the values returned change from call to call.

        id0 = id(f[rng_R])
        val0 = f()
        assert id0 == id(f[rng_R])
        val1 = f()
        assert id0 == id(f[rng_R])

        assert not np.allclose(val0, val1)
Example no. 18
    def test_random_function_ndim(self):
        # Test that random_function helper function accepts argument ndim
        rng_R = random_state_type()

        # ndim is an optional argument indicating the length of the 'shape'
        # ndim not specified, OK
        post_out4, out4 = uniform(rng_R, (4, ))

        # ndim specified, consistent with shape, OK
        post_out1_4, out1_4 = uniform(rng_R, (4, ), ndim=1)
        post_out2_4_4, out2_4_4 = uniform(rng_R, (4, 4), ndim=2)

        # ndim specified, but not compatible with shape
        with pytest.raises(ValueError):
            uniform(rng_R, (4, ), ndim=2)

        f_ok = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=post_out2_4_4,
                    mutable=True,
                )
            ],
            [out4, out1_4, out2_4_4],
            accept_inplace=True,
        )

        # The correct cases should execute properly
        o4, o1_4, o2_4_4 = f_ok()

        # Check the sanity of the answers
        assert np.allclose(o4, o1_4)
        assert np.allclose(o4, o2_4_4[0])
Example no. 19
 def test_none(self):
     fn = function([], None)  # ok
     rval = fn()
     assert (
         rval != []
     ), "See #254: Using None as function output leads to [] return value"
     assert rval is None
Example no. 20
    def test_copy(self):
        a = tt.scalar()  # the a is for 'anonymous' (un-named).
        x, s = tt.scalars("xs")

        f = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=0.0, update=s + a * x, mutable=True),
            ],
            s + a * x,
        )

        g = copy.copy(f)
        # if they both return, assume that they return equivalent things.

        assert g.container[x].storage is not f.container[x].storage
        assert g.container[a].storage is not f.container[a].storage
        assert g.container[s].storage is not f.container[s].storage

        assert g.value[a] is f.value[a]  # should not have been copied
        # should have been copied because it is mutable
        assert g.value[s] is not f.value[s]
        # its contents should be identical
        assert not (g.value[s] != f.value[s]).any()

        # they should be in sync, default value should be copied.
        assert f(2, 1) == g(2)
        assert f(2, 1) == g(2)
        f(1, 2)  # put them out of sync
        assert f(1, 2) != g(1, 2)  # they should not be equal anymore.
Example no. 21
    def test_state_access(self):
        a = tt.scalar()  # the a is for 'anonymous' (un-named).
        x, s = tt.scalars("xs")

        f = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=0.0, update=s + a * x)
            ],
            s + a * x,
        )

        assert f[a] == 1.0
        assert f[s] == 0.0

        assert f(3.0) == 3.0
        assert f(3.0, a=2.0) == 9.0  # 3.0 + 2*3.0

        # state hasn't changed permanently, we just overrode it on the last line
        assert f[a] == 1.0
        assert f[s] == 9.0

        f[a] = 5.0
        assert f[a] == 5.0
        assert f(3.0) == 24.0  # 9 + 3*5
        assert f[s] == 24.0
Example no. 22
    def test_binomial(self):
        # Test that raw_random.binomial generates the same results as numpy.
        # Check over two calls to see if the random state is correctly updated.
        rng_R = random_state_type()
        # Use non-default parameters, and larger dimensions because of
        # the integer nature of the result
        post_r, bin = binomial(rng_R, (7, 12), 5, 0.8)

        f = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=post_r,
                    mutable=True,
                )
            ],
            [bin],
            accept_inplace=True,
        )

        numpy_rng = np.random.RandomState(utt.fetch_seed())
        val0 = f()
        val1 = f()
        numpy_val0 = numpy_rng.binomial(5, 0.8, size=(7, 12))
        numpy_val1 = numpy_rng.binomial(5, 0.8, size=(7, 12))
        assert np.all(val0 == numpy_val0)
        assert np.all(val1 == numpy_val1)
Example no. 23
    def test_normal(self):
        # Test that raw_random.normal generates the same results as numpy.
        # Check over two calls to see if the random state is correctly updated.
        rng_R = random_state_type()
        # Use non-default parameters
        post_r, out = normal(rng_R, (2, 3), 4.0, 2.0)

        f = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=post_r,
                    mutable=True,
                )
            ],
            [out],
            accept_inplace=True,
        )

        numpy_rng = np.random.RandomState(utt.fetch_seed())
        val0 = f()
        val1 = f()
        numpy_val0 = numpy_rng.normal(4.0, 2.0, size=(2, 3))
        numpy_val1 = numpy_rng.normal(4.0, 2.0, size=(2, 3))
        assert np.allclose(val0, numpy_val0)
        assert np.allclose(val1, numpy_val1)
Example no. 24
    def test_in_allow_downcast_floatX(self):
        a = theano.tensor.fscalar("a")
        b = theano.tensor.fscalar("b")
        c = theano.tensor.fscalar("c")

        f = function(
            [
                In(a, allow_downcast=True),
                In(b, allow_downcast=False),
                In(c, allow_downcast=None),
            ],
            (a + b + c),
        )

        # If the values can be accurately represented, everything is OK
        assert np.all(f(0, 0, 0) == 0)

        # If allow_downcast is True, idem
        assert np.allclose(f(0.1, 0, 0), 0.1)

        # If allow_downcast is False, nope
        with pytest.raises(TypeError):
            f(0, 0.1, 0)

        # If allow_downcast is None, it should work iff floatX=float32
        if theano.config.floatX == "float32":
            assert np.allclose(f(0, 0, 0.1), 0.1)
        else:
            with pytest.raises(TypeError):
                f(0, 0, 0.1)
Example no. 25
    def test_poisson(self):
        # Test that raw_random.poisson generates the same results as numpy.
        # Check over two calls to see if the random state is correctly updated.
        rng_R = random_state_type()
        # Use non-default parameters, and larger dimensions because of
        # the integer nature of the result
        post_r, out = poisson(rng_R, lam=5, size=(11, 8))

        f = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=post_r,
                    mutable=True,
                )
            ],
            [out],
            accept_inplace=True,
        )

        numpy_rng = np.random.RandomState(utt.fetch_seed())
        val0 = f()
        val1 = f()
        numpy_val0 = numpy_rng.poisson(5, size=(11, 8))
        numpy_val1 = numpy_rng.poisson(5, size=(11, 8))
        assert np.allclose(val0, numpy_val0)
        assert np.allclose(val1, numpy_val1)
Example no. 26
    def test_multinomial(self):
        # Test that raw_random.multinomial generates the same results as numpy.
        # Check over two calls to see if the random state is correctly updated.
        rng_R = random_state_type()
        post_r, out = multinomial(rng_R, (7, 3), 6, [0.2] * 5)

        f = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=post_r,
                    mutable=True,
                )
            ],
            [out],
            accept_inplace=True,
        )

        numpy_rng = np.random.RandomState(utt.fetch_seed())
        (val0, ) = f()
        (val1, ) = f()
        numpy_val0 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))
        numpy_val1 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))
        assert np.all(val0 == numpy_val0)
        assert np.all(val1 == numpy_val1)

        assert val0.shape == (7, 3, 5)
        assert val1.shape == (7, 3, 5)
Example no. 27
    def test_mixed_shape(self):
        # Test when the provided shape is a tuple of ints and scalar vars
        rng_R = random_state_type()
        shape0 = tensor.lscalar()
        shape = (shape0, 3)
        post_r, u = uniform(rng_R, size=shape, ndim=2)
        f = function([rng_R, shape0], u)
        rng_state0 = np.random.RandomState(utt.fetch_seed())

        assert f(rng_state0, 2).shape == (2, 3)
        assert f(rng_state0, 8).shape == (8, 3)

        post_r, v = uniform(rng_R, size=shape)
        g = function([rng_R, shape0], v)
        assert g(rng_state0, 2).shape == (2, 3)
        assert g(rng_state0, 8).shape == (8, 3)
Example no. 28
    def test_deepcopy_trust_input(self):
        a = tt.dscalar()  # the a is for 'anonymous' (un-named).
        x, s = tt.dscalars("xs")

        f = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=0.0, update=s + a * x, mutable=True),
            ],
            s + a * x,
        )
        f.trust_input = True
        try:
            g = copy.deepcopy(f)
        except NotImplementedError as e:
            if str(e).startswith("DebugMode is not picklable"):
                return
            else:
                raise
        assert f.trust_input is g.trust_input
        f(np.asarray(2.0))
        with pytest.raises((ValueError, AttributeError,
                            theano.compile.debugmode.InvalidValueError)):
            f(2.0)
        g(np.asarray(2.0))
        with pytest.raises((ValueError, AttributeError,
                            theano.compile.debugmode.InvalidValueError)):
            g(2.0)
Example no. 29
    def test_in_allow_downcast_int(self):
        a = theano.tensor.wvector("a")  # int16
        b = theano.tensor.bvector("b")  # int8
        c = theano.tensor.bscalar("c")  # int8
        f = function(
            [
                In(a, allow_downcast=True),
                In(b, allow_downcast=False),
                In(c, allow_downcast=None),
            ],
            (a + b + c),
        )

        # Both values are in range. Since they're not ndarrays (but lists),
        # they will be converted, and their value checked.
        assert np.all(f([3], [6], 1) == 10)

        # Values are in range, but a dtype too large has explicitly been given
        # For performance reasons, no check of the data is explicitly performed
        # (It might be OK to change this in the future.)
        with pytest.raises(TypeError):
            f([3], np.array([6], dtype="int16"), 1)

        # Value too big for a, silently ignored
        assert np.all(f([2**20], np.ones(1, dtype="int8"), 1) == 2)

        # Value too big for b, raises TypeError
        with pytest.raises(TypeError):
            f([3], [312], 1)

        # Value too big for c, raises TypeError
        with pytest.raises(TypeError):
            f([3], [6], 806)
Example no. 30
    def test_random_function_noshape_args(self):
        # Test if random_function helper works with args but without shape
        rng_R = random_state_type()

        # No shape, default args -> OK
        post_out, out = uniform(rng_R, size=None, ndim=2)
        f = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=post_out,
                    mutable=True,
                )
            ],
            [out],
            accept_inplace=True,
        )
        (o, ) = f()

        # No shape, args that have to be broadcasted -> OK
        low = tensor.TensorType(dtype="float64",
                                broadcastable=(False, True, True))()
        high = tensor.TensorType(dtype="float64",
                                 broadcastable=(True, True, True, False))()
        post_out2, out2 = uniform(rng_R, size=None, ndim=2, low=low, high=high)
        assert out2.ndim == 4
        assert out2.broadcastable == (True, False, True, False)

        g = function(
            [
                low,
                high,
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=post_out2,
                    mutable=True,
                ),
            ],
            [out2],
            accept_inplace=True,
        )
        low_v = [[[3]], [[4]], [[-5]]]
        high_v = [[[[5, 8]]]]
        (o2, ) = g(low_v, high_v)
        assert o2.shape == (1, 3, 1, 2)