Example 1
    def test_pushout1(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.scalar('x1')
        x2 = tensor.scalar('x2')
        y1 = tensor.scalar('y1')
        y2 = tensor.scalar('y2')
        w1 = tensor.scalar('w1')
        w2 = tensor.scalar('w2')
        c = tensor.iscalar('c')
        x, y = ifelse(c, (x1, y1), (x2, y2), name='f1')
        z = ifelse(c, w1, w2, name='f2')
        out = x * z * y

        f = theano.function([x1, x2, y1, y2, w1, w2, c], out,
                            allow_input_downcast=True)
        assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
        rng = numpy.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vx2 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()
        vw1 = rng.uniform()
        vw2 = rng.uniform()

        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
                              vx1 * vy1 * vw1)
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
                              vx2 * vy2 * vw2)
Example 2
    def test_merge_ifs_true_false(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.scalar('x1')
        x2 = tensor.scalar('x2')
        y1 = tensor.scalar('y1')
        y2 = tensor.scalar('y2')
        w1 = tensor.scalar('w1')
        w2 = tensor.scalar('w2')
        c = tensor.iscalar('c')

        out = ifelse(c,
            ifelse(c, x1, x2) + ifelse(c, y1, y2) + w1,
            ifelse(c, x1, x2) + ifelse(c, y1, y2) + w2)
        f = theano.function([x1, x2, y1, y2, w1, w2, c], out,
                            allow_input_downcast=True)
        assert len([x for x in f.maker.fgraph.toposort()
                if isinstance(x.op, IfElse)]) == 1

        rng = numpy.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vx2 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()
        vw1 = rng.uniform()
        vw2 = rng.uniform()
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
                              vx1 + vy1 + vw1)
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
                              vx2 + vy2 + vw2)
Example 3
    def test_pushout2(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.scalar("x1")
        x2 = tensor.scalar("x2")
        y1 = tensor.scalar("y1")
        y2 = tensor.scalar("y2")
        w1 = tensor.scalar("w1")
        w2 = tensor.scalar("w2")
        c = tensor.iscalar("c")
        x, y = ifelse(c, (x1, y1), (x2, y2), name="f1")
        z = ifelse(x > y, w1, w2, name="f2")
        out = x * z * y

        f = theano.function([x1, x2, y1, y2, w1, w2, c], out, allow_input_downcast=True)
        assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
        rng = numpy.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vx2 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()
        vw1 = rng.uniform()
        vw2 = rng.uniform()
        if vx1 > vy1:
            vw = vw1
        else:
            vw = vw2
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1), vx1 * vy1 * vw)

        if vx2 > vy2:
            vw = vw1
        else:
            vw = vw2
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0), vx2 * vy2 * vw)
Example 4
    def test_merge(self):
        raise SkipTest("Optimization temporarily disabled")
        x = tensor.vector("x")
        y = tensor.vector("y")
        c = tensor.iscalar("c")
        z1 = ifelse(c, x + 1, y + 1)
        z2 = ifelse(c, x + 2, y + 2)
        z = z1 + z2
        f = theano.function([c, x, y], z)
        assert len([n for n in f.maker.fgraph.toposort() if isinstance(n.op, IfElse)]) == 1
Example 5
    def test_merge(self):
        raise SkipTest("Optimization temporarily disabled")
        x = tensor.vector('x')
        y = tensor.vector('y')
        c = tensor.iscalar('c')
        z1 = ifelse(c, x + 1, y + 1)
        z2 = ifelse(c, x + 2, y + 2)
        z = z1 + z2
        f = theano.function([c, x, y], z)
        assert len([x for x in f.maker.env.toposort()
                    if isinstance(x.op, IfElse)]) == 1
Example 6
    def test_grad_lazy_if(self):
        # Tests that we can compute the gradients through a lazy if
        x = tensor.vector("x", dtype=self.dtype)
        y = tensor.vector("y", dtype=self.dtype)
        c = tensor.iscalar("c")
        z = ifelse(c, x, y)
        gx, gy = tensor.grad(z.sum(), [x, y])

        f = theano.function(
            [c, x, y],
            [self.cast_output(gx), self.cast_output(gy)],
            mode=self.mode)
        # Only 2 of the 3 ifelse ops are moved to the GPU.
        # The one that stays on the CPU handles the shape.
        self.assertFunctionContains(f, self.get_ifelse(1), min=2, max=3)
        rng = np.random.RandomState(utt.fetch_seed())

        xlen = rng.randint(200)
        ylen = rng.randint(200)

        vx = np.asarray(rng.uniform(size=(xlen, )), self.dtype)
        vy = np.asarray(rng.uniform(size=(ylen, )), self.dtype)
        gx0, gy0 = f(1, vx, vy)
        assert np.allclose(gx0.shape, vx.shape)
        assert np.allclose(gy0.shape, vy.shape)
        assert np.all(np.asarray(gx0) == 1.0)
        assert np.all(np.asarray(gy0) == 0.0)

        gx0, gy0 = f(0, vx, vy)
        assert np.allclose(gx0.shape, vx.shape)
        assert np.allclose(gy0.shape, vy.shape)
        assert np.all(np.asarray(gx0) == 0.0)
        assert np.all(np.asarray(gy0) == 1.0)
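
The same gradient-routing behaviour can be checked without a GPU. Below is a minimal CPU-only sketch (an illustration, not one of the collected examples; it assumes a plain Theano install): the gradient of a lazy `ifelse` is all ones on the selected branch and all zeros on the other.

import numpy as np
import theano
import theano.tensor as tt
from theano.ifelse import ifelse

x = tt.vector('x')
y = tt.vector('y')
c = tt.iscalar('c')
z = ifelse(c, x, y)
gx, gy = tt.grad(z.sum(), [x, y])
f = theano.function([c, x, y], [gx, gy])

vx = np.ones(3, dtype=theano.config.floatX)
vy = np.ones(5, dtype=theano.config.floatX)
gx1, gy1 = f(1, vx, vy)
# Only the branch actually taken receives gradient.
assert np.all(gx1 == 1.0) and np.all(gy1 == 0.0)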
Example 7
    def test_multiple_out(self):
        x1 = tensor.vector('x1', dtype=self.dtype)
        x2 = tensor.vector('x2', dtype=self.dtype)
        y1 = tensor.vector('y1', dtype=self.dtype)
        y2 = tensor.vector('y2', dtype=self.dtype)
        c = tensor.iscalar('c')
        z = ifelse(c, (x1, x2), (y1, y2))
        f = theano.function([c, x1, x2, y1, y2], z, mode=self.mode)
        self.assertFunctionContains1(f, self.get_ifelse(2))

        ifnode = [x for x in f.maker.fgraph.toposort()
                  if isinstance(x.op, IfElse)][0]
        assert len(ifnode.outputs) == 2

        rng = numpy.random.RandomState(utt.fetch_seed())

        x1len = rng.randint(200)
        x2len = rng.randint(200)
        y1len = rng.randint(200)
        y2len = rng.randint(200)

        vx1 = numpy.asarray(rng.uniform(size=(x1len,)), self.dtype)
        vx2 = numpy.asarray(rng.uniform(size=(x2len,)), self.dtype)
        vy1 = numpy.asarray(rng.uniform(size=(y1len,)), self.dtype)
        vy2 = numpy.asarray(rng.uniform(size=(y2len,)), self.dtype)

        ovx1, ovx2 = f(1, vx1, vx2, vy1, vy2)
        ovy1, ovy2 = f(0, vx1, vx2, vy1, vy2)
        assert numpy.allclose(vx1, ovx1)
        assert numpy.allclose(vy1, ovy1)
        assert numpy.allclose(vx2, ovx2)
        assert numpy.allclose(vy2, ovy2)
Example 8
    def test_multiple_out_grad(self):
        # Tests that we can compute the gradients through a lazy if
        x1 = tensor.vector('x1')
        x2 = tensor.vector('x2')
        y1 = tensor.vector('y1')
        y2 = tensor.vector('y2')
        c = tensor.iscalar('c')
        z = ifelse(c, (x1, x2), (y1, y2))
        grads = tensor.grad(z[0].sum() + z[1].sum(),
                            [x1, x2, y1, y2])

        f = theano.function([c, x1, x2, y1, y2], grads)
        rng = numpy.random.RandomState(utt.fetch_seed())

        lens = [rng.randint(200) for i in range(4)]
        values = [numpy.asarray(rng.uniform(size=(l,)), theano.config.floatX)
                  for l in lens]
        outs_1 = f(1, *values)
        assert all([x.shape[0] == y for x, y in zip(outs_1, lens)])
        assert numpy.all(outs_1[0] == 1.)
        assert numpy.all(outs_1[1] == 1.)
        assert numpy.all(outs_1[2] == 0.)
        assert numpy.all(outs_1[3] == 0.)

        outs_0 = f(0, *values)
        assert all([x.shape[0] == y for x, y in zip(outs_0, lens)])
        assert numpy.all(outs_0[0] == 0.)
        assert numpy.all(outs_0[1] == 0.)
        assert numpy.all(outs_0[2] == 1.)
        assert numpy.all(outs_0[3] == 1.)
Example 9
def test_ifelse():
    a = tt.scalar()
    b = generic()
    c = generic()

    notimpl = NotImplementedOp()

    lazys = [True]
    # We need lazy to end up being True for this test.
    if theano.config.vm__lazy in [True, None]:
        lazys = [True, None]

    cloops = [True, False]

    if theano.config.cxx == "":
        cloops = [False]

    for cloop in cloops:
        for lazy in lazys:
            linker = theano.link.vm.VMLinker(use_cloop=cloop, lazy=lazy)
            f = function(
                [a, b, c],
                ifelse(a, notimpl(b), c),
                mode=Mode(linker=linker, optimizer="fast_run"),
            )

            with pytest.raises(NotImplementedOpException):
                f(1, "a", "b")

            assert f(0, "a", "b") == "b"
Example 10
    def test_multiple_out(self):
        x1 = tensor.vector("x1", dtype=self.dtype)
        x2 = tensor.vector("x2", dtype=self.dtype)
        y1 = tensor.vector("y1", dtype=self.dtype)
        y2 = tensor.vector("y2", dtype=self.dtype)
        c = tensor.iscalar("c")
        z = ifelse(c, (x1, x2), (y1, y2))
        f = theano.function([c, x1, x2, y1, y2], z, mode=self.mode)
        self.assertFunctionContains1(f, self.get_ifelse(2))

        ifnode = [
            x for x in f.maker.fgraph.toposort() if isinstance(x.op, IfElse)
        ][0]
        assert len(ifnode.outputs) == 2

        rng = np.random.RandomState(utt.fetch_seed())

        x1len = rng.randint(200)
        x2len = rng.randint(200)
        y1len = rng.randint(200)
        y2len = rng.randint(200)

        vx1 = np.asarray(rng.uniform(size=(x1len, )), self.dtype)
        vx2 = np.asarray(rng.uniform(size=(x2len, )), self.dtype)
        vy1 = np.asarray(rng.uniform(size=(y1len, )), self.dtype)
        vy2 = np.asarray(rng.uniform(size=(y2len, )), self.dtype)

        ovx1, ovx2 = f(1, vx1, vx2, vy1, vy2)
        ovy1, ovy2 = f(0, vx1, vx2, vy1, vy2)
        assert np.allclose(vx1, ovx1)
        assert np.allclose(vy1, ovy1)
        assert np.allclose(vx2, ovx2)
        assert np.allclose(vy2, ovy2)
Example 11
    def test_multiple_out_grad(self):
        # Tests that we can compute the gradients through a lazy if
        x1 = tensor.vector('x1')
        x2 = tensor.vector('x2')
        y1 = tensor.vector('y1')
        y2 = tensor.vector('y2')
        c = tensor.iscalar('c')
        z = ifelse(c, (x1, x2), (y1, y2))
        grads = tensor.grad(z[0].sum() + z[1].sum(), [x1, x2, y1, y2])

        f = theano.function([c, x1, x2, y1, y2], grads)
        rng = numpy.random.RandomState(utt.fetch_seed())

        lens = [rng.randint(200) for i in range(4)]
        values = [
            numpy.asarray(rng.uniform(size=(l, )), theano.config.floatX)
            for l in lens
        ]
        outs_1 = f(1, *values)
        assert all([x.shape[0] == y for x, y in zip(outs_1, lens)])
        assert numpy.all(outs_1[0] == 1.)
        assert numpy.all(outs_1[1] == 1.)
        assert numpy.all(outs_1[2] == 0.)
        assert numpy.all(outs_1[3] == 0.)

        outs_0 = f(0, *values)
        assert all([x.shape[0] == y for x, y in zip(outs_0, lens)])
        assert numpy.all(outs_0[0] == 0.)
        assert numpy.all(outs_0[1] == 0.)
        assert numpy.all(outs_0[2] == 1.)
        assert numpy.all(outs_0[3] == 1.)
Example 12
    def test_mixed_dtype(self):
        x1 = tensor.vector('x1', dtype='int32')
        x2 = tensor.vector('x2', dtype=self.dtype)
        y1 = tensor.vector('y1', dtype='int32')
        y2 = tensor.vector('y2', dtype=self.dtype)
        c = tensor.iscalar('c')
        f = theano.function([c, x1, x2, y1, y2],
                            ifelse(c, (x1, x2), (y1, y2)),
                            mode=self.mode)
        self.assertFunctionContains1(f, self.get_ifelse(2))
        rng = numpy.random.RandomState(utt.fetch_seed())

        xlen = rng.randint(200)
        ylen = rng.randint(200)

        vx1 = numpy.asarray(rng.uniform(size=(xlen, )) * 3, 'int32')
        vx2 = numpy.asarray(rng.uniform(size=(xlen, )), self.dtype)
        vy1 = numpy.asarray(rng.uniform(size=(ylen, )) * 3, 'int32')
        vy2 = numpy.asarray(rng.uniform(size=(ylen, )), self.dtype)

        o1, o2 = f(1, vx1, vx2, vy1, vy2)
        assert numpy.allclose(vx1, o1)
        assert numpy.allclose(vx2, o2)

        o1, o2 = f(0, vx1, vx2, vy1, vy2)
        assert numpy.allclose(vy1, o1)
        assert numpy.allclose(vy2, o2)
Example 13
    def test_not_lazy_if_inplace(self):
        # Tests that if the outputs are scalars and the graph is big,
        # we disable the inplace opt to speed up optimization
        x = tensor.vector('x', dtype=self.dtype)
        y = tensor.vector('y', dtype=self.dtype)
        c = tensor.iscalar('c')
        mode = theano.compile.get_mode(self.mode).excluding(
            # Disable many optimizations to keep the graph big enough
            # that the in-place optimization is skipped.
            'fusion', 'local_add_canonizer',
            'inplace', 'constant_folding')
        y2 = reduce(lambda x, y: x + y, [y] + list(range(200)))
        f = theano.function([c, x, y], ifelse(c, x, y2), mode=mode)
        # Check that the ifelse is not in-place.
        ifnode = [n for n in f.maker.fgraph.toposort()
                  if isinstance(n.op, IfElse)]
        assert len(ifnode) == 1
        assert not ifnode[0].op.as_view
        rng = numpy.random.RandomState(utt.fetch_seed())

        xlen = rng.randint(200)
        ylen = rng.randint(200)

        vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
        vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)

        assert numpy.allclose(vx, f(1, vx, vy))
        assert numpy.allclose(vy + sum(range(200)), f(0, vx, vy))
Example 14
    def test_mixed_dtype(self):
        x1 = tensor.vector('x1', dtype='int32')
        x2 = tensor.vector('x2', dtype=self.dtype)
        y1 = tensor.vector('y1', dtype='int32')
        y2 = tensor.vector('y2', dtype=self.dtype)
        c = tensor.iscalar('c')
        f = theano.function([c, x1, x2, y1, y2],
                            ifelse(c, (x1, x2), (y1, y2)), mode=self.mode)
        self.assertFunctionContains1(f, self.get_ifelse(2))
        rng = numpy.random.RandomState(utt.fetch_seed())

        xlen = rng.randint(200)
        ylen = rng.randint(200)

        vx1 = numpy.asarray(rng.uniform(size=(xlen,)) * 3, 'int32')
        vx2 = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
        vy1 = numpy.asarray(rng.uniform(size=(ylen,)) * 3, 'int32')
        vy2 = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)

        o1, o2 = f(1, vx1, vx2, vy1, vy2)
        assert numpy.allclose(vx1, o1)
        assert numpy.allclose(vx2, o2)

        o1, o2 = f(0, vx1, vx2, vy1, vy2)
        assert numpy.allclose(vy1, o1)
        assert numpy.allclose(vy2, o2)
Example 15
    def test_grad_lazy_if(self):
        # Tests that we can compute the gradients through a lazy if
        x = tensor.vector('x', dtype=self.dtype)
        y = tensor.vector('y', dtype=self.dtype)
        c = tensor.iscalar('c')
        z = ifelse(c, x, y)
        gx, gy = tensor.grad(z.sum(), [x, y])

        f = theano.function([c, x, y], [self.cast_output(gx),
                                        self.cast_output(gy)],
                            mode=self.mode)
        # Only 2 of the 3 ifelse ops are moved to the GPU.
        # The one that stays on the CPU handles the shape.
        self.assertFunctionContains(f, self.get_ifelse(1), min=2, max=3)
        rng = numpy.random.RandomState(utt.fetch_seed())

        xlen = rng.randint(200)
        ylen = rng.randint(200)

        vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
        vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)
        gx0, gy0 = f(1, vx, vy)
        assert numpy.allclose(gx0.shape, vx.shape)
        assert numpy.allclose(gy0.shape, vy.shape)
        assert numpy.all(numpy.asarray(gx0) == 1.)
        assert numpy.all(numpy.asarray(gy0) == 0.)

        gx0, gy0 = f(0, vx, vy)
        assert numpy.allclose(gx0.shape, vx.shape)
        assert numpy.allclose(gy0.shape, vy.shape)
        assert numpy.all(numpy.asarray(gx0) == 0.)
        assert numpy.all(numpy.asarray(gy0) == 1.)
Example 16
    def test_grad_cast_input(self):
        # Tests the gradient when both inputs are on the GPU.
        x = tensor.vector("x", dtype=self.dtype)
        y = tensor.vector("y", dtype=self.dtype)
        c = tensor.iscalar("c")
        z = ifelse(c, self.cast_output(x), self.cast_output(y))
        gx, gy = tensor.grad(z.sum(), [x, y])

        theano.function([c, x, y], [gx, gy], mode=self.mode)
Example 17
    def test_remove_useless_inputs1(self):
        x = tensor.vector("x")
        y = tensor.vector("y")
        c = tensor.iscalar("c")
        z = ifelse(c, (x, x), (y, y))
        f = theano.function([c, x, y], z)

        ifnode = [n for n in f.maker.fgraph.toposort() if isinstance(n.op, IfElse)][0]
        assert len(ifnode.inputs) == 3
Example 18
    def test_lazy_if_on_generics(self):
        x = theano.generic()
        y = theano.generic()
        c = tensor.iscalar('c')
        f = theano.function([c, x, y], ifelse(c, x, y))

        vx = ['testX']
        vy = ['testY']
        assert f(1, vx, vy) == vx
        assert f(0, vx, vy) == vy
Example 19
    def test_grad_cast_input(self):
        # Tests the gradient when both inputs are on the GPU.
        x = tensor.vector('x', dtype=self.dtype)
        y = tensor.vector('y', dtype=self.dtype)
        c = tensor.iscalar('c')
        z = ifelse(c, self.cast_output(x), self.cast_output(y))
        gx, gy = tensor.grad(z.sum(), [x, y])

        theano.function([c, x, y], [gx, gy],
                        mode=self.mode)
Example 20
    def test_remove_useless_inputs1(self):
        raise SkipTest("Optimization temporarily disabled")
        x = tensor.vector("x")
        y = tensor.vector("y")
        c = tensor.iscalar("c")
        z = ifelse(c, (x, x), (y, y))
        f = theano.function([c, x, y], z)

        ifnode = [n for n in f.maker.fgraph.toposort() if isinstance(n.op, IfElse)][0]
        assert len(ifnode.inputs) == 3
Example 21
    def test_remove_useless_inputs1(self):
        raise SkipTest("Optimization temporarily disabled")
        x = tensor.vector('x')
        y = tensor.vector('y')
        c = tensor.iscalar('c')
        z = ifelse(c, (x, x), (y, y))
        f = theano.function([c, x, y], z)

        ifnode = [x for x in f.maker.fgraph.toposort()
                  if isinstance(x.op, IfElse)][0]
        assert len(ifnode.inputs) == 3
Example 22
    def test_remove_useless_inputs1(self):
        raise SkipTest("Optimization temporarily disabled")
        x = tensor.vector('x')
        y = tensor.vector('y')
        c = tensor.iscalar('c')
        z = ifelse(c, (x, x), (y, y))
        f = theano.function([c, x, y], z)

        ifnode = [x for x in f.maker.env.toposort()
                  if isinstance(x.op, IfElse)][0]
        assert len(ifnode.inputs) == 3
Example 23
    def test_remove_useless_inputs2(self):
        x1 = tensor.vector("x1")
        x2 = tensor.vector("x2")
        y1 = tensor.vector("y1")
        y2 = tensor.vector("y2")
        c = tensor.iscalar("c")
        z = ifelse(c, (x1, x1, x1, x2, x2), (y1, y1, y2, y2, y2))
        f = theano.function([c, x1, x2, y1, y2], z)

        ifnode = [x for x in f.maker.fgraph.toposort() if isinstance(x.op, IfElse)][0]
        assert len(ifnode.outputs) == 3
Example 24
    def test_remove_useless_inputs2(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.vector("x1")
        x2 = tensor.vector("x2")
        y1 = tensor.vector("y1")
        y2 = tensor.vector("y2")
        c = tensor.iscalar("c")
        z = ifelse(c, (x1, x1, x1, x2, x2), (y1, y1, y2, y2, y2))
        f = theano.function([c, x1, x2, y1, y2], z)

        ifnode = [x for x in f.maker.fgraph.toposort() if isinstance(x.op, IfElse)][0]
        assert len(ifnode.outputs) == 3
Example 25
    def test_grad_test_values(self):
        # Regression test for test values of `ifelse` gradient.

        backup = theano.config.compute_test_value
        theano.config.compute_test_value = 'raise'
        try:
            x = tensor.scalar('x')
            x.tag.test_value = 1
            # Used to crash due to undefined test value.
            tensor.grad(ifelse(0, x, x), x)
        finally:
            theano.config.compute_test_value = backup
Example 26
    def test_grad_test_values(self):
        # Regression test for test values of `ifelse` gradient.

        backup = theano.config.compute_test_value
        theano.config.compute_test_value = "raise"
        try:
            x = tensor.scalar("x")
            x.tag.test_value = 1
            # Used to crash due to undefined test value.
            tensor.grad(ifelse(0, x, x), x)
        finally:
            theano.config.compute_test_value = backup
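
The `compute_test_value` machinery exercised by the two tests above can be seen in isolation. The following minimal sketch (an illustration assuming a standard Theano install, not part of the scraped tests) shows that under 'raise' every input must carry a test value, and results are evaluated on it eagerly, so errors surface at graph-construction time:

import theano
import theano.tensor as tt

theano.config.compute_test_value = 'raise'
x = tt.scalar('x')
x.tag.test_value = 1.0           # required while the flag is 'raise'
y = 2 * x                        # the test value is propagated immediately
assert float(y.tag.test_value) == 2.0
theano.config.compute_test_value = 'off'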
Example 27
    def test_pushout3(self):
        x1 = tensor.scalar("x1")
        y1 = tensor.scalar("y1")
        y2 = tensor.scalar("y2")
        c = tensor.iscalar("c")
        two = np.asarray(2, dtype=theano.config.floatX)
        x, y = ifelse(c, (x1, y1), (two, y2), name="f1")
        o3 = np.asarray(0.3, dtype=theano.config.floatX)
        o2 = np.asarray(0.2, dtype=theano.config.floatX)
        z = ifelse(c, o3, o2, name="f2")
        out = x * z * y

        f = theano.function([x1, y1, y2, c], out, allow_input_downcast=True)
        assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
        rng = np.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()

        assert np.allclose(f(vx1, vy1, vy2, 1), vx1 * vy1 * 0.3)
        assert np.allclose(f(vx1, vy1, vy2, 0), 2 * vy2 * 0.2)
Example 28
    def test_grad_int_value(self):
        w = theano.shared(numpy.random.rand(10))
        b = theano.shared(numpy.random.rand())
        params = [w, b]

        x = tensor.vector()
        y = tensor.scalar()

        score = w.dot(x) + b
        correct = score * y > 0

        loss = ifelse(correct, 0, 1)
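        # The gradient is taken through an ifelse whose branch values are
        # Python ints; building the updates must not raise.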
        [(param, param - 0.5 * tensor.grad(cost=loss, wrt=param)) for param in params]
Example 29
    def test_pushout3(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.scalar('x1')
        y1 = tensor.scalar('y1')
        y2 = tensor.scalar('y2')
        c = tensor.iscalar('c')
        two = numpy.asarray(2, dtype=theano.config.floatX)
        x, y = ifelse(c, (x1, y1), (two, y2), name='f1')
        o3 = numpy.asarray(0.3, dtype=theano.config.floatX)
        o2 = numpy.asarray(0.2, dtype=theano.config.floatX)
        z = ifelse(c, o3, o2, name='f2')
        out = x * z * y

        f = theano.function([x1, y1, y2, c], out, allow_input_downcast=True)
        assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
        rng = numpy.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()

        assert numpy.allclose(f(vx1, vy1, vy2, 1), vx1 * vy1 * 0.3)
        assert numpy.allclose(f(vx1, vy1, vy2, 0), 2 * vy2 * 0.2)
Example 30
    def test_remove_useless_inputs2(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.vector('x1')
        x2 = tensor.vector('x2')
        y1 = tensor.vector('y1')
        y2 = tensor.vector('y2')
        c = tensor.iscalar('c')
        z = ifelse(c, (x1, x1, x1, x2, x2), (y1, y1, y2, y2, y2))
        f = theano.function([c, x1, x2, y1, y2], z)

        ifnode = [x for x in f.maker.fgraph.toposort()
                  if isinstance(x.op, IfElse)][0]
        assert len(ifnode.outputs) == 3
Example 31
    def test_pushout3(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.scalar("x1")
        y1 = tensor.scalar("y1")
        y2 = tensor.scalar("y2")
        c = tensor.iscalar("c")
        two = numpy.asarray(2, dtype=theano.config.floatX)
        x, y = ifelse(c, (x1, y1), (two, y2), name="f1")
        o3 = numpy.asarray(0.3, dtype=theano.config.floatX)
        o2 = numpy.asarray(0.2, dtype=theano.config.floatX)
        z = ifelse(c, o3, o2, name="f2")
        out = x * z * y

        f = theano.function([x1, y1, y2, c], out, allow_input_downcast=True)
        assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
        rng = numpy.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()

        assert numpy.allclose(f(vx1, vy1, vy2, 1), vx1 * vy1 * 0.3)
        assert numpy.allclose(f(vx1, vy1, vy2, 0), 2 * vy2 * 0.2)
Example 32
    def test_grad_int_value(self):
        w = theano.shared(np.random.rand(10))
        b = theano.shared(np.random.rand())
        params = [w, b]

        x = tensor.vector()
        y = tensor.scalar()

        score = w.dot(x) + b
        correct = score * y > 0

        loss = ifelse(correct, 0, 1)
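        # The gradient is taken through an ifelse whose branch values are
        # Python ints; building the updates must not raise.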
        [(param, param - 0.5 * tensor.grad(cost=loss, wrt=param)) for param in params]
Example 33
    def test_pushout2(self):
        raise SkipTest("Optimization temporarily disabled")
        x1 = tensor.scalar('x1')
        x2 = tensor.scalar('x2')
        y1 = tensor.scalar('y1')
        y2 = tensor.scalar('y2')
        w1 = tensor.scalar('w1')
        w2 = tensor.scalar('w2')
        c = tensor.iscalar('c')
        x, y = ifelse(c, (x1, y1), (x2, y2), name='f1')
        z = ifelse(x > y, w1, w2, name='f2')
        out = x * z * y

        f = theano.function([x1, x2, y1, y2, w1, w2, c],
                            out,
                            allow_input_downcast=True)
        assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
        rng = numpy.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vx2 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()
        vw1 = rng.uniform()
        vw2 = rng.uniform()
        if vx1 > vy1:
            vw = vw1
        else:
            vw = vw2
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
                              vx1 * vy1 * vw)

        if vx2 > vy2:
            vw = vw1
        else:
            vw = vw2
        assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
                              vx2 * vy2 * vw)
Example 34
    def test_pushout1(self):
        x1 = tensor.scalar("x1")
        x2 = tensor.scalar("x2")
        y1 = tensor.scalar("y1")
        y2 = tensor.scalar("y2")
        w1 = tensor.scalar("w1")
        w2 = tensor.scalar("w2")
        c = tensor.iscalar("c")
        x, y = ifelse(c, (x1, y1), (x2, y2), name="f1")
        z = ifelse(c, w1, w2, name="f2")
        out = x * z * y

        f = theano.function([x1, x2, y1, y2, w1, w2, c], out, allow_input_downcast=True)
        assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
        rng = np.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vx2 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()
        vw1 = rng.uniform()
        vw2 = rng.uniform()

        assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1), vx1 * vy1 * vw1)
        assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0), vx2 * vy2 * vw2)
Example 35
    def test_lazy_if(self):
        # Tests that lazy if works, even if the two results have different
        # shapes but the same type (i.e. both vectors, or matrices, or
        # whatnot, of the same dtype).
        x = tensor.vector('x', dtype=self.dtype)
        y = tensor.vector('y', dtype=self.dtype)
        c = tensor.iscalar('c')
        f = theano.function([c, x, y], ifelse(c, x, y), mode=self.mode)
        self.assertFunctionContains1(f, self.get_ifelse(1))
        rng = numpy.random.RandomState(utt.fetch_seed())

        xlen = rng.randint(200)
        ylen = rng.randint(200)

        vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
        vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)

        assert numpy.allclose(vx, f(1, vx, vy))
        assert numpy.allclose(vy, f(0, vx, vy))
Example 36
    def test_sparse_tensor_error(self):
        import theano.sparse

        if not theano.sparse.enable_sparse:
            pytest.skip("Optimization temporarily disabled")
        rng = np.random.RandomState(utt.fetch_seed())
        data = rng.rand(2, 3).astype(self.dtype)
        x = self.shared(data)
        y = theano.sparse.matrix("csc", dtype=self.dtype, name="y")
        z = theano.sparse.matrix("csr", dtype=self.dtype, name="z")
        cond = theano.tensor.iscalar("cond")

        with pytest.raises(TypeError):
            ifelse(cond, x, y)
        with pytest.raises(TypeError):
            ifelse(cond, y, x)
        with pytest.raises(TypeError):
            ifelse(cond, x, z)
        with pytest.raises(TypeError):
            ifelse(cond, z, x)
        with pytest.raises(TypeError):
            ifelse(cond, y, z)
        with pytest.raises(TypeError):
            ifelse(cond, z, y)
Example 37
    def test_sparse_tensor_error(self):
        pytest.importorskip("scipy", minversion="0.7.0")

        import theano.sparse

        rng = np.random.RandomState(utt.fetch_seed())
        data = rng.rand(2, 3).astype(self.dtype)
        x = self.shared(data)
        y = theano.sparse.matrix("csc", dtype=self.dtype, name="y")
        z = theano.sparse.matrix("csr", dtype=self.dtype, name="z")
        cond = theano.tensor.iscalar("cond")

        with pytest.raises(TypeError):
            ifelse(cond, x, y)
        with pytest.raises(TypeError):
            ifelse(cond, y, x)
        with pytest.raises(TypeError):
            ifelse(cond, x, z)
        with pytest.raises(TypeError):
            ifelse(cond, z, x)
        with pytest.raises(TypeError):
            ifelse(cond, y, z)
        with pytest.raises(TypeError):
            ifelse(cond, z, y)
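
All of the tests above revolve around the laziness of `ifelse`. For readers used to `theano.tensor.switch`, this closing sketch (not from the test suite; it assumes a standard Theano install with the default lazy linker) contrasts the two: `switch` is elementwise and computes both branches, while `ifelse` takes a scalar boolean condition and computes only the selected branch.

import numpy as np
import theano
import theano.tensor as tt
from theano.ifelse import ifelse

a, b = tt.scalars('a', 'b')
x, y = tt.matrices('x', 'y')

# switch: elementwise select; both means are always computed
f_switch = theano.function([a, b, x, y],
                           tt.switch(tt.lt(a, b), tt.mean(x), tt.mean(y)))
# ifelse: lazy select; only the chosen mean is computed
f_ifelse = theano.function([a, b, x, y],
                           ifelse(tt.lt(a, b), tt.mean(x), tt.mean(y)))

xv = np.ones((2, 2), dtype=theano.config.floatX)
yv = np.zeros((2, 2), dtype=theano.config.floatX)
assert f_switch(1, 2, xv, yv) == f_ifelse(1, 2, xv, yv)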