Code example #1
def test_float32_shared_constructor():

    npy_row = numpy.zeros((1, 10), dtype='float32')

    def eq(a, b):
        return a == b

    # test that we can create a CudaNdarray
    assert (f32sc(npy_row).type == CudaNdarrayType((False, False)))

    # test that broadcastable arg is accepted, and that they
    # don't strictly have to be tuples
    assert eq(
        f32sc(npy_row, broadcastable=(True, False)).type,
        CudaNdarrayType((True, False)))
    assert eq(
        f32sc(npy_row, broadcastable=[True, False]).type,
        CudaNdarrayType((True, False)))
    assert eq(
        f32sc(npy_row, broadcastable=numpy.array([True, False])).type,
        CudaNdarrayType([True, False]))

    # test that we can make non-matrix shared vars
    assert eq(
        f32sc(numpy.zeros((2, 3, 4, 5), dtype='float32')).type,
        CudaNdarrayType((False, ) * 4))
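Note: these snippets are not self-contained. The sketch below shows the imports and fixtures they appear to rely on; the exact location of f32sc and the RNG setup are assumptions based on Theano's CUDA test conventions, not taken from this listing.

import numpy

import theano
import theano.sandbox.cuda as cuda
import theano.sparse as sparse
from theano import tensor
from theano.ifelse import ifelse
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.tensor import TensorType

# Assumption: f32sc is the float32 shared-variable constructor used throughout
# these examples; in Theano's CUDA sandbox it is float32_shared_constructor.
from theano.sandbox.cuda.var import float32_shared_constructor as f32sc

# Assumption: the method-style examples (self.rng, self.assertRaises) run inside
# a unittest.TestCase whose setUp provides a seeded NumPy RNG, e.g.
#     self.rng = numpy.random.RandomState(42)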
Code example #2
File: test_var.py Project: NicolasBouchard/Theano
def test_float32_shared_constructor():

    npy_row = numpy.zeros((1, 10), dtype='float32')

    def eq(a, b):
        return a == b

    # test that we can create a CudaNdarray
    assert (f32sc(npy_row).type == CudaNdarrayType((False, False)))

    # test that broadcastable arg is accepted, and that they
    # don't strictly have to be tuples
    assert eq(
            f32sc(npy_row, broadcastable=(True, False)).type,
            CudaNdarrayType((True, False)))
    assert eq(
            f32sc(npy_row, broadcastable=[True, False]).type,
            CudaNdarrayType((True, False)))
    assert eq(
            f32sc(npy_row, broadcastable=numpy.array([True, False])).type,
            CudaNdarrayType([True, False]))

    # test that we can make non-matrix shared vars
    assert eq(
            f32sc(numpy.zeros((2, 3, 4, 5), dtype='float32')).type,
            CudaNdarrayType((False,) * 4))
Code example #3
File: test_var.py Project: NicolasBouchard/Theano
    def test_ndim_mismatch(self):
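        # ifelse requires both branches to have the same number of dimensions:
        # x is a 1-d CudaNdarray shared variable, y a 2-d float32 column.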
        data = self.rng.rand(5).astype('float32')
        x = f32sc(data)
        y = tensor.fcol('y')
        cond = theano.tensor.iscalar('cond')

        self.assertRaises(TypeError, ifelse, cond, x, y)
        self.assertRaises(TypeError, ifelse, cond, y, x)
Code example #4
File: test_var.py Project: HaniAlmousli/Theano
def test_givens():
    # Test that you can use a TensorType expression to replace a
    # CudaNdarrayType in the givens dictionary.
    # This test case uses code mentioned in #757
    data = numpy.float32([1, 2, 3, 4])
    x = f32sc(data)
    y = x ** 2
    # x is a shared variable, so it cannot be an explicit input;
    # pass an empty input list and substitute x via givens instead.
    f = theano.function([], y, givens={x: x + 1})
    f()
Code example #5
def test_givens():
    # Test that you can use a TensorType expression to replace a
    # CudaNdarrayType in the givens dictionary.
    # This test case uses code mentioned in #757
    data = numpy.float32([1, 2, 3, 4])
    x = f32sc(data)
    y = x**2
    f = theano.function([], y, givens={x: x + 1})
    f()
Code example #6
    def test_1(self):
        data = numpy.float32([1, 2, 3, 4])
        x = f32sc(data)
        y = x**2
        f = theano.function([], y, updates={x: x + 1})
        f()

        # Test that we can update with a CudaVariable
        f = theano.function([], y, updates={x: cuda.gpu_from_host(x + 1)})
        f()
Code example #7
File: test_var.py Project: NicolasBouchard/Theano
    def test_broadcast_mismatch(self):
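        # ifelse also rejects branches whose broadcastable patterns differ:
        # x is a non-broadcastable 2-d shared variable, y a broadcastable row.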
        data = self.rng.rand(2, 3).astype('float32')
        x = f32sc(data)
        print(x.broadcastable)
        y = tensor.frow('y')
        print(y.broadcastable)
        cond = theano.tensor.iscalar('cond')

        self.assertRaises(TypeError, ifelse, cond, x, y)
        self.assertRaises(TypeError, ifelse, cond, y, x)
Code example #8
File: test_var.py Project: nicholas-leonard/Theano
    def test_1(self):
        data = numpy.float32([1, 2, 3, 4])
        x = f32sc(data)
        y = x ** 2
        f = theano.function([], y, updates=[(x, x + 1)])
        f()

        # Test that we can update with a CudaVariable
        f = theano.function([], y, updates=[(x, cuda.gpu_from_host(x + 1))])
        f()
Code example #9
    def test_err_broadcast(self):
        # Test that we raise a good error message when the broadcastable
        # pattern of the update does not match the shared variable's.
        data = numpy.random.rand(10, 10).astype('float32')
        output_var = f32sc(name="output", value=data)

        # the update_var has type matrix, and the update expression
        # is a broadcasted scalar, and that should not be allowed.
        self.assertRaises(TypeError, theano.function, inputs=[], outputs=[],
                          updates=[(output_var,
                                   output_var.sum().dimshuffle('x', 'x'))])
Code example #10
File: test_var.py Project: NicolasBouchard/Theano
    def test_3(self):
        # Test that broadcastable dimensions don't screw up
        # update expressions.
        data = numpy.random.rand(10, 10).astype('float32')
        output_var = f32sc(name="output", value=data)

        # the update_var has type matrix, and the update expression
        # is a broadcasted scalar, and that should be allowed.
        output_func = theano.function(inputs=[], outputs=[],
                updates={output_var: output_var.sum().dimshuffle('x', 'x')})
        output_func()
Code example #11
File: test_var.py Project: nicholas-leonard/Theano
    def test_err_ndim(self):
        # Test that we raise a good error message when we don't
        # have the same number of dimensions.
        data = numpy.random.rand(10, 10).astype('float32')
        output_var = f32sc(name="output", value=data)

        # the update_var has type matrix, and the update expression
        # is a broadcasted scalar, and that should not be allowed.
        self.assertRaises(TypeError, theano.function, inputs=[], outputs=[],
                          updates=[(output_var,
                                   output_var.sum())])
Code example #12
File: test_var.py Project: NicolasBouchard/Theano
    def test_2(self):
        # This test case uses code mentioned in #698
        data = numpy.random.rand(10, 10).astype('float32')
        output_var = f32sc(name="output",
                value=numpy.zeros((10, 10), 'float32'))

        x = tensor.fmatrix('x')
        output_updates = {output_var: x ** 2}
        output_givens = {x: data}
        output_func = theano.function(inputs=[], outputs=[],
                updates=output_updates, givens=output_givens)
        output_func()
Code example #13
    def test_2(self):
        # This test case uses code mentioned in #698
        data = numpy.random.rand(10, 10).astype('float32')
        output_var = f32sc(name="output",
                           value=numpy.zeros((10, 10), 'float32'))

        x = tensor.fmatrix('x')
        output_updates = [(output_var, x ** 2)]
        output_givens = {x: data}
        output_func = theano.function(
            inputs=[], outputs=[],
            updates=output_updates, givens=output_givens)
        output_func()
Code example #14
File: test_var.py Project: nicholas-leonard/Theano
def test_givens():
    # Test that you can use a TensorType expression to replace a
    # CudaNdarrayType in the givens dictionary.
    # This test case uses code mentioned in #757
    data = numpy.float32([1, 2, 3, 4])
    x = f32sc(data)
    y = x ** 2
    f = theano.function([], y, givens={x: x + 1})
    f()
    assert isinstance(f.maker.fgraph.toposort()[-1].op, tensor.Elemwise)
    f = theano.function([], y, givens={x: x + 1}, mode=mode_with_gpu)
    f()
    assert isinstance(f.maker.fgraph.toposort()[-2].op, cuda.GpuElemwise)
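Code example #14 uses mode_with_gpu, which none of these snippets define. A plausible definition, assumed from the usual pattern in Theano's CUDA test modules rather than taken from this listing:

import theano

# Assumption: the default compilation mode with the GPU optimizations enabled,
# matching how Theano's CUDA tests conventionally build mode_with_gpu.
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')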
Code example #15
    def test_3(self):
        # Test that broadcastable dimensions don't screw up
        # update expressions.
        data = numpy.random.rand(10, 10).astype('float32')
        output_var = f32sc(name="output", value=data)

        # the update_var has type matrix, and the update expression
        # is a broadcasted scalar, and that should be allowed.
        output_func = theano.function(
            inputs=[],
            outputs=[],
            updates={output_var: output_var.sum().dimshuffle('x', 'x')})
        output_func()
Code example #16
    def test_broadcast(self):
        # Test that we can rebroadcast
        data = numpy.random.rand(10, 10).astype('float32')
        output_var = f32sc(name="output", value=data)

        up = tensor.unbroadcast(output_var.sum().dimshuffle('x', 'x'), 0, 1)
        output_func = theano.function(inputs=[], outputs=[],
                                      updates=[(output_var, up)])
        output_func()

        up = tensor.patternbroadcast(output_var.sum().dimshuffle('x', 'x'),
                                     output_var.type.broadcastable)
        output_func = theano.function(inputs=[], outputs=[],
                                      updates=[(output_var, up)])
        output_func()
Code example #17
File: test_var.py Project: NicolasBouchard/Theano
    def test_sparse_tensor_error(self):
        data = self.rng.rand(2, 3).astype('float32')
        x = f32sc(data)
        y = sparse.matrix('csc', dtype='float32', name='y')
        z = sparse.matrix('csr', dtype='float32', name='z')
        cond = theano.tensor.iscalar('cond')

        # Right now (2012-01-19), a ValueError gets raised, but I think
        # a TypeError (like in the other cases) would be fine.
        self.assertRaises((TypeError, ValueError), ifelse, cond, x, y)
        self.assertRaises((TypeError, ValueError), ifelse, cond, y, x)
        self.assertRaises((TypeError, ValueError), ifelse, cond, x, z)
        self.assertRaises((TypeError, ValueError), ifelse, cond, z, x)
        self.assertRaises((TypeError, ValueError), ifelse, cond, y, z)
        self.assertRaises((TypeError, ValueError), ifelse, cond, z, y)
Code example #18
File: test_var.py Project: nicholas-leonard/Theano
    def test_broadcast(self):
        # Test that we can rebroadcast
        data = numpy.random.rand(10, 10).astype('float32')
        output_var = f32sc(name="output", value=data)

        up = tensor.unbroadcast(output_var.sum().dimshuffle('x', 'x'), 0, 1)
        output_func = theano.function(inputs=[], outputs=[],
                                      updates=[(output_var, up)])
        output_func()

        up = tensor.patternbroadcast(output_var.sum().dimshuffle('x', 'x'),
                                     output_var.type.broadcastable)
        output_func = theano.function(inputs=[], outputs=[],
                                      updates=[(output_var, up)])
        output_func()
Code example #19
File: test_var.py Project: NicolasBouchard/Theano
    def test_cuda_tensor(self):
        data = self.rng.rand(4).astype('float32')
        x = f32sc(data)
        y = x + 1
        cond = theano.tensor.iscalar('cond')

        assert isinstance(x.type, CudaNdarrayType)
        assert isinstance(y.type, TensorType)

        out1 = ifelse(cond, x, y)
        out2 = ifelse(cond, y, x)

        assert isinstance(out1.type, TensorType)
        assert isinstance(out2.type, TensorType)

        f = theano.function([cond], out1)
        g = theano.function([cond], out2)

        assert numpy.all(f(0) == data + 1)
        assert numpy.all(f(1) == data)
        assert numpy.all(g(0) == data)
        assert numpy.all(g(1) == data + 1)
Code example #20
    def test_1(self):
        data = numpy.float32([1, 2, 3, 4])
        x = f32sc(data)
        y = x**2
        f = theano.function([], y, updates={x: x + 1})
        f()
Code example #21
File: test_var.py Project: NicolasBouchard/Theano
    def test_1(self):
        data = numpy.float32([1, 2, 3, 4])
        x = f32sc(data)
        y = x ** 2
        f = theano.function([], y, updates={x: x + 1})
        f()