Example #1
    def cmp(a_shp, b_shp):
        a0 = my_rand(*a_shp)
        a = tcn.shared_constructor(a0, 'a')
        cval = my_rand(a_shp[0], b_shp[1])
        c = tcn.shared_constructor(cval.copy(), 'c')

        b = tcn.fmatrix('b')
        b2 = tcn.fmatrix('b2')

        f = pfunc(
                [b, b2],
                [tensor.dot(a, b2) + c],
                updates=[(a, tensor.dot(a, b) + c)],
                mode=mode_with_gpu)

        assert any([node.op == tcn.blas.gpu_gemm_no_inplace
                    for node in f.maker.fgraph.toposort()])
        bval = my_rand(*b_shp)
        bval2 = my_rand(*b_shp)
        rval = f(bval, bval2)

        assert numpy.allclose(numpy.dot(a0, bval) + cval, a.get_value())
        assert numpy.allclose(numpy.dot(a0, bval2) + cval, rval)

        # Re-run with a strided value: reversing the stored matrix in both
        # dims gives the internal CudaNdarray negative strides, which the
        # gemm path must still handle.
        a.set_value(a0)
        a.set_value(
                a.get_value(borrow=True,
                    return_internal_type=True)[::-1, ::-1],
                borrow=True)
        f(bval, bval2)
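
These snippets come from Theano's CUDA test suite and rely on module-level fixtures that the excerpts omit. Below is a minimal sketch of the assumed context; the exact definitions of my_rand and mode_with_gpu are modeled on the historical test files and should be treated as assumptions, not verified source.

import numpy
import theano
import theano.tensor as tensor
import theano.sandbox.cuda as cuda
import theano.sandbox.cuda as tcn
from theano.compile.pfunc import pfunc

# Compilation mode with the GPU optimizations enabled (assumed definition).
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')


def my_rand(*shape):
    # The CUDA backend only supports float32, hence the cast (assumed helper).
    return numpy.asarray(numpy.random.rand(*shape), dtype='float32')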
Example #2
def test_elemwise_bad_broadcast():
    x = cuda.fmatrix('x')
    y = cuda.fmatrix('y')

    f = theano.function([x, y], x * y, mode=mode_with_gpu)
    assert len(f.maker.fgraph.toposort()) == 2
    assert isinstance(f.maker.fgraph.toposort()[0].op, cuda.GpuElemwise)
    assert f.maker.fgraph.toposort()[1].op == cuda.host_from_gpu

    try:
        f(rand_cuda_ndarray((10, 3)), rand_cuda_ndarray((10, 1)))
    except ValueError:
        pass
    else:
        raise Exception("Theano should have raised an error")
Example #3
    def cmp(a_shp, b_shp):
        a = tcn.shared_constructor(my_rand(*a_shp), 'a')
        cval = my_rand(a_shp[0], b_shp[1])
        c = tcn.shared_constructor(cval.copy(), 'c')

        b = tcn.fmatrix('b')
        b2 = tcn.fmatrix('b2')

        f = pfunc([b, b2],
                  [tensor.dot(a, b2) + c],
                  updates=[(a, tensor.dot(a, b) + c)],
                  mode=mode_with_gpu)

        # Keep a copy of a's initial value; the update overwrites a.
        a0 = a.get_value() * 1.0
        assert any([node.op == tcn.blas.gpu_gemm_no_inplace
                    for node in f.maker.fgraph.toposort()])
        bval = my_rand(*b_shp)
        bval2 = my_rand(*b_shp)
        rval = f(bval, bval2)

        assert numpy.allclose(numpy.dot(a0, bval) + cval, a.get_value())
        assert numpy.allclose(numpy.dot(a0, bval2) + cval, rval)
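
To see why the gpu_gemm_no_inplace assertion holds, the optimized graph can be inspected directly; theano.printing.debugprint is Theano's standard graph dump. A standalone sketch, reusing the fixtures assumed above:

a = tcn.shared_constructor(my_rand(4, 4), 'a')
c = tcn.shared_constructor(my_rand(4, 4), 'c')
b = tcn.fmatrix('b')
f = pfunc([b], [], updates=[(a, tensor.dot(a, b) + c)], mode=mode_with_gpu)

# The update a <- dot(a, b) + c should collapse into one GpuGemm node.
theano.printing.debugprint(f)

# Programmatic equivalent of the assertion used in the tests above.
gemm_nodes = [node for node in f.maker.fgraph.toposort()
              if node.op == tcn.blas.gpu_gemm_no_inplace]
assert len(gemm_nodes) >= 1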
Example #4
def test_deepcopy():
    a = cuda.fmatrix()
    a_v = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))

    # Force the C linker so that the op's C code path is exercised
    mode = theano.Mode("c", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))

    # Force the Python linker; the op's default Python implementation should also work
    mode = theano.Mode("py", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
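
The "out is not a_v" assertions hold because compiling an identity function is not a no-op: Theano protects its inputs by inserting a DeepCopyOp, so the returned buffer never aliases the argument. A sketch of that claim on a plain CPU graph; the theano.compile.ops import path follows later Theano releases and is an assumption here.

from theano.compile.ops import DeepCopyOp

x = tensor.fmatrix()
g = theano.function([x], x)
# The compiled identity graph is expected to contain a DeepCopyOp node.
assert any(isinstance(node.op, DeepCopyOp)
           for node in g.maker.fgraph.toposort())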
Example #5
def test_flatten():
    x = cuda.fmatrix('x')
    f = theano.function([x], x.flatten())
    assert len(f([[0., 0.], [0., 0.]]).shape) == 1
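
flatten also takes an ndim argument that keeps the leading dimensions and collapses the rest; a short sketch on a plain 3-d tensor (CPU types are used here to keep the example self-contained):

x3 = tensor.ftensor3('x3')
# Keep the first dimension, flatten the other two: (2, 3, 4) -> (2, 12).
g = theano.function([x3], x3.flatten(2))
assert g(numpy.zeros((2, 3, 4), dtype='float32')).shape == (2, 12)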
Example #6
    def test_matrix(self):
        x = cuda.fmatrix()
        y = numpy.zeros((5, 7), dtype='float32')
        assert y.size == theano.function([x], x.size)(y)
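
size here is a symbolic scalar equal to the product of the entries of shape, which is why the test holds for any input shape; a quick sketch of that equivalence:

x = cuda.fmatrix()
y = numpy.zeros((5, 7), dtype='float32')
size_fn = theano.function([x], x.size)
prod_fn = theano.function([x], tensor.prod(x.shape))
assert size_fn(y) == prod_fn(y) == 35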