Code example #1
0
def test_hostfromgpu_shape_i():
    # Check that shape inference is lifted over the host<->gpu transfer
    # ops, so asking for `.shape` compiles to Shape_i nodes instead of an
    # actual device transfer.

    m = mode_with_gpu.including("local_dot_to_dot22",
                                "local_dot22_to_dot22scalar", "specialize")
    a = tt.fmatrix("a")
    ca = theano.gpuarray.type.GpuArrayType("float32", (False, False))()
    av = np.asarray(np.random.rand(5, 4), dtype="float32")
    cv = gpuarray.asarray(np.random.rand(5, 4),
                          dtype="float32",
                          context=get_context(test_ctx_name))

    # host -> gpu direction
    f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
    assert any(isinstance(node.op, GpuFromHost)
               for node in f.maker.fgraph.toposort())
    f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m)
    nodes = f.maker.fgraph.toposort()
    assert isinstance(nodes[0].op, tt.opt.Shape_i)
    assert isinstance(nodes[1].op, tt.opt.Shape_i)
    assert isinstance(nodes[2].op, tt.opt.MakeVector)
    assert tuple(f(av)) == (5, 4)

    # gpu -> host direction
    f = theano.function([ca], host_from_gpu(ca), mode=m)
    assert host_from_gpu in [node.op for node in f.maker.fgraph.toposort()]
    f = theano.function([ca], host_from_gpu(ca).shape, mode=m)
    nodes = f.maker.fgraph.toposort()
    assert isinstance(nodes[0].op, theano.compile.Shape_i)
    assert isinstance(nodes[1].op, theano.compile.Shape_i)
    assert isinstance(nodes[2].op, tt.opt.MakeVector)
    assert tuple(f(cv)) == (5, 4)
Code example #2
0
def test_transfer_cpu_gpu():
    # Round-trip a matrix: host -> device, then device -> host, and check
    # the values survive both transfers.
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

    av = np.asarray(rng.rand(5, 4), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    to_gpu = theano.function([a], GpuFromHost(test_ctx_name)(a))
    assert GpuArrayType.values_eq(to_gpu(av), gv)

    to_host = theano.function([g], host_from_gpu(g))
    assert np.all(to_host(gv) == av)
Code example #3
0
File: patches.py  Project: shoaibahmed/pl-cnn
def local_mypool_dnn_alternative(node):
    # Optimizer: rewrite a MyPool applied to a value freshly transferred
    # from the GPU into a cuDNN pooling op, keeping the result on the host.
    # Returns None (no rewrite) when cuDNN is unavailable, the op does not
    # match, borders are not ignored, or the input is not a GPU transfer.
    if not dnn_available():
        return
    if not isinstance(node.op, MyPool):
        return
    if not node.op.ignore_border:
        return
    img, = node.inputs
    owner = img.owner
    if owner is None or not isinstance(owner.op, HostFromGpu):
        return
    pooled = dnn_pool(gpu_contiguous(owner.inputs[0]),
                      node.op.ds, stride=node.op.st,
                      pad=node.op.padding, mode=node.op.mode)
    return [host_from_gpu(pooled)]
Code example #4
0
def test_transfer_strided():
    # This is just to ensure that it works in theano
    # libgpuarray has a much more comprehensive suit of tests to
    # ensure correctness
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

    av = np.asarray(rng.rand(5, 8), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    # Take every other column so both host and device arrays are
    # non-contiguous views.
    av, gv = av[:, ::2], gv[:, ::2]

    to_gpu = theano.function([a], GpuFromHost(test_ctx_name)(a))
    assert GpuArrayType.values_eq(to_gpu(av), gv)

    to_host = theano.function([g], host_from_gpu(g))
    assert np.all(to_host(gv) == av)
Code example #5
0
File: patches.py  Project: shoaibahmed/pl-cnn
def local_mypool_dnn_grad_stride(node):
    # Optimizer: replace a MyMaxPoolGrad whose inputs originate on the GPU
    # with the cuDNN pooling gradient, transferring the result back to the
    # host. Returns None (no rewrite) when cuDNN is unavailable, the op
    # does not match, borders are not ignored, or no input is a GPU
    # transfer.
    if not dnn_available():
        return
    if not isinstance(node.op, MyMaxPoolGrad):
        return
    if not node.op.ignore_border:
        return
    inp, out, inp_grad = node.inputs

    def came_from_gpu(var):
        # True when `var` is the output of a HostFromGpu transfer.
        return var.owner is not None and isinstance(var.owner.op,
                                                    HostFromGpu)

    if not (came_from_gpu(inp) or came_from_gpu(out) or
            came_from_gpu(inp_grad)):
        return

    grad = GpuDnnPoolGrad(mode=node.op.mode)(gpu_contiguous(inp),
                                             gpu_contiguous(out),
                                             gpu_contiguous(inp_grad),
                                             node.op.ds, node.op.st,
                                             node.op.padding)
    return [host_from_gpu(grad)]