Example 1
def comp_get(arr, f):
    with use_cuda():
        g = renom.core.GPUValue(arr)
        v1 = f(g)

    v2 = f(arr)
    assert np.allclose(v2, v1.new_array())
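
These snippets come from ReNom's GPU test suite and lean on shared scaffolding that the page does not show: NumPy, the ReNom imports, and the `use_cuda`/`close`/`close_shape` helpers. Below is a minimal sketch of what that scaffolding plausibly looks like, assuming `use_cuda` is a small context manager toggling `set_cuda_active` and the `close*` helpers wrap `np.allclose`; the suite's actual definitions may differ.

import itertools
from contextlib import contextmanager

import numpy as np

import renom
import renom as rm
from renom import Variable
from renom.cuda import set_cuda_active


@contextmanager
def use_cuda():
    # Execute the enclosed block with CUDA enabled, then drop back to CPU.
    set_cuda_active(True)
    try:
        yield
    finally:
        set_cuda_active(False)


def close(a, b):
    # Assumed helper: results must match within floating-point tolerance.
    assert np.allclose(np.array(a), np.array(b))


def close_shape(a, b):
    # Assumed helper: shapes must match as well as values.
    assert a.shape == b.shape
    close(a, b)

With that in place, `comp_get` above would be called with an input array and a function to evaluate on both devices, e.g. something like `comp_get(np.random.rand(2, 3).astype('float32'), lambda x: x * 2)` (assuming GPUValue supports the same operators as an ndarray).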
Example 2
def test_cu_reduce_arg_max(a, axis):
    with use_cuda():
        g = renom.core.GPUValue(a)

        ret = renom.cuda.cu_reduce_argmax(g, axis)
        renom.cuda.cuDeviceSynchronize()
        close_shape(ret.new_array(), np.argmax(a, axis))
Example 3
def test_gpu_node_deconvolutionnd(a):
    a = np.ones_like(a)
    with use_cuda():

        layer = rm.DeconvNd(channel=2, filter=3, stride=1, padding=0)

        g1 = Variable(a)
        g2 = layer(g1)
        g3 = rm.sum(g2)
        g = g3.grad()
        g_g1 = g.get(layer.params["w"])
        g_g2 = g.get(layer.params["b"])
        g_g3 = g.get(g1)
        g2.to_cpu()
        g3.to_cpu()

    c2 = layer(g1)
    c3 = rm.sum(c2)
    c = c3.grad()
    c_g1 = c.get(layer.params["w"])
    c_g2 = c.get(layer.params["b"])
    c_g3 = c.get(g1)

    close(g2, c2)
    close(g3, c3)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_g3, g_g3)
Example 4
def test_gpu_node_convolutionnd(a):
    with use_cuda():

        layer = rm.ConvNd(channel=2, filter=1, stride=1, padding=0)
        #layer.params["w"] = rm.Variable(np.random.rand(32, 3, 3, 3))
        #layer.params["b"] = rm.Variable(np.random.rand(1, 32, 1, 1))

        g1 = Variable(a)
        g2 = layer(g1)
        g3 = rm.sum(g2)
        g = g3.grad()
        g_g1 = g.get(layer.params["w"])
        g_g2 = g.get(layer.params["b"])
        g_g3 = g.get(g1)
        g2.to_cpu()
        g3.to_cpu()

    c2 = layer(g1)
    c3 = rm.sum(c2)
    c = c3.grad()
    c_g1 = c.get(layer.params["w"])
    c_g2 = c.get(layer.params["b"])
    c_g3 = c.get(g1)

    close(g2, c2)
    close(g3, c3)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_g3, g_g3)
Example 5
def test_gpu_node_deconvolution2d(a):
    with use_cuda():

        layer = rm.Deconv2d(channel=32)
        layer.params["w"] = rm.Variable(np.random.rand(3, 32, 3, 3))
        layer.params["b"] = rm.Variable(np.random.rand(1, 32, 1, 1))

        g1 = Variable(a)
        g2 = layer(g1)
        g3 = rm.sum(g2)
        g = g3.grad()
        g_g1 = g.get(layer.params["w"])
        g_g2 = g.get(layer.params["b"])
        g_g3 = g.get(g1)
        g2.to_cpu()
        g3.to_cpu()

    c2 = layer(g1)
    c3 = rm.sum(c2)
    c = c3.grad()
    c_g1 = c.get(layer.params["w"])
    c_g2 = c.get(layer.params["b"])
    c_g3 = c.get(g1)

    close(g2, c2)
    close(g3, c3)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_g3, g_g3)
Example 6
def test_cu_reduce_min(a, axis):
    with use_cuda():
        g = renom.core.GPUValue(a)

        ret = renom.cuda.cu_reduce_min(g, axis, keepdims=False)
        close_shape(ret.new_array(), np.min(a, axis, keepdims=False))

        ret = renom.cuda.cu_reduce_min(g, axis, keepdims=True)
        close_shape(ret.new_array(), np.min(a, axis, keepdims=True))
Example 7
def test_cusum(a, axis):
    with use_cuda():
        g = renom.core.GPUValue(a)

        ret = renom.cuda.cusum(g, axis, keepdims=False)
        close(ret.new_array(), np.sum(a, axis, keepdims=False))

        ret = renom.cuda.cusum(g, axis, keepdims=True)
        close(ret.new_array(), np.sum(a, axis, keepdims=True))
Example 8
def test_where(node):
    with use_cuda():
        g1 = Variable(node)
        g3 = rm.sum(rm.where(g1 > 0.5, g1, 1))
        g = g3.grad()
        g_g1 = g.get(g1)
        g3.to_cpu()
        g_g1.to_cpu()

    c3 = rm.sum(rm.where(g1 > 0.5, g1, 1))
    c = c3.grad()
    c_g1 = c.get(g1)

    close(g3, c3)
    close(c_g1, g_g1)
Example 9
def test_cu_reduce_max(a, axis):
    with use_cuda():
        g = renom.core.GPUValue(a)

        ret = renom.cuda.cu_reduce_max(g, axis, keepdims=False)
        renom.cuda.cuDeviceSynchronize()
        close_shape(ret.new_array(), np.max(a, axis, keepdims=False))

        ret = renom.cuda.cu_reduce_max(g, axis, keepdims=True)
        close_shape(ret.new_array(), np.max(a, axis, keepdims=True))
Example 10
def test_gpu_node_neg():
    with use_cuda():
        g1 = Variable(np.array([1., 2.]))
        g2 = -g1
        assert np.allclose(g2, [-1, -2])
        assert not np.allclose(g2, [-1, -3])

        g3 = -g1 * 2
        assert np.allclose(g3, [-2, -4])
        assert not np.allclose(g3, [-3, -4])
Example 11
def test_transpose():
    with use_cuda():
        for n in range(0, 5):
            shape = [2 * (i + 1) for i in range(n)]
            a = np.arange(np.prod(shape)).reshape(shape).astype('float32')
            b = renom.core.GPUValue(a)
            for axis in itertools.permutations(range(len(shape))):
                aa = np.transpose(a, axis)
                bb = b.transpose(axis)

                assert np.allclose(aa, bb.new_array())
Example 12
def test_gpu_node_average_pooling(a):
    with use_cuda():

        layer = rm.AveragePool2d()

        g1 = Variable(a)
        g3 = rm.sum(layer(g1))
        g = g3.grad()
        g_g3 = g.get(g1)
        g3.to_cpu()

    c3 = rm.sum(layer(g1))
    c = c3.grad()
    c_g3 = c.get(g1)

    close(g3, c3)
    close(c_g3, g_g3)
Example 13
def test_gpu_node_spatial_dropout(a):
    with use_cuda():

        g1 = Variable(a)

        layer = rm.SpatialDropout()

        np.random.seed(1)
        g3 = rm.sum(layer(g1))
        g = g3.grad()
        g_g1 = g.get(g1)
        g3.to_cpu()

    np.random.seed(1)
    c3 = rm.sum(layer(g1))
    c = c3.grad()
    c_g1 = c.get(g1)

    close(g3, c3)
    close(c_g1, g_g1)
Example 14
def test_gpu_node_add(a, b):
    with use_cuda():

        g1 = Variable(a)
        g2 = Variable(b)

        g3 = rm.sum(g1 + g2)
        g = g3.grad()

        g_g1 = g.get(g1)
        g_g2 = g.get(g2)
        g3.to_cpu()

    c3 = rm.sum(g1 + g2)
    c = c3.grad()
    c_g1 = c.get(g1)
    c_g2 = c.get(g2)

    close(g3, c3)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
Example 15
def test_lrn(node):
    layer = rm.Lrn()

    with use_cuda():
        g1 = Variable(node)

        g3 = rm.sum(layer(g1))
        g = g3.grad()
        g_g1 = g.get(g1)

        g3.to_cpu()
        g_g1.to_cpu()

    set_cuda_active(False)
    c3 = rm.sum(layer(g1))

    c = c3.grad()
    c_g1 = c.get(g1)

    close(g3, c3)
    close(c_g1, g_g1)
Example 16
def test_gpu_node_max_pooling(a):
    with use_cuda():

        layer = rm.MaxPool2d()

        g1 = Variable(a)
        g2 = layer(g1)
        g3 = rm.sum(g2)
        g = g3.grad()
        g_g3 = g.get(g1)
        g2.to_cpu()
        g3.to_cpu()

    c2 = layer(g1)
    c3 = rm.sum(c2)
    c = c3.grad()
    c_g3 = c.get(g1)

    close(g2, c2)
    close(g3, c3)
    close(c_g3, g_g3)
Example 17
def test_gpu_node_upconvolution2d(a):
    with use_cuda():

        layer = rm.Deconv2d(channel=32)

        g1 = Variable(a)
        g3 = rm.sum(layer(g1))
        g = g3.grad()
        g_g1 = g.get(layer.params["w"])
        g_g2 = g.get(layer.params["b"])
        g_g3 = g.get(g1)
        g3.to_cpu()

    c3 = rm.sum(layer(g1))
    c = c3.grad()
    c_g1 = c.get(layer.params["w"])
    c_g2 = c.get(layer.params["b"])
    c_g3 = c.get(g1)

    close(g3, c3)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_g3, g_g3)
Example 18
def test_cu_reduce_arg_min(a, axis):
    with use_cuda():
        g = renom.core.GPUValue(a)

        ret = renom.cuda.cu_reduce_argmin(g, axis)
        close_shape(ret.new_array(), np.argmin(a, axis))
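
The `a`, `axis`, and `node` arguments in the tests above are injected by the test harness, most likely via pytest fixtures or parametrization. A hypothetical parametrization driving a reduction test like `test_cusum` could look like the following; the shapes and axis values are illustrative, not the suite's actual ones.

import numpy as np
import pytest


@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("a", [
    np.random.rand(2, 3).astype(np.float32),
    np.random.rand(2, 3, 4).astype(np.float32),
])
def test_cusum_parametrized(a, axis):
    # Sum on the GPU, then check value and shape against NumPy on the host.
    with use_cuda():
        g = renom.core.GPUValue(a)
        ret = renom.cuda.cusum(g, axis, keepdims=False)
        close(ret.new_array(), np.sum(a, axis, keepdims=False))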