Example #1
def test_batch_normalize_featuremap(a):
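    # Run feature-map batch normalization in training mode on GPU and CPU and
    # check that outputs, batch statistics and moving statistics all match.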
    layer = rm.BatchNormalize(mode=BATCH_NORMALIZE_FEATUREMAP, momentum=0.1)

    set_cuda_active(True)

    g1 = Variable(a)

    for _ in range(10):
        g3 = layer(g1)
    g3.to_cpu()

    layer.set_models(inference=True)
    g4 = layer(g1)
    layer.set_models(inference=False)

    set_cuda_active(False)
    layer._mov_mean = 0
    layer._mov_std = 0
    for _ in range(10):
        c3 = layer(g1)

    layer.set_models(inference=True)
    c4 = layer(g1)
    layer.set_models(inference=False)

    close(g3, c3)
    close(g4, c4)
    close(g3.attrs._m.new_array(), c3.attrs._m)
    close(g3.attrs._v.new_array(), c3.attrs._v)
    close(g3.attrs._mov_m.new_array(), c3.attrs._mov_m)
    close(g3.attrs._mov_v.new_array(), c3.attrs._mov_v)
Example #2
def _backward_gpu(self, context, dy, **kwargs):
    # Backward pass of average pooling: move dy to the host, compute dx with the
    # CPU pooling implementation, then re-enable CUDA and propagate the gradient.
    dy.to_cpu()
    cu.set_cuda_active(False)
    dx = imnpool(self.attrs._original_x, self.attrs._kernel, self.attrs._stride,
                 self.attrs._padding, mode="average", alternate_input=dy)
    cu.set_cuda_active(True)
    self.attrs._x._update_diff(context, dx)
Example #3
def test_ddpg(agent, environ, fit_args, use_gpu):
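    # Build a small actor/critic pair, check that the agent returns an action of
    # the expected shape, then run one short fit() call as a smoke test.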
    cuda.set_cuda_active(True)
    action_shape = (1,)
    state_shape = (2,)
    env = environ(action_shape, state_shape)
    actor_network = rm.Sequential([
        rm.Dense(5),
        rm.Dense(action_shape[0]),
    ])

    class Critic(rm.Model):
        def __init__(self):
            self._l1 = rm.Dense(5)
            self._l2 = rm.Dense(1)

        def forward(self, x, action):
            return self._l2(rm.concat(self._l1(x), action))

    critic_network = Critic()
    model = agent(env, actor_network, critic_network)
    action = model.action(np.random.rand(*state_shape))

    assert action.shape == action_shape

    # Check fit
    model.fit(epoch=1, epoch_step=10, batch_size=4, random_step=20, test_step=10, **fit_args)
    print(model.history)
Example #4
def test_gpu_node_dot(a, b):
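    # Compute dot(g1, g2) and its gradients on GPU, then repeat on CPU and
    # check that the forward result and all gradients agree.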
    set_cuda_active(True)

    g1 = Variable(a)
    g2 = Variable(b)

    g3 = dot(g1, g2)
    g4 = rm.sum(g3)
    g = g4.grad()
    g_g1 = g.get(g1)
    g_g2 = g.get(g2)
    g_g3 = g.get(g3)
    g3.to_cpu()
    g4.to_cpu()

    set_cuda_active(False)
    c3 = dot(g1, g2)
    c4 = rm.sum(c3)
    c = c4.grad()
    c_g1 = c.get(g1)
    c_g2 = c.get(g2)
    c_c3 = c.get(c3)

    close(g3, c3)
    close(g4, c4)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_c3, g_g3)
Example #5
def test_gpu_lstm(a):
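    # Unroll the LSTM for 5 steps, accumulate the loss, and compare the loss and
    # the input gradient between the GPU and CPU code paths.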
    layer = rm.Lstm(output_size=2)

    def func(x):
        loss = 0
        for _ in range(5):
            loss += sum(layer(x))
        layer.truncate()
        return loss

    set_cuda_active(True)

    g1 = Variable(a)

    g3 = func(g1)
    g3.to_cpu()

    g = g3.grad()
    g_g1 = g.get(g1)
    g_g1.to_cpu()

    set_cuda_active(False)
    c3 = func(g1)
    c = c3.grad()
    c_g1 = c.get(g1)

    close(g3, c3)
    close(c_g1, g_g1)
Example #6
def test_mean_squared_error(node, x, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node, x):
        return rm.mean_squared_error(node, x)
    compare(func, node, node, x)
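
Note: the compare helper used throughout these tests comes from the surrounding ReNom test suite and is not shown on this page. As a rough, purely illustrative sketch of the kind of gradient check such a helper presumably performs, here is a plain-NumPy central-difference check (the function name, tolerances and scheme below are assumptions, not ReNom's actual implementation):

import numpy as np

def numeric_grad(f, x, eps=1e-4):
    # Central-difference estimate of df/dx for a scalar-valued f and array x.
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=["multi_index"])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        f_plus = f(x)
        x[idx] = orig - eps
        f_minus = f(x)
        x[idx] = orig
        grad[idx] = (f_plus - f_minus) / (2 * eps)
        it.iternext()
    return grad

# Usage: the analytical gradient of sum(x ** 2) is 2 * x.
x = np.random.rand(3, 4)
assert np.allclose(numeric_grad(lambda v: (v ** 2).sum(), x), 2 * x, atol=1e-3)

Presumably ReNom's compare runs func with CUDA both enabled and disabled and checks the forward values and the gradients obtained from grad().get() in the same spirit.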
Example #7
def test_transpose(node, axis, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(node.transpose(axis))
    compare(func, node, node)
Example #8
def test_min(node, axis, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.amin(node, axis))
    compare(func, node, node)
Example #9
def test_reshape(node, shape, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.reshape(node, shape)) + sum(node.reshape(shape)) + sum(node.reshape(*shape))
    compare(func, node, node)
Example #10
def test_T(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(node.T)
    compare(func, node, node)
Example #11
def test_add(node, x, raise_error, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    # Add
    def func_add1(node, x):
        return sum(x + node)
    compare(func_add1, node, node, x)

    def func_add2(node, x):
        return sum(node + x)
    compare(func_add2, node, node, x)

    def func_iadd1(node, x):
        node += x
        return sum(node)
    try:
        # An assertion error occurs when the shapes mismatch.
        compare(func_iadd1, node, node, x)
        assert not raise_error
    except:
        assert raise_error

    def func_iadd2(node, x):
        x += node
        return sum(node)
    try:
        # An assertion error occurs when the shapes mismatch.
        compare(func_iadd2, node, node, x)
        assert not raise_error
    except:
        assert raise_error
Example #12
def test_where(node, x, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node, x):
        return sum(rm.where(node > 0.5, node, x))
    compare(func, node, node, x)
Example #13
def test_cross_entropy(node, x, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node, x):
        return rm.cross_entropy(node, x)
    compare(func, node, node, x)
Example #14
def test_leaky_relu_activation(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.leaky_relu(node))
    compare(func, node, node)
Example #15
def test_softsign(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.softsign(node))
    compare(func, node, node)
Example #16
def test_sigmoid_activation(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(sigmoid(node))
    compare(func, node, node)
Example #17
def test_gpu_broadcast(a, b):
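    # Element-wise addition of two GPU Variables should match the NumPy result
    # of a + b, including broadcasting.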
    set_cuda_active(True)

    g1 = Variable(a)
    g2 = Variable(b)

    assert np.allclose(a + b, (g1 + g2))
Example #18
def test_gpu_layer_normalize(a):
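    # Run LayerNormalize forward and backward on GPU and CPU and compare the
    # outputs and the gradients w.r.t. the input, gain and bias parameters.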
    set_cuda_active(True)

    g1 = Variable(a)

    layer = rm.LayerNormalize()

    g2 = layer(g1)
    g3 = rm.sum(g2)
    g = g3.grad(detach_graph=False)
    g_g1 = g.get(g1)
    g_g2 = g.get(layer.params["gain"])
    g_g3 = g.get(layer.params["bias"])

    set_cuda_active(False)

    c2 = layer(g1)
    c3 = rm.sum(c2)
    c = c3.grad(detach_graph=False)
    c_c1 = c.get(g1)
    c_c2 = c.get(layer.params["gain"])
    c_c3 = c.get(layer.params["bias"])

    close(g2, c2)
    close(g3, c3)
    close(g_g1, c_c1)
    close(g_g2, c_c2)
    close(g_g3, c_c3)
Example #19
def test_gpu_gru(a):
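    # Compare one GRU forward/backward pass between GPU and CPU; the recurrent
    # state is truncated before the CPU run so both paths start fresh.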
    unit = Gru(output_size=2)

    def func(x):
        return sum(unit(x))

    set_cuda_active(True)

    g1 = Variable(a)

    g3 = func(g1)
    g3.to_cpu()

    g = g3.grad()
    g_g1 = g.get(g1)
    g_g1.to_cpu()

    set_cuda_active(False)
    unit.truncate()
    c3 = func(g1)
    c = c3.grad()
    c_g1 = c.get(g1)

    close(g3, c3)
    close(c_g1, g_g1)
Example #20
def test_concat(node, axis, use_gpu):
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.concat(node, axis=axis))

    compare(func, node[0], node)
Example #21
def test_div(node, x, raise_error, use_gpu):
    node = Variable(node)
    x = np.array(x)
    set_cuda_active(use_gpu)

    def func_div1(node, x):
        return sum(x / node)

    compare(func_div1, node, node, x)

    def func_div2(node, x):
        return sum(node / x)

    compare(func_div2, node, node, x)

    def func_idiv1(node, x):
        node /= x
        return sum(node)

    try:
        compare(func_idiv1, node, node, x)
        assert not raise_error
    except:
        assert raise_error

    def func_idiv2(node, x):
        x /= node
        return sum(node)

    try:
        compare(func_idiv2, node, node, x)
        assert not raise_error
    except:
        assert raise_error
Example #22
def test_mul(node, x, raise_error, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func_mul1(node, x):
        return sum(x * node)

    compare(func_mul1, node, node, x)

    def func_mul2(node, x):
        return sum(node * x)

    compare(func_mul2, node, node, x)

    def func_imul1(node, x):
        node *= x
        return sum(node)

    try:
        compare(func_imul1, node, node, x)
        assert not raise_error
    except:
        assert raise_error

    def func_imul2(node, x):
        x *= node
        return sum(node)

    try:
        compare(func_imul2, node, node, x)
        assert not raise_error
    except:
        assert raise_error
Example #23
def test_sub(node, x, raise_error, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func_sub1(node, x):
        return sum(x - node)

    compare(func_sub1, node, node, x)

    def func_sub2(node, x):
        return sum(node - x)

    compare(func_sub2, node, node, x)

    def func_isub1(node, x):
        node -= x
        return sum(node)

    try:
        compare(func_isub1, node, node, x)
        assert not raise_error
    except:
        assert raise_error

    def func_isub2(node, x):
        x -= node
        return sum(node)

    try:
        compare(func_isub2, node, node, x)
        assert not raise_error
    except:
        assert raise_error
Example #24
def test_average_pool2d(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    layer = AveragePool2d()

    def func(node):
        return sum(layer(node))
    compare(func, node, node)
Example #25
def test_lrn(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    layer = Lrn()

    def func(node):
        return sum(layer(node))
    compare(func, node, node)
Example #26
def test_embedding(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    layer = rm.Embedding(output_size=2, input_size=2)

    def func(node):
        return sum(layer(node))
    compare(func, layer.params["w"], node)
Example #27
def __call__(self):
    # Enable CUDA, take a GPU id from the shared pool, run the task on that
    # device, and always return the id to the pool when the task finishes.
    set_cuda_active(True)
    with self.gpu_resource:
        self._gpu = self.gpus.pop()
        try:
            with use_device(self._gpu):
                return self._exec()
        finally:
            self.gpus.add(self._gpu)
Example #28
File: server.py Project: clockfly/ReNomRG
def run(self, f, *args, **kwargs):
    # Same GPU-pool pattern as Example #27, with the GPU memory pool released
    # once the job has finished.
    with self.gpu_resource:
        self.active_gpu.id = self.gpus.pop()
        try:
            set_cuda_active(True)
            with use_device(self.active_gpu.id):
                return f(*args, **kwargs)
        finally:
            self.gpus.add(self.active_gpu.id)
            release_mem_pool()
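
Examples #27 and #28 share one pattern: a pool of GPU ids guarded by a shared resource, where each job pops an id, runs with that device active, and returns the id in a finally block. Below is a self-contained sketch of that scheduling pattern; the class name and the threading primitives are assumptions made for illustration, and the ReNom device calls are only indicated in comments:

import threading

class GpuPool:
    # Hands out GPU ids so that at most len(gpu_ids) jobs run at the same time.
    def __init__(self, gpu_ids):
        self._gpus = set(gpu_ids)
        self._sem = threading.BoundedSemaphore(len(gpu_ids))
        self._lock = threading.Lock()

    def run(self, f, *args, **kwargs):
        with self._sem:                    # plays the role of gpu_resource above
            with self._lock:
                gpu_id = self._gpus.pop()  # take a free device id
            try:
                # The real code would call set_cuda_active(True) and wrap the
                # job in use_device(gpu_id) here before executing it.
                return f(*args, **kwargs)
            finally:
                with self._lock:
                    self._gpus.add(gpu_id)  # hand the id back to the pool

A pool created as GpuPool([0, 1]) would then let run() dispatch at most two jobs at once, one per device.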
Example #29
def test_concat(node, x, use_gpu):
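    # rm.concat of two inputs should match np.concatenate along axis 1;
    # compare() then checks the gradient of sum(concat).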
    node = Variable(node)
    set_cuda_active(use_gpu)

    assert np.allclose(rm.concat(node, x), np.concatenate((node, x), 1))

    def func(node, x):
        return sum(rm.concat(node, x))

    compare(func, node, node, x)
Example #30
def test_gpu_node_neg():
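    # Negating a Variable with CUDA active should give the same values as the
    # CPU negation.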
    set_cuda_active(True)
    a = np.array(np.random.rand(10, )).astype(precision)

    g1 = Variable(a)
    g2 = -g1
    g2.to_cpu()

    set_cuda_active(False)
    close(g2, -g1)