Example #1
def test_add(node, x, raise_error, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    # Add
    def func_add1(node, x):
        return sum(x + node)
    compare(func_add1, node, node, x)

    def func_add2(node, x):
        return sum(node + x)
    compare(func_add2, node, node, x)

    def func_iadd1(node, x):
        node += x
        return sum(node)
    try:
        # An assertion error occurs when the shapes mismatch.
        compare(func_iadd1, node, node, x)
    except Exception:
        assert raise_error
    else:
        assert not raise_error

    def func_iadd2(node, x):
        x += node
        return sum(node)
    try:
        # An assertion error occurs when the shapes mismatch.
        compare(func_iadd2, node, node, x)
    except Exception:
        assert raise_error
    else:
        assert not raise_error
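
These snippets come from ReNom's test suite: rm is renom, and Variable and set_cuda_active come from the library, while compare, close, rand, and assert_cuda_active are test-local helpers the listing omits (sum here is presumably ReNom's array sum, rm.sum, not Python's builtin). A minimal sketch of what a compare-style gradient check might look like, as an assumption rather than ReNom's actual helper:

import numpy as np

# Hypothetical sketch of the test-local `compare` helper (not ReNom's code):
# it presumably checks the autodiff gradient of `func` with respect to
# `node` against a central-difference numerical gradient.
def compare(func, node, *args, atol=1e-5, rtol=1e-3, eps=1e-4):
    # Autodiff gradient of the scalar loss with respect to `node`.
    auto_grad = func(*args).grad().get(node)
    # Central-difference numerical gradient, perturbing `node` in place.
    flat = np.asarray(node).reshape(-1)
    num_grad = np.empty(flat.size)
    for i in range(flat.size):
        orig = flat[i]
        flat[i] = orig + eps
        f_plus = float(func(*args))
        flat[i] = orig - eps
        f_minus = float(func(*args))
        flat[i] = orig
        num_grad[i] = (f_plus - f_minus) / (2 * eps)
    assert np.allclose(np.asarray(auto_grad).reshape(-1), num_grad,
                       atol=atol, rtol=rtol)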
Example #2
def test_reshape(node, shape, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.reshape(node, shape)) + sum(node.reshape(shape)) + sum(node.reshape(*shape))
    compare(func, node, node)
Example #3
def test_cross_entropy(node, x, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node, x):
        return rm.cross_entropy(node, x)
    compare(func, node, node, x)
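
For orientation, a plain-numpy version of the assumed loss: elementwise cross entropy -t * log(p), summed. The epsilon and the exact reduction are assumptions; ReNom's rm.cross_entropy may differ in detail.

import numpy as np

def cross_entropy_ref(p, t, eps=1e-8):
    # -t * log(p), summed over all elements (assumed reduction).
    return -np.sum(t * np.log(p + eps))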
Example #4
def test_where(node, x, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node, x):
        return sum(rm.where(node > 0.5, node, x))
    compare(func, node, node, x)
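
rm.where presumably mirrors numpy.where: take values from the second argument where the condition holds, otherwise from the third. With plain numpy:

import numpy as np

a = np.array([0.2, 0.7])
b = np.array([9.0, 9.0])
print(np.where(a > 0.5, a, b))   # [9.  0.7]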
Example #5
def test_leaky_relu_activation(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.leaky_relu(node))
    compare(func, node, node)
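
For reference, leaky ReLU passes positive inputs through and scales negatives by a small slope; the default slope value used here (0.01) is an assumption about rm.leaky_relu:

import numpy as np

def leaky_relu_ref(x, slope=0.01):
    # x for positive inputs, slope * x for negative ones.
    return np.where(x > 0, x, slope * x)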
Example #6
def test_mean_squared_error(node, x, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node, x):
        return rm.mean_squared_error(node, x)
    compare(func, node, node, x)
Example #7
def test_div(node, x, raise_error, use_gpu):
    node = Variable(node)
    x = np.array(x)
    set_cuda_active(use_gpu)

    def func_div1(node, x):
        return sum(x / node)
    compare(func_div1, node, node, x)

    def func_div2(node, x):
        return sum(node / x)
    compare(func_div2, node, node, x)

    def func_idiv1(node, x):
        node /= x
        return sum(node)
    try:
        # An assertion error occurs when the shapes mismatch.
        compare(func_idiv1, node, node, x)
    except Exception:
        assert raise_error
    else:
        assert not raise_error

    def func_idiv2(node, x):
        x /= node
        return sum(node)
    try:
        compare(func_idiv2, node, node, x)
    except Exception:
        assert raise_error
    else:
        assert not raise_error
Example #8
def test_max(node, axis, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.amax(node, axis))
    compare(func, node, node)
Example #9
def test_sub(node, x, raise_error, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func_sub1(node, x):
        return sum(x - node)
    compare(func_sub1, node, node, x)

    def func_sub2(node, x):
        return sum(node - x)
    compare(func_sub2, node, node, x)

    def func_isub1(node, x):
        node -= x
        return sum(node)
    try:
        # An assertion error occurs when the shapes mismatch.
        compare(func_isub1, node, node, x)
    except Exception:
        assert raise_error
    else:
        assert not raise_error

    def func_isub2(node, x):
        x -= node
        return sum(node)
    try:
        compare(func_isub2, node, node, x)
    except Exception:
        assert raise_error
    else:
        assert not raise_error
Example #10
def test_mul(node, x, raise_error, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func_mul1(node, x):
        return sum(x * node)
    compare(func_mul1, node, node, x)

    def func_mul2(node, x):
        return sum(node * x)
    compare(func_mul2, node, node, x)

    def func_imul1(node, x):
        node *= x
        return sum(node)
    try:
        # An assertion error occurs when the shapes mismatch.
        compare(func_imul1, node, node, x)
    except Exception:
        assert raise_error
    else:
        assert not raise_error

    def func_imul2(node, x):
        x *= node
        return sum(node)
    try:
        compare(func_imul2, node, node, x)
    except Exception:
        assert raise_error
    else:
        assert not raise_error
Example #11
def test_max_pool2d(node, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    layer = MaxPool2d(filter=2, padding=1, stride=2)

    def func(node):
        return sum(layer(node))

    for trial in range(3):
        try:
            compare(func, node, node)
            return
        except AssertionError:
            node = Variable(rand(node.shape))
    raise AssertionError("Failed all three attempts.")
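
The retry loop above guards against a known flakiness of gradient-checking max pooling: when two inputs inside a pooling window tie, the numerical and autodiff gradients legitimately disagree, so the test redraws random inputs up to three times. A quick plain-Python illustration of the disagreement at a tie:

eps = 1e-4
x0 = x1 = 1.0
# Central difference at an exact tie gives 0.5 per tied input,
# while autodiff typically routes the full gradient of 1.0 to one input.
num_grad = (max(x0 + eps, x1) - max(x0 - eps, x1)) / (2 * eps)
print(num_grad)  # 0.5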
Example #12
def test_max(node, axis, use_gpu, keep_dimensions):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node):
        return sum(rm.amax(node, axis=axis, keepdims=keep_dimensions))

    def func2(node):
        return sum(rm.amax(node, axis=axis, keepdims=keep_dimensions) + 10)

    compare(func2, node, node)

    def func3(node):
        return sum(rm.amax(node, axis=axis, keepdims=keep_dimensions) * 3 + 15)

    compare(func3, node, node)

    def func4(node):
        return sum(
            rm.amax(node, axis=axis, keepdims=keep_dimensions) +
            rm.amax(node, axis=axis, keepdims=keep_dimensions))

    compare(func4, node, node)

    # A simple check to see if we actually return the maximum
    renom_max = rm.amax(node, axis=axis, keepdims=keep_dimensions).as_ndarray()
    numpy_max = np.amax(node, axis=axis, keepdims=keep_dimensions)
    assert np.allclose(renom_max, numpy_max, atol=1e-5, rtol=1e-3)

    compare(func, node, node)
Example #13
def weight_initiallize(self, input_size):
    size_i = input_size[0] if isinstance(input_size, tuple) else input_size
    size_o = self._output_size
    self.params = {
        "w": Variable(self._initializer((size_i, size_o)),
                      auto_update=True)
    }
Example #14
def test_batch_normalize_featuremap(a):
    layer = rm.BatchNormalize(mode=BATCH_NORMALIZE_FEATUREMAP, momentum=0.1)

    set_cuda_active(True)

    g1 = Variable(a)

    for _ in range(10):
        g3 = layer(g1)
    g3.to_cpu()

    layer.set_models(inference=True)
    g4 = layer(g1)
    layer.set_models(inference=False)

    set_cuda_active(False)
    layer._mov_mean = 0
    layer._mov_std = 0
    for _ in range(10):
        c3 = layer(g1)

    layer.set_models(inference=True)
    c4 = layer(g1)
    layer.set_models(inference=False)

    close(g3, c3)
    close(g4, c4)
    close(g3.attrs._m.new_array(), c3.attrs._m)
    close(g3.attrs._v.new_array(), c3.attrs._v)
    close(g3.attrs._mov_m.new_array(), c3.attrs._mov_m)
    close(g3.attrs._mov_v.new_array(), c3.attrs._mov_v)
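
A minimal sketch of what layer.set_models(inference=...) presumably toggles: batch statistics while training (with a moving average maintained, momentum=0.1 as above), frozen moving statistics at inference. The names and the momentum convention here are illustrative assumptions, not ReNom internals:

import numpy as np

def bn_forward(x, mov_mean, mov_var, inference, momentum=0.1, eps=1e-5):
    if inference:
        mean, var = mov_mean, mov_var              # frozen running statistics
    else:
        mean, var = x.mean(axis=0), x.var(axis=0)  # current batch statistics
        mov_mean = momentum * mean + (1 - momentum) * mov_mean
        mov_var = momentum * var + (1 - momentum) * mov_var
    return (x - mean) / np.sqrt(var + eps), mov_mean, mov_var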
Example #15
def test_sigmoid_activation(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(sigmoid(node))
    compare(func, node, node)
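
For reference, the function under test in plain numpy: sigmoid(x) = 1 / (1 + e^(-x)).

import numpy as np

def sigmoid_ref(x):
    return 1.0 / (1.0 + np.exp(-x))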
Example #16
def test_transpose(node, axis, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(node.transpose(axis))
    compare(func, node, node)
Example #17
def test_T(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(node.T)
    compare(func, node, node)
Example #18
def test_gpu_lstm(a):
    layer = rm.Lstm(output_size=2)

    def func(x):
        loss = 0
        for _ in range(5):
            loss += sum(layer(x))
        layer.truncate()
        return loss

    set_cuda_active(True)

    g1 = Variable(a)

    g3 = func(g1)
    g3.to_cpu()

    g = g3.grad()
    g_g1 = g.get(g1)
    g_g1.to_cpu()

    set_cuda_active(False)
    c3 = func(g1)
    c = c3.grad()
    c_g1 = c.get(g1)

    close(g3, c3)
    close(c_g1, g_g1)
Example #19
def weight_initiallize(self, size_i):
    size_i = size_i[0]
    size_o = self._size_o
    bias = np.zeros((1, size_o * 4), dtype=precision)
    # The second gate block's bias starts at 1 (the common trick of
    # opening the LSTM forget gate early in training).
    bias[:, size_o:size_o * 2] = 1
    self.params = {
        "w":
        Variable(self._initializer((size_i, size_o * 4)),
                 auto_update=True,
                 weight_decay=self._weight_decay),
        "wr":
        Variable(self._initializer((size_o, size_o * 4)),
                 auto_update=True,
                 weight_decay=self._weight_decay)
    }
    if not self._ignore_bias:
        self.params["b"] = Variable(bias, auto_update=True)
Example #20
def test_roi_pool2d(node, rois, use_gpu):
    assert_cuda_active(use_gpu)
    node = Variable(node)
    layer = RoiPool2d(outh=7, outw=5, spatial_scale=0.6)

    def func(node, rois):
        return sum(layer(node, rois))
    compare(func, node, node, rois)
Example #21
def test_average_poolnd(node, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)
    layer = AveragePoolNd()

    def func(node):
        return sum(layer(node))
    compare(func, node, node)
Example #22
def weight_initiallize(self, size_i):
    size_i = size_i[0]
    size_o = self._size_o
    bias = np.ones((1, size_o * 3), dtype=precision)
    # At this point, all connected units in the same layer will use the SAME weights
    self.params = {
        "w":
        Variable(self._initializer((size_i, size_o * 3)),
                 auto_update=True,
                 weight_decay=self._weight_decay),
        "u":
        Variable(self._initializer((1, size_o * 3)),
                 auto_update=True,
                 weight_decay=self._weight_decay),
    }
    if not self._ignore_bias:
        self.params["b"] = Variable(bias, auto_update=True)
Example #23
def weight_initiallize(self, size_i):
    size_i = size_i[0]
    size_o = self._size_o
    bias = np.zeros((1, size_o * 4), dtype=precision)
    # The second gate block's bias starts at 1 (a common LSTM-style
    # forget-gate initialization).
    bias[:, size_o:size_o * 2] = 1
    self.params = {
        "w":
        Variable(self._initializer((size_i, size_o * 4)),
                 auto_update=True),
        "wr":
        Variable(self._initializer((size_o, size_o * 4)),
                 auto_update=True),
        "wc":
        Variable(self._initializer((1, size_o * 3)), auto_update=True),
        "b":
        Variable(bias, auto_update=True),
    }
Example #24
def test_softsign(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.softsign(node))

    compare(func, node, node)
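
For reference, softsign is the bounded squashing function x / (1 + |x|):

import numpy as np

def softsign_ref(x):
    return x / (1.0 + np.abs(x))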
Example #25
def weight_initiallize(self, input_size):
    size_i = [1]
    size_i.extend(input_size)
    if self._mode == BATCH_NORMALIZE_FEATUREMAP and len(size_i) > 2:
        # Feature-map mode: share statistics over the spatial axes,
        # so the parameter shape becomes (1, C, 1, 1).
        size_i[2] = 1
        size_i[3] = 1
    self.params = {
        "w":
        Variable(self._initializer(size_i).astype(precision),
                 auto_update=True,
                 weight_decay=self._weight_decay)
    }
    if not self._ignore_bias:
        self.params["b"] = Variable(np.zeros(size_i, dtype=precision),
                                    auto_update=True)
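
The (1, C, 1, 1) parameter shape produced above matches per-channel statistics taken over the batch and spatial axes. A plain-numpy illustration of that assumed feature-map semantics:

import numpy as np

def batch_norm_featuremap_ref(x, w, b, eps=1e-5):
    # Per-channel statistics over batch and spatial axes -> shape (1, C, 1, 1)
    mean = x.mean(axis=(0, 2, 3), keepdims=True)
    var = x.var(axis=(0, 2, 3), keepdims=True)
    return w * (x - mean) / np.sqrt(var + eps) + b

x = np.random.randn(8, 3, 4, 4)
y = batch_norm_featuremap_ref(x, np.ones((1, 3, 1, 1)), np.zeros((1, 3, 1, 1)))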
Example #26
def test_swish_activation(node, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node):
        return sum(rm.swish(node))

    compare(func, node, node)
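
For reference, swish (with beta = 1) is x * sigmoid(x):

import numpy as np

def swish_ref(x):
    return x * (1.0 / (1.0 + np.exp(-x)))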
Example #27
def test_softmax(node, x, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node, x):
        return rm.cross_entropy(rm.softmax(node), x)

    compare(func, node, node, x)
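
For reference, a numerically stable softmax in plain numpy; taking the class axis as axis 1 is an assumption about the test data layout:

import numpy as np

def softmax_ref(z):
    z = z - z.max(axis=1, keepdims=True)   # stabilize before exponentiating
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)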
Example #28
def test_smooth_l1_no_reduce(node, x, delta, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node, x):
        return sum(rm.smoothed_l1(node, x, delta, reduce_sum=False))

    compare(func, node, node, x)
Example #29
def test_smooth_l1(node, x, delta, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node, x):
        return rm.smoothed_l1(node, x, delta)

    compare(func, node, node, x)
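
For reference, a Huber-style definition of the smoothed L1 loss with threshold delta, quadratic near zero and linear beyond it. This is an assumption; rm.smoothed_l1 may differ in scaling or reduction details:

import numpy as np

def smoothed_l1_ref(z, t, delta=1.0):
    d = np.abs(z - t)
    # 0.5 * d^2 inside the threshold, delta * (d - 0.5 * delta) outside.
    return np.where(d < delta, 0.5 * d ** 2, delta * (d - 0.5 * delta))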
Example #30
def test_maxout(node, axis, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node):
        return sum(rm.maxout(node, axis=axis))

    compare(func, node, node)