Example #1
def test_gpu_node_dot(a, b):
    set_cuda_active(True)

    g1 = Variable(a)
    g2 = Variable(b)

    g3 = dot(g1, g2)
    g4 = rm.sum(g3)
    g = g4.grad()
    g_g1 = g.get(g1)
    g_g2 = g.get(g2)
    g_g3 = g.get(g3)
    g3.to_cpu()
    g4.to_cpu()

    set_cuda_active(False)
    c3 = dot(g1, g2)
    c4 = rm.sum(c3)
    c = c4.grad()
    c_g1 = c.get(g1)
    c_g2 = c.get(g2)
    c_c3 = c.get(c3)

    close(g3, c3)
    close(g4, c4)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_c3, g_g3)
Example #2
def test_layer_normalize(node, use_gpu):
    node = Variable(node * 50)
    assert_cuda_active(use_gpu)

    layer = LayerNormalize()
    layer2 = Dense(4)
    layer3 = Conv2d(channel=3)

    def func(node):
        ret = layer(node)
        if len(ret.shape) > 2:
            return sum(layer3(ret))
        else:
            return sum(layer2(ret))
    a = 1e-5
    r = 1e-3
    if use_gpu:
        a = 1e-2
        r = 1e-3
    for trial in range(3):
        try:
            compare(func, node, node, atol=a, rtol=r)
            compare(func, layer.params["gain"], node)
            compare(func, layer.params["bias"], node)
            return
        except AssertionError:
            node = Variable(rand(node.shape))
    assert False
Example #3
def test_max_unpoolnd(node, use_gpu):
    assert_cuda_active(use_gpu)
    node = Variable(node)

    l0 = MaxPoolNd(kernel=2, padding=1, stride=1)
    l1 = MaxUnPoolNd()
    l2 = Dense(2)
    np.set_printoptions(suppress=True)

    def func(node):
        ret = node
        reta = l0(node)
        ret = l1(reta, reta)
        ret = l2(ret.reshape(ret.shape[0], -1))
        ret = sum(ret)
        return ret

    for trial in range(3):
        try:
            compare(func, node, node)
            return
        except AssertionError as e:
            print(e)
            node = Variable(rand(node.shape))
    raise AssertionError("Failed all attempts.")
Example #4
def test_gpu_broadcast(a, b):
    set_cuda_active(True)

    g1 = Variable(a)
    g2 = Variable(b)

    assert np.allclose(a + b, (g1 + g2))
Example #5
def test_copy_from_cpu():
    src = Variable(rand((100, )))

    dest = Variable(rand((100, )))
    dest.copy_from(src)

    close(src, dest)
Example #6
    def weight_initiallize(self, input_size):
        # The first dimension is to allow different types of uncorrelated images as inputs, such as RGB information.
        # After this dimension, the image data is assumed to be meaningfully correlated.
        self._dims = len(input_size[1:])
        if is_cuda_active():
            assert self._dims < 4, "GPU Version currently only supports 2 and 3 dimensions"

        if self._dims == 1 and is_cuda_active():
            padding, stride, filter = self._initial_value
            self._kernel = np.append(filter, 1).astype(np.int32)
            self._padding = np.append(padding, 0).astype(np.int32)
            self._stride = np.append(stride, 1).astype(np.int32)

        def func(var):
            return check_input(var, self._dims)
        self._kernel, self._padding, self._stride = map(
            func, [self._kernel, self._padding, self._stride])

        assert all([s > 0 for s in input_size[1:]]), \
            "The shape of input array {} is too small. Please give an array whose size is larger than 0.".format(
                input_size[1:])

        f_lst = [self._channel, input_size[0]]
        f_lst.extend(self._kernel)
        size_f = tuple(f_lst)
        size_b = tuple([1, self._channel] + [1 for _ in range(self._dims)])

        self.params = {"w": Variable(self._initializer(size_f), auto_update=True)}
        if not self._ignore_bias:
            self.params["b"] = Variable(np.zeros(size_b), auto_update=True)
Example #7
    def weight_initiallize(self, input_size):
        size_i = input_size[0] if isinstance(input_size, tuple) else input_size
        size_o = self._output_size
        self.params = {
            "w": Variable(self._initializer((size_i, size_o)), auto_update=True),
            "b": Variable(np.zeros((1, size_o)).astype(precision), auto_update=True)
        }
Example #8
File: lstm.py Project: AnakTeka/ReNom
    def weight_initiallize(self, size_i):
        size_i = size_i[0]
        size_o = self._size_o
        bias = np.zeros((1, size_o * 4), dtype=precision)
        bias[:, size_o:size_o * 2] = 1
        self.params = {
            "w": Variable(self._initializer((size_i, size_o * 4)), auto_update=True),
            "wr": Variable(self._initializer((size_o, size_o * 4)), auto_update=True),
            "b": Variable(bias, auto_update=True),
        }
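The line bias[:, size_o:size_o * 2] = 1 pre-sets one of the four packed gate-bias blocks to 1, a standard LSTM initialization trick (usually aimed at the forget gate so the cell initially retains its state). Which gate the second block maps to depends on ReNom's internal gate ordering, so treat the labeling below as an assumption. A minimal NumPy sketch of the layout:

import numpy as np

size_o = 4  # hypothetical number of LSTM units

# Four per-gate bias blocks packed side by side: shape (1, size_o * 4).
bias = np.zeros((1, size_o * 4), dtype=np.float32)

# Second block set to 1 (assumed here to be the forget gate).
bias[:, size_o:size_o * 2] = 1

# Splitting recovers the four (1, size_o) blocks.
print([b.ravel().tolist() for b in np.split(bias, 4, axis=1)])
# [[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [0.0, ...], [0.0, ...]]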
Example #9
def test_node_dump():
    DEBUG_GRAPH_INIT(True)

    a = Variable(np.array([1, 2, 3, 4, 5]))
    b = Variable(np.array([1, 2, 3, 4, 5]))
    c = a + b  # NOQA

    d = a + b * 2  # NOQA

    DEBUG_NODE_STAT()
Example #10
def test_dot(node, x, use_gpu):
    node = Variable(node)
    x = Variable(x)

    assert_cuda_active(use_gpu)

    def func(node, x):
        return sum(rm.dot(node, x))
    compare(func, node, node, x)
    compare(func, x, node, x)
Example #11
    def weight_initiallize(self, input_size):
        size_f = (self._channel, input_size[0], self._kernel[0],
                  self._kernel[1])
        self.params = {
            "w": Variable(self._initializer(size_f), auto_update=True),
            "b": Variable(np.zeros((1, self._channel, 1, 1), dtype=precision),
                          auto_update=True)
        }
Example #12
    def weight_initiallize(self, size_i):
        size_i = size_i[0]
        size_o = self._size_o
        bias = np.zeros((1, size_o * 4), dtype=precision)
        bias[:, size_o:size_o * 2] = 1
        self.params = {
            "w": Variable(self._initializer((size_i, size_o * 4)), auto_update=True, weight_decay=self._weight_decay),
            "wr": Variable(self._initializer((size_o, size_o * 4)), auto_update=True, weight_decay=self._weight_decay),
            "wc": Variable(self._initializer((1, size_o * 3)), auto_update=True, weight_decay=self._weight_decay),
        }
        if not self._ignore_bias:
            self.params["b"] = Variable(bias, auto_update=True)
Example #13
    def weight_initiallize(self, size_i):
        size_i = size_i[0]
        size_o = self._size_o
        bias = np.zeros((1, size_o * 3), dtype=precision)
        # At this point, all connected units in the same layer will use the SAME weights
        self.params = {
            "w": Variable(self._initializer((size_i, size_o * 3)), auto_update=True, weight_decay=self._weight_decay),
            "u": Variable(self._initializer((size_o, size_o * 3)), auto_update=True, weight_decay=self._weight_decay),
        }
        if not self._ignore_bias:
            self.params["b"] = Variable(bias, auto_update=True)
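The single (size_i, size_o * 3) matrix packs the weights of all three GRU gates into one Variable; the forward pass (see _oper_cpu in Example #29 below) recovers the per-gate blocks with np.split. A minimal sketch of that split, with hypothetical sizes:

import numpy as np

size_i, size_o = 5, 4  # hypothetical input and output sizes
w = np.random.rand(size_i, size_o * 3)

# Split the packed matrix into the z, r, and h gate blocks.
m = w.shape[1] // 3
w_z, w_r, w_h = np.split(w, [m, m * 2], axis=1)

print(w_z.shape, w_r.shape, w_h.shape)  # (5, 4) (5, 4) (5, 4)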
Example #14
def test_node_clear():
    DEBUG_GRAPH_INIT(True)

    a = Variable(np.random.rand(2, 2).astype(np.float32))
    b = Variable(np.random.rand(2, 2).astype(np.float32))

    layer = R.Lstm(2)

    c = layer(O.dot(a, b))  # NOQA

    DEBUG_NODE_STAT()
Example #15
    def weight_initiallize(self, input_size):
        size_f = (input_size[0], self._channel, self._kernel[0],
                  self._kernel[1])
        self.params = {
            "w": Variable(self._initializer(size_f),
                          auto_update=True,
                          weight_decay=self._weight_decay)
        }
        if not self._ignore_bias:
            self.params["b"] = Variable(np.zeros((1, self._channel, 1, 1),
                                                 dtype=precision),
                                        auto_update=True)
Example #16
def test_copy_from_gpu():
    set_cuda_active(True)

    src = Variable(rand((100, )))
    src.to_gpu()

    dest = Variable(rand((100, )))
    dest.to_gpu()

    dest.copy_from(src)
    close(src, dest)

    close(src._gpu.new_array(), dest._gpu.new_array())
Example #17
def test_mean_squared_error_no_reduce(node, x, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node, x):
        return sum(rm.mean_squared_error(node, x, reduce_sum=False))
    for trial in range(3):
        try:
            compare(func, node, node, x)
            return
        except AssertionError:
            node = Variable(rand(node.shape))
    assert False
Example #18
    def weight_initiallize(self, input_size):
        size_i = input_size[0] if isinstance(input_size, tuple) else input_size
        size_o = self._output_size
        self.params = {
            "w": Variable(self._initializer((size_i, size_o)),
                          auto_update=True,
                          weight_decay=self._weight_decay)
        }
        if not self._ignore_bias:
            self.params["b"] = Variable(np.zeros((1, size_o)).astype(precision),
                                        auto_update=True)
Example #19
def test_copy_from_another_gpu():
    set_cuda_active(True)

    src = Variable(rand((100, )))
    src.to_gpu()

    with use_device(1):
        dest = Variable(rand((100, )))
        dest.to_gpu()

    dest.copy_from(src)
    close(src, dest)

    close(src._gpu.new_array(), dest._gpu.new_array())
Example #20
    def weight_initiallize(self, input_size):
        size_i = [1]
        size_i.extend(input_size)
        if self._mode == BATCH_NORMALIZE_FEATUREMAP and len(size_i) > 2:
            size_i[2] = 1
            size_i[3] = 1
        self.params = {
            "w": Variable(self._initializer(size_i).astype(precision),
                          auto_update=True),
            "b": Variable(np.zeros(size_i, dtype=precision), auto_update=True)
        }
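In feature-map mode the gain and bias collapse to a single value per channel, so they broadcast over the spatial dimensions. A quick check of the shape logic with a hypothetical convolutional input (featuremap_mode stands in for the BATCH_NORMALIZE_FEATUREMAP comparison):

input_size = (3, 32, 32)    # hypothetical: 3 channels of 32x32 feature maps

size_i = [1]
size_i.extend(input_size)   # [1, 3, 32, 32]

featuremap_mode = True
if featuremap_mode and len(size_i) > 2:
    size_i[2] = 1
    size_i[3] = 1

print(size_i)  # [1, 3, 1, 1] -- one gain/bias entry per channel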
Example #21
def test_max_pool2d(node, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    layer = MaxPool2d(filter=2, padding=1, stride=2)

    def func(node):
        return sum(layer(node))
    for trial in range(3):
        try:
            compare(func, node, node)
            return
        except AssertionError:
            node = Variable(rand(node.shape))
    raise AssertionError("Failed all three attempts.")
Example #22
def test_div(node, x, raise_error, use_gpu):
    node = Variable(node)
    x = np.array(x)
    assert_cuda_active(use_gpu)

    def func_div1(node, x):
        return sum(x / node)
    compare(func_div1, node, node, x)

    def func_div2(node, x):
        return sum(node / x)
    compare(func_div2, node, node, x)

    def func_idiv1(node, x):
        node /= x
        return sum(node)
    try:
        compare(func_idiv1, node, node, x)
        assert not raise_error
    except Exception:
        assert raise_error

    def func_idiv2(node, x):
        x /= node
        return sum(node)
    try:
        compare(func_idiv2, node, node, x)
        assert not raise_error
    except Exception:
        assert raise_error
Example #23
def test_softsign(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.softsign(node))
    compare(func, node, node)
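For reference, softsign is commonly defined as x / (1 + |x|). A minimal NumPy version that rm.softsign could be compared against (an illustration, not ReNom code):

import numpy as np

def softsign_ref(x):
    # softsign(x) = x / (1 + |x|); smooth, bounded in (-1, 1)
    return x / (1 + np.abs(x))

print(softsign_ref(np.array([-2.0, 0.0, 2.0])))  # approx. [-0.667, 0., 0.667]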
Example #24
def test_swish_activation(node, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node):
        return sum(rm.swish(node))
    compare(func, node, node)
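Similarly, swish is commonly defined as x * sigmoid(x). A minimal reference sketch (again illustrative, not the ReNom implementation):

import numpy as np

def swish_ref(x):
    # swish(x) = x * sigmoid(x) = x / (1 + exp(-x))
    return x / (1 + np.exp(-x))

print(swish_ref(np.array([-1.0, 0.0, 1.0])))  # approx. [-0.269, 0., 0.731]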
Example #25
def test_softmax(node, x, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node, x):
        return rm.cross_entropy(rm.softmax(node), x)
    compare(func, node, node, x)
Example #26
def test_leaky_relu_activation(node, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node):
        return sum(rm.leaky_relu(node))
    compare(func, node, node)
Example #27
def test_mul(node, x, raise_error, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func_mul1(node, x):
        return sum(x * node)
    compare(func_mul1, node, node, x)

    def func_mul2(node, x):
        return sum(node * x)
    compare(func_mul2, node, node, x)

    def func_imul1(node, x):
        node *= x
        return sum(node)
    try:
        compare(func_imul1, node, node, x)
        assert not raise_error
    except Exception:
        assert raise_error

    def func_imul2(node, x):
        x *= node
        return sum(node)
    try:
        compare(func_imul2, node, node, x)
        assert not raise_error
    except Exception:
        assert raise_error
Example #28
def test_smooth_l1_no_reduce(node, x, delta, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node, x):
        return sum(rm.smoothed_l1(node, x, delta, reduce_sum=False))
    compare(func, node, node, x)
Example #29
    def _oper_cpu(cls, x, pz, w, u, b):
        # Initialize Variables
        m = w.shape[1] // 3
        w_z, w_r, w_h = np.split(w, [m, m * 2], axis=1)
        u_z, u_r, u_h = np.split(u, [m, m * 2], axis=1)
        hminus = Variable(np.zeros((x.shape[0], w.shape[1] // 3),
                                   dtype=precision)) if pz is None else pz

        b_z, b_r, b_h = np.split(b, [m, m * 2], axis=1) if b is not None else (0, 0, 0)
        A = dot(x, w_z) + dot(hminus, u_z) + b_z
        B = dot(x, w_r) + dot(hminus, u_r) + b_r
        C = dot(x, w_h) + sigmoid(B) * dot(hminus, u_h) + b_h

        h = sigmoid(A) * hminus + (1 - sigmoid(A)) * tanh(C)

        # Store Variables for Graph
        ret = cls._create_node(h)
        ret.attrs._x = x
        ret.attrs._w = w
        ret.attrs._w_z = w_z
        ret.attrs._w_r = w_r
        ret.attrs._w_h = w_h
        ret.attrs._u = u
        ret.attrs._u_z = u_z
        ret.attrs._u_h = u_h
        ret.attrs._u_r = u_r
        ret.attrs._pz = hminus
        ret.attrs._A = A
        ret.attrs._B = B
        ret.attrs._C = C

        if b is not None:
            ret.attrs._b = b

        return ret
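Read as equations, the forward pass above computes (a direct transcription of the code, with z, r, and the candidate state named after the _z/_r/_h attribute suffixes; note that in this convention sigmoid(A) weights the previous state):

    z_t = sigmoid(x W_z + h_prev U_z + b_z)          # A
    r_t = sigmoid(x W_r + h_prev U_r + b_r)          # B
    c_t = tanh(x W_h + r_t * (h_prev U_h) + b_h)     # tanh(C)
    h_t = z_t * h_prev + (1 - z_t) * c_t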
Example #30
def test_sigmoid_activation(node, use_gpu):
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def func(node):
        return sum(sigmoid(node))
    compare(func, node, node)
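All of these tests funnel through compare, which, judging by the atol/rtol keywords in Example #2, checks analytic gradients against a numerical estimate. A minimal sketch of central-difference gradient checking for the sigmoid case, under that assumption (plain NumPy, not the actual test helper):

import numpy as np

def sigmoid_ref(x):
    return 1 / (1 + np.exp(-x))

def numeric_grad(f, x, eps=1e-5):
    # Central differences, one coordinate at a time.
    g = np.zeros_like(x)
    for i in np.ndindex(*x.shape):
        x[i] += eps
        fp = f(x)
        x[i] -= 2 * eps
        fm = f(x)
        x[i] += eps       # restore the original value
        g[i] = (fp - fm) / (2 * eps)
    return g

x = np.random.rand(2, 3)
analytic = sigmoid_ref(x) * (1 - sigmoid_ref(x))   # d/dx of sum(sigmoid(x))
numeric = numeric_grad(lambda v: np.sum(sigmoid_ref(v)), x)
assert np.allclose(analytic, numeric, atol=1e-5)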