Example #1
    def test_simple_mul(self):
        a = Array.rand(256, 256).astype(np.float32)
        b = Array.rand(256, 256).astype(np.float32)

        actual = a * b
        expected = np.multiply(a, b)
        self._check(actual, expected)
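Every example below relies on a self._check helper that the snippets do not show. A minimal sketch of what it presumably does, assuming it simply wraps np.testing.assert_allclose with a float32-friendly tolerance (the later ReLU-layer examples call assert_allclose directly); the mixin name and tolerances here are hypothetical:

    import numpy as np

    class CheckMixin(object):
        # hypothetical helper: compare an Array result against a NumPy reference
        def _check(self, actual, expected, rtol=1e-4, atol=1e-4):
            np.testing.assert_allclose(np.asarray(actual), np.asarray(expected),
                                       rtol=rtol, atol=atol)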
Example #2
    def test_simple_add(self):
        a = Array.rand(256, 256).astype(np.float32)
        b = Array.rand(256, 256).astype(np.float32)

        actual = a + b
        expected = np.add(a, b)
        self._check(actual, expected)
Example #3
    def test_blase(self):
        A = Array.rand(256, 256).astype(np.float32)
        x = Array.rand(256, 256).astype(np.float32)
        actual = Array.rand(256, 256).astype(np.float32)
        gemm(A, x, actual, 1.0, 0.0, 256, 256, 256)

        self._check(actual, np.dot(A, x))
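From the call and the check, gemm(A, B, C, alpha, beta, M, N, K) appears to compute C = alpha * A.dot(B) + beta * C in place; that interpretation is inferred from this one test, not from the library's documentation. A plain-NumPy sketch of that contract:

    import numpy as np

    def reference_gemm(A, B, C, alpha, beta, M, N, K):
        # hypothetical reference: C <- alpha * (M,K) x (K,N) product + beta * C
        C[:M, :N] = alpha * A[:M, :K].dot(B[:K, :N]) + beta * C[:M, :N]

    A = np.random.rand(256, 256).astype(np.float32)
    x = np.random.rand(256, 256).astype(np.float32)
    C = np.random.rand(256, 256).astype(np.float32)
    reference_gemm(A, x, C, 1.0, 0.0, 256, 256, 256)
    np.testing.assert_allclose(C, A.dot(x), rtol=1e-5)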
Example #4
    def test_no_padding(self):
        a = (Array.rand(256, 256) * 255).astype(np.float32)
        weights = (Array.rand(5, 5) * 2).astype(np.float32)
        actual = Array.zeros((254, 254), np.float32)
        convolve(a, weights, actual, (0, 0), (1, 1))

        expected = signal.convolve(a, np.fliplr(np.flipud(weights)),
                                   mode='same')[2:, 2:]
        self._check(actual, expected)
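The reference flips the weights along both axes before handing them to scipy: true convolution with a doubly-flipped kernel is the same as cross-correlation, which is what convolution layers (and, judging from these tests, this convolve) actually compute. A quick, self-contained check of that identity:

    import numpy as np
    from scipy import signal

    a = np.random.rand(16, 16).astype(np.float32)
    w = np.random.rand(5, 5).astype(np.float32)

    # cross-correlation == convolution with the kernel reversed on both axes
    corr = signal.correlate(a, w, mode='same', method='direct')
    conv = signal.convolve(a, np.fliplr(np.flipud(w)), mode='same', method='direct')
    np.testing.assert_allclose(corr, conv, rtol=1e-5)

The [2:, 2:] slice in the test then trims the 'same'-mode output so that its shape matches the 254 x 254 buffer the unpadded convolve writes.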
Example #5
    def test_no_padding(self):
        a = (Array.rand(256, 256) * 255).astype(np.float32)
        weights = (Array.rand(5, 5) * 2).astype(np.float32)
        actual = Array.zeros((254, 254), np.float32)
        convolve(a, weights, actual, (0, 0), (1, 1))

        expected = signal.convolve(a,
                                   np.fliplr(np.flipud(weights)),
                                   mode='same')[2:, 2:]
        self._check(actual, expected)
Example #6
    def test_simple(self):
        a = (Array.rand(256, 256) * 255).astype(np.float32)
        weights = (Array.rand(3, 3) * 2).astype(np.float32)
        actual = Array.zeros(a.shape, np.float32)
        convolve(a, weights, actual, (1, 1), (1, 1))

        expected = signal.convolve(a, np.fliplr(np.flipud(weights)),
                                   mode='same')

        self._check(actual, expected)
Example #7
    def test_simple(self):
        a = (Array.rand(256, 256) * 255).astype(np.float32)
        weights = (Array.rand(3, 3) * 2).astype(np.float32)
        actual = Array.zeros(a.shape, np.float32)
        convolve(a, weights, actual, (1, 1), (1, 1))

        expected = signal.convolve(a,
                                   np.fliplr(np.flipud(weights)),
                                   mode='same')

        self._check(actual, expected)
Example #8
    def test_multiple_nested(self):
        def matrix_mult_complex(A, B, C):
            return dot(dot(A, B), C)

        A = Array.rand(3, 3)
        B = Array.rand(3, 3)
        C = Array.rand(3, 3)

        expected = matrix_mult_complex(A, B, C)
        actual = dgemmify(matrix_mult_complex)(A, B, C)

        self._check(actual, expected)
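dgemmify appears to be a decorator that rewrites chained dot calls into BLAS GEMM calls. Done by hand, that lowering looks roughly like the sketch below, with scipy's BLAS wrappers standing in for whatever backend the project actually targets:

    import numpy as np
    from scipy.linalg import blas

    A, B, C = (np.random.rand(3, 3) for _ in range(3))

    ab = blas.dgemm(1.0, A, B)     # alpha * A @ B
    abc = blas.dgemm(1.0, ab, C)   # alpha * (A @ B) @ C
    np.testing.assert_allclose(abc, np.dot(np.dot(A, B), C), rtol=1e-12)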
Example #9
    def test_simple(self):
        A = Array.rand(256, 256).astype(np.float32)
        x = Array.rand(256, 256).astype(np.float32)
        b = Array.rand(256, 256).astype(np.float32)

        @blasc
        def fn(A, x, b):
            v1 = T(A)
            v2 = dot(v1, x)
            v3 = v2 - b
            return v3

        self._check(fn(A, x, b), np.transpose(A).dot(x) - b)
Example #10
    def test_simple(self):
        a = Array.rand(256, 256).astype(np.float32)

        actual = transpose(a)
        expected = np.transpose(a)

        self._check(actual, expected)
Example #11
    def test_nonzero_slope(self):
        bottom = Array.rand(256, 256).astype(np.float32) * 255
        actual = Array.zeros(bottom.shape, np.float32)
        relu(bottom, bottom, actual, 2.4)
        expected = np.clip(bottom, 0.0, float('inf')) + \
            2.4 * np.clip(bottom, float('-inf'), 0.0)
        self._check(actual, expected)
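The expected value is the leaky ReLU written as max(x, 0) + slope * min(x, 0). The same thing expressed with np.where, as a sanity check of that identity:

    import numpy as np

    x = np.random.rand(4, 4).astype(np.float32) * 2 - 1
    slope = 2.4
    clipped = np.clip(x, 0.0, np.inf) + slope * np.clip(x, -np.inf, 0.0)
    branched = np.where(x > 0, x, slope * x)
    np.testing.assert_allclose(clipped, branched, rtol=1e-6)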
Example #12
    def test_nonzero_slope(self):
        bottom = Array.rand(256, 256).astype(np.float32) * 255
        actual = Array.zeros(bottom.shape, np.float32)
        relu(bottom, bottom, actual, 2.4)
        expected = np.clip(bottom, 0.0, float('inf')) + \
            2.4 * np.clip(bottom, float('-inf'), 0.0)
        self._check(actual, expected)
Example #13
    def test_two_inputs(self):
        a = Array.rand(256, 256).astype(np.float32) * 255.0 - 128.0
        b = Array.rand(256, 256).astype(np.float32) * 255.0 - 128.0
        negative_slope = 0.0

        @smap2
        def fn(x, y):
            if x > 0:
                return y
            else:
                return negative_slope * y

        actual = fn(a, b)
        expected = np.copy(b)  # copy so the reference does not alias and mutate the input
        expected[a <= 0] *= negative_slope
        self._check(actual, expected)
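smap2 apparently lifts a scalar function of two arguments into an elementwise map over two arrays, with negative_slope captured from the enclosing scope. In plain NumPy the same selection is a single np.where; a small sketch of the equivalence:

    import numpy as np

    a = np.random.rand(4, 4).astype(np.float32) * 255.0 - 128.0
    b = np.random.rand(4, 4).astype(np.float32) * 255.0 - 128.0
    negative_slope = 0.0

    # elementwise: keep y where x > 0, otherwise scale it by negative_slope
    where_version = np.where(a > 0, b, negative_slope * b)

    masked_version = np.copy(b)
    masked_version[a <= 0] *= negative_slope
    np.testing.assert_allclose(where_version, masked_version)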
Example #14
    def test_simple_add_scalar(self):
        a = Array.rand(256, 256).astype(np.float32)

        actual = a + 3.0
        expected = np.add(a, 3.0)
        self._check(actual, expected)

        actual = 3.0 + a
        self._check(actual, expected)
Example #15
    def test_simple_mul_scalar(self):
        a = Array.rand(256, 256).astype(np.float32)

        actual = a * 3.0
        expected = np.multiply(a, 3.0)
        self._check(actual, expected)

        actual = 3.0 * a
        self._check(actual, expected)
Example #16
    def test_forward_simple(self):
        channels = 12
        height = 3
        width = 5
        bottom = Array.rand(
            5, channels, height, width).astype(np.float32)
        bottom = bottom * 256 - 128
        layer = ReluLayer(self.layer[3])
        actual = Array.zeros(layer.get_top_shape(bottom), np.float32)
        layer.forward(bottom, actual)
        expected = np.clip(bottom, 0.0, float('inf')).astype(np.float32)
        np.testing.assert_allclose(actual, expected)
Example #17
    def _forward_test(self, param, in_shape):
        conv_param = param.convolution_param
        in_batch = Array.rand(*in_shape).astype(np.float32) * 255
        conv = ConvLayer(param)
        top_shape = conv.get_top_shape(in_batch)
        expected_conv = NaiveConv(conv_param)
        actual = Array.zeros(top_shape, np.float32)
        expected = Array.zeros(top_shape, np.float32)

        conv.setup(in_batch, actual)
        conv.forward(in_batch, actual)
        expected_conv(in_batch, conv.weights, conv.bias, expected)
        self._check(actual, expected)
Example #18
    def test_simple(self):
        a = Array.rand(256, 256).astype(np.float32) * 255
        actual_mask = Array.zeros((254, 254), np.float32)
        actual = Array.zeros((254, 254), np.float32)
        actual.fill(float('-inf'))
        expected_mask = Array.zeros((254, 254), np.float32)
        expected = Array.zeros((254, 254), np.float32)
        expected.fill(float('-inf'))

        max_pool(a, actual, actual_mask, (2, 2))
        py_max_pool(a, expected, expected_mask, (2, 2), (1, 1), (0, 0))
        self._check(actual, expected)
        self._check(actual_mask, expected_mask)
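py_max_pool itself is not shown. Below is a minimal sketch of dense, stride-1, unpadded max pooling that also records an argmax mask in the Caffe style (a flat index into the input per output element). The k x k window and the H - k + 1 output size are assumptions made for the sketch and may not match the shape convention of max_pool/py_max_pool (note the 254 x 254 buffers above):

    import numpy as np

    def naive_max_pool(a, kernel=(2, 2)):
        # stride-1, unpadded max pooling; mask stores the flat input index of each max
        kh, kw = kernel
        out_h, out_w = a.shape[0] - kh + 1, a.shape[1] - kw + 1
        top = np.full((out_h, out_w), -np.inf, np.float32)
        mask = np.zeros((out_h, out_w), np.float32)
        for y in range(out_h):
            for x in range(out_w):
                window = a[y:y + kh, x:x + kw]
                idx = int(np.argmax(window))
                top[y, x] = window.flat[idx]
                mask[y, x] = (y + idx // kw) * a.shape[1] + (x + idx % kw)
        return top, mask

    top, mask = naive_max_pool(np.random.rand(8, 8).astype(np.float32) * 255)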
Example #19
    def _forward_test(self, param, in_shape):
        conv_param = param.convolution_param
        in_batch = Array.rand(*in_shape).astype(np.float32) * 255
        conv = ConvLayer(param)
        top_shape = conv.get_top_shape(in_batch)
        expected_conv = NaiveConv(conv_param)
        actual = Array.zeros(top_shape, np.float32)
        expected = Array.zeros(top_shape, np.float32)

        conv.setup(in_batch, actual)
        conv.forward(in_batch, actual)
        expected_conv(in_batch, conv.weights, conv.bias, expected)
        self._check(actual, expected)
Example #20
    def test_simple(self):
        a = Array.rand(256, 256).astype(np.float32)

        @smap
        def fn(x):
            if x > 0:
                return x
            else:
                return 0

        actual = fn(a)
        expected = np.copy(a)
        expected[expected < 0] = 0
        self._check(actual, expected)
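smap seems to be the single-input counterpart of smap2: it lifts a scalar function into an elementwise map over an array. This particular branch is plain ReLU, so the NumPy equivalent is just np.maximum:

    import numpy as np

    a = np.random.rand(4, 4).astype(np.float32) * 2 - 1
    relu_ref = np.maximum(a, 0)      # x if x > 0 else 0, elementwise
    masked = np.copy(a)
    masked[masked < 0] = 0
    np.testing.assert_allclose(relu_ref, masked)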
Example #21
    def test_simple(self):
        a = Array.rand(256, 256).astype(np.float32) * 255
        actual_mask = Array.zeros((254, 254), np.float32)
        actual = Array.zeros((254, 254), np.float32)
        actual.fill(float('-inf'))
        expected_mask = Array.zeros((254, 254), np.float32)
        expected = Array.zeros((254, 254), np.float32)
        expected.fill(float('-inf'))

        max_pool(a, actual, actual_mask, (2, 2))
        py_max_pool(a, expected, expected_mask,
                    (2, 2), (1, 1), (0, 0))
        self._check(actual, expected)
        self._check(actual_mask, expected_mask)
Example #22
    def test_backward_simple(self):
        channels = 12
        height = 3
        width = 5
        bottom = Array.rand(
            5, channels, height, width).astype(np.float32)
        bottom = bottom * 256 - 128

        top_diff = Array.rand(
            5, channels, height, width).astype(np.float32)
        top_diff = top_diff * 256 - 128

        top = np.zeros(top_diff.shape, np.float32)
        actual = Array.zeros(bottom.shape, np.float32)

        layer = ReluLayer(self.layer[3])
        layer.backward(bottom, actual, top, top_diff)

        expected = np.multiply(top_diff,
                               np.greater(bottom, Array.zeros(bottom.shape,
                                                              np.float32)))

        np.testing.assert_allclose(actual, expected)
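The reference gradient is the incoming top_diff masked by where the input was positive, i.e. the derivative of ReLU. Restated on plain NumPy arrays:

    import numpy as np

    bottom = np.random.rand(5, 12, 3, 5).astype(np.float32) * 256 - 128
    top_diff = np.random.rand(5, 12, 3, 5).astype(np.float32) * 256 - 128

    # d(relu(x))/dx is 1 where x > 0 and 0 elsewhere, so the gradient is a masked copy
    bottom_diff = top_diff * (bottom > 0)
    reference = np.multiply(top_diff, np.greater(bottom, np.zeros(bottom.shape, np.float32)))
    np.testing.assert_allclose(bottom_diff, reference)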
Example #23
    def test_simple(self):
        bottom = Array.rand(3, 8, 32, 32).astype(np.float32)
        actual = Array.zeros_like(bottom)
        layer = LRNLayer(self.layer[4])
        param = layer.layer_param.lrn_param
        alpha = param.alpha
        size = param.local_size
        beta = param.beta
        layer.setup(bottom, actual)
        layer.forward(bottom, actual)
        for n in range(bottom.shape[0]):
            for c in range(bottom.shape[1]):
                for h in range(bottom.shape[2]):
                    for w in range(bottom.shape[3]):
                        c_start = c - (size - 1) // 2
                        c_end = min(c_start + size, bottom.shape[1])
                        # clamp the start of the channel window to index 0
                        c_start = max(c_start, 0)
                        scale = 1
                        for i in range(c_start, c_end):
                            value = bottom[n, i, h, w]
                            scale += value * value * alpha / size
                        expected = bottom[n, c, h, w] / pow(scale, beta)
                        self.assertTrue(
                            abs(actual[n, c, h, w] - expected) < 1e-4)
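The quadruple loop spells out Caffe-style cross-channel LRN: each value is divided by (1 + alpha/size * sum of squares over a window of size neighboring channels) raised to beta. A reference that vectorises over everything except the channel window, with the window clamped to the valid range (a sketch of the formula with placeholder defaults, not the project's implementation):

    import numpy as np

    def lrn_across_channels(x, size=5, alpha=1e-4, beta=0.75):
        # x has shape (N, C, H, W); out[n, c] = x[n, c] / (1 + alpha/size * local sum of squares)^beta
        N, C, H, W = x.shape
        sq = x * x
        out = np.empty_like(x)
        half = (size - 1) // 2
        for c in range(C):
            lo = max(0, c - half)
            hi = min(C, c - half + size)
            scale = 1.0 + (alpha / size) * sq[:, lo:hi].sum(axis=1)
            out[:, c] = x[:, c] / np.power(scale, beta)
        return out

    out = lrn_across_channels(np.random.rand(3, 8, 32, 32).astype(np.float32))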
Example #24
    def test_simple(self):
        bottom = Array.rand(256, 256).astype(np.float32) * 255
        actual = Array.zeros(bottom.shape, np.float32)
        relu(bottom, bottom, actual, 0.0)
        expected = np.clip(bottom, 0.0, float('inf'))
        self._check(actual, expected)
Example #25
    def test_simple(self):
        bottom = Array.rand(256, 256).astype(np.float32) * 255
        actual = Array.zeros(bottom.shape, np.float32)
        relu(bottom, bottom, actual, 0.0)
        expected = np.clip(bottom, 0.0, float('inf'))
        self._check(actual, expected)