Example #1
 def test_nonzero_slope(self):
     bottom = Array.rand(256, 256).astype(np.float32) * 255
     actual = Array.zeros(bottom.shape, np.float32)
     relu(bottom, bottom, actual, 2.4)
     expected = np.clip(bottom, 0.0, float('inf')) + \
         2.4 * np.clip(bottom, float('-inf'), 0.0)
     self._check(actual, expected)
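The expected value above is the usual leaky-ReLU identity: for a slope s, relu(x) = max(x, 0) + s * min(x, 0). A minimal check of that identity in plain NumPy (not the project's Array type):

import numpy as np

x = np.random.rand(8).astype(np.float32) * 2 - 1  # mix of signs
slope = 2.4
lhs = np.where(x > 0, x, slope * x)
rhs = np.clip(x, 0.0, np.inf) + slope * np.clip(x, -np.inf, 0.0)
np.testing.assert_allclose(lhs, rhs)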
Example #2
    def setup(self, bottom, top):
        conv_param = self.layer_param.convolution_param

        channels, height, width = bottom[0].shape
        num_output = conv_param.num_output
        assert channels % self.group == 0, \
            "Number of channels should be a multiple of group."
        assert num_output % self.group == 0, \
            "Number of outputs should be a multiple of group."

        self.bias_term = conv_param.bias_term
        if self.weights is not None:
            logging.debug("Skipping parameter initialization")
        else:
            weights_shape = (num_output, channels // self.group, self.kernel_h,
                             self.kernel_w)
            weight_filler = conv_param.weight_filler
            if weight_filler.type == 'gaussian':
                self.weights = weight_filler.mean + weight_filler.std * \
                    Array.standard_normal(
                        weights_shape).astype(np.float32)
                self.weight_diff = Array.empty_like(self.weights)
            else:
                raise Exception("Filler not implemented for weight filler \
                    type {}".format(weight_filler.type))
            if self.bias_term:
                self.bias = Array((num_output, ), np.float32)
                self.bias_diff = Array.empty_like(self.bias)
                filler = conv_param.bias_filler
                if filler.type == 'constant':
                    self.bias.fill(filler.value)
                else:
                    raise Exception("Filler not implemented for bias filler \
                        type {}".format(filler.type))
Example #3
    def test_simple_mul(self):
        a = Array.rand(256, 256).astype(np.float32)
        b = Array.rand(256, 256).astype(np.float32)

        actual = a * b
        expected = np.multiply(a, b)
        self._check(actual, expected)
Example #4
    def setup(self, bottom, top):
        conv_param = self.layer_param.convolution_param

        channels, height, width = bottom[0].shape
        num_output = conv_param.num_output
        assert channels % self.group == 0, \
            "Number of channels should be a multiple of group."
        assert num_output % self.group == 0, \
            "Number of outputs should be a multiple of group."

        self.bias_term = conv_param.bias_term
        if self.weights is not None:
            logging.debug("Skipping parameter initialization")
        else:
            weights_shape = (num_output, channels // self.group,
                             self.kernel_h, self.kernel_w)
            weight_filler = conv_param.weight_filler
            if weight_filler.type == 'gaussian':
                self.weights = weight_filler.mean + weight_filler.std * \
                    Array.standard_normal(
                        weights_shape).astype(np.float32)
                self.weight_diff = Array.empty_like(self.weights)
            else:
                raise Exception("Filler not implemented for weight filler \
                    type {}".format(weight_filler.type))
            if self.bias_term:
                self.bias = Array((num_output, ), np.float32)
                self.bias_diff = Array.empty_like(self.bias)
                filler = conv_param.bias_filler
                if filler.type == 'constant':
                    self.bias.fill(filler.value)
                else:
                    raise Exception("Filler not implemented for bias filler \
                        type {}".format(filler.type))
Example #5
 def test_gradient(self):
     for kernel_h in range(3, 5):
         for kernel_w in range(3, 5):
             channels = 12
             height = 3
             width = 5
             bottom = Array.zeros((5, channels, height, width), np.int32)
             bottom_diff = Array.zeros_like(bottom)
             for n in range(5):
                 for c in range(channels):
                     bottom[n, c] = Array.array(
                         [[1, 2, 5, 2, 3],
                          [9, 4, 1, 4, 8],
                          [1, 2, 5, 2, 3]]).astype(np.int32)
             param = self.layer[5]
             param.pooling_param.kernel_h = kernel_h
             param.pooling_param.kernel_w = kernel_w
             param.pooling_param.stride = 2
             param.pooling_param.pad = 1
             layer = PoolingLayer(param)
             top_shape = layer.get_top_shape(bottom)
             top = Array.zeros(top_shape, np.int32)
             top_diff = Array.zeros_like(top)
             checker = GradientChecker(1e-4, 1e-2)
             checker.check_gradient_exhaustive(layer, bottom, bottom_diff,
                                               top, top_diff)
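GradientChecker(1e-4, 1e-2) presumably carries a finite-difference step size and a tolerance: an exhaustive check perturbs each input element in turn and compares the layer's analytic gradient against central differences. A minimal sketch of that idea for a scalar-valued function f (a hypothetical stand-in, not the project's GradientChecker):

import numpy as np

def numeric_gradient(f, x, step=1e-4):
    # Central differences, one element at a time; x must be a float array.
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    for _ in it:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + step
        f_plus = f(x)
        x[idx] = orig - step
        f_minus = f(x)
        x[idx] = orig  # restore before moving on
        grad[idx] = (f_plus - f_minus) / (2 * step)
    return grad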
Example #6
 def test_nonzero_slope(self):
     bottom = Array.rand(256, 256).astype(np.float32) * 255
     actual = Array.zeros(bottom.shape, np.float32)
     relu(bottom, bottom, actual, 2.4)
     expected = np.clip(bottom, 0.0, float('inf')) + \
         2.4 * np.clip(bottom, float('-inf'), 0.0)
     self._check(actual, expected)
Example #7
    def test_simple_add(self):
        a = Array.rand(256, 256).astype(np.float32)
        b = Array.rand(256, 256).astype(np.float32)

        actual = a + b
        expected = np.add(a, b)
        self._check(actual, expected)
Example #8
 def test_gradient(self):
     for kernel_h in range(3, 5):
         for kernel_w in range(3, 5):
             channels = 12
             height = 3
             width = 5
             bottom = Array.zeros((5, channels, height, width), np.int32)
             bottom_diff = Array.zeros_like(bottom)
             for n in range(5):
                 for c in range(channels):
                      bottom[n, c] = Array.array([[1, 2, 5, 2, 3],
                                                  [9, 4, 1, 4, 8],
                                                  [1, 2, 5, 2, 3]]).astype(np.int32)
             param = self.layer[5]
             param.pooling_param.kernel_h = kernel_h
             param.pooling_param.kernel_w = kernel_w
             param.pooling_param.stride = 2
             param.pooling_param.pad = 1
             layer = PoolingLayer(param)
             top_shape = layer.get_top_shape(bottom)
             top = Array.zeros(top_shape, np.int32)
             top_diff = Array.zeros_like(top)
             checker = GradientChecker(1e-4, 1e-2)
             checker.check_gradient_exhaustive(layer, bottom, bottom_diff,
                                               top, top_diff)
Example #9
    def backward(self, bottom_data, bottom_diff, top_data, top_diff):
        padded_ratio = Array.zeros((1, bottom_data.shape[1] + self.size - 1,
                                    bottom_data.shape[2],
                                    bottom_data.shape[3]), bottom_data.dtype)
        accum_ratio = Array.zeros((bottom_data.shape[2],
                                   bottom_data.shape[3]), bottom_data.dtype)
        accum_ratio_times_bottom = Array.zeros_like(accum_ratio)
        cache_ratio_value = 2.0 * self.alpha * self.beta / self.size
        # First gradient term: scale^(-beta) * top_diff, written into the
        # output buffer in place (rebinding bottom_diff would discard it).
        bottom_diff[:] = np.power(self.scale, -self.beta)
        bottom_diff *= top_diff

        inverse_pre_pad = self.size - (self.size + 1) // 2
        channels = bottom_data.shape[1]
        for n in range(bottom_data.shape[0]):
            padded_ratio[0, inverse_pre_pad:inverse_pre_pad + channels] = \
                top_diff[n] * top_data[n] / self.scale[n]
            accum_ratio.fill(0)
            for c in range(self.size - 1):
                accum_ratio += padded_ratio[0, c]

            for c in range(channels):
                accum_ratio += padded_ratio[0, c + self.size - 1]
                # Second term: accumulate into the gradient, not the input.
                accum_ratio_times_bottom[:] = bottom_data[n, c] * accum_ratio
                bottom_diff[n, c] += -cache_ratio_value * \
                    accum_ratio_times_bottom
                accum_ratio += -1 * padded_ratio[0, c]
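For context on cache_ratio_value: the forward pass computes top_c = bottom_c * scale_c^(-beta) with scale_c = k + (alpha / size) * sum of bottom^2 over the channel window, so the chain rule gives

    d top_c / d bottom_j = scale_c^(-beta) * [c == j]
                           - (2 * alpha * beta / size) * bottom_j * top_c / scale_c

for every j inside c's window. Those are exactly the two terms accumulated above: the scale^(-beta) * top_diff product, and the running accum_ratio sum weighted by cache_ratio_value = 2 * alpha * beta / size.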
Example #10
    def test_blase(self):
        A = Array.rand(256, 256).astype(np.float32)
        x = Array.rand(256, 256).astype(np.float32)
        actual = Array.rand(256, 256).astype(np.float32)
        gemm(A, x, actual, 1.0, 0.0, 256, 256, 256)

        self._check(actual, np.dot(A, x))
Example #11
 def test_simple(self):
     channels = 12
     height = 3
     width = 5
     bottom = Array.zeros((5, channels, height, width), np.int32)
     for n in range(5):
         for c in range(channels):
             bottom[n, c] = Array.array([[1, 2, 5, 2, 3], [9, 4, 1, 4, 8],
                                         [1, 2, 5, 2, 3]]).astype(np.int32)
     param = self.layer[5]
     param.pooling_param.kernel_size = 2
     param.pooling_param.stride = 1
     layer = PoolingLayer(param)
     actual_shape = layer.get_top_shape(bottom)
     actual = Array.zeros(actual_shape, np.int32)
     layer.setup(bottom, actual)
     layer.forward(bottom, actual)
     for n in range(5):
         for c in range(channels):
             np.testing.assert_array_equal(
                 actual[n, c],
                 np.array([[9, 5, 5, 8], [9, 5, 5, 8]]).astype(np.int32))
     bottom = Array.zeros_like(bottom)
     for n in range(5):
         for c in range(channels):
             actual[n, c] = Array.array([[1, 1, 1, 1],
                                         [1, 1, 1, 1]]).astype(np.int32)
     layer.backward(bottom, actual)
     for n in range(5):
         for c in range(channels):
             np.testing.assert_array_equal(
                 bottom[n, c],
                 np.array([[0, 0, 2, 0, 0], [2, 0, 0, 0, 2],
                           [0, 0, 2, 0, 0]]).astype(np.int32))
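Working through one channel of the forward check above: with a 2x2 window and stride 1 on the 3x5 input, each output element is the maximum of a 2x2 patch, which reproduces the [[9, 5, 5, 8], [9, 5, 5, 8]] array the test asserts. A plain-NumPy sketch:

import numpy as np

x = np.array([[1, 2, 5, 2, 3],
              [9, 4, 1, 4, 8],
              [1, 2, 5, 2, 3]], np.int32)
out = np.zeros((2, 4), np.int32)
for i in range(2):
    for j in range(4):
        out[i, j] = x[i:i + 2, j:j + 2].max()
print(out)  # [[9 5 5 8]
            #  [9 5 5 8]]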
Example #12
        def matrix_mult_sample():

            A = Array.array([[1, 2], [3, 4]])
            B = Array.array([[10, 3], [7, 4]])

            C = dot(A.transpose(), B.transpose())

            return C
Example #13
        def matrix_mult_sample():

            B = Array.array([[10, 3], [7, 4]])

            # transposition
            A = Array.array([[1, 2], [3, 4]])
            A = Array.transpose(A)
            C = dot(B, A)
            return C
Example #14
    def test_no_padding(self):
        a = (Array.rand(256, 256) * 255).astype(np.float32)
        weights = (Array.rand(5, 5) * 2).astype(np.float32)
        actual = Array.zeros((254, 254), np.float32)
        convolve(a, weights, actual, (0, 0), (1, 1))

        expected = signal.convolve(a, np.fliplr(np.flipud(weights)),
                                   mode='same')[2:, 2:]
        self._check(actual, expected)
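The double flip of weights in the expected value is there because signal.convolve flips its kernel, while convolve here evidently computes a cross-correlation, as convolution layers conventionally do. A small SciPy check of that equivalence for an odd-sized kernel:

import numpy as np
from scipy import signal

a = np.random.rand(8, 8).astype(np.float32)
w = np.random.rand(3, 3).astype(np.float32)
corr = signal.correlate2d(a, w, mode='same')
conv = signal.convolve2d(a, np.fliplr(np.flipud(w)), mode='same')
np.testing.assert_allclose(corr, conv, rtol=1e-5)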
Example #15
    def test_simple(self):
        a = (Array.rand(256, 256) * 255).astype(np.float32)
        weights = (Array.rand(3, 3) * 2).astype(np.float32)
        actual = Array.zeros(a.shape, np.float32)
        convolve(a, weights, actual, (1, 1), (1, 1))

        expected = signal.convolve(a, np.fliplr(np.flipud(weights)),
                                   mode='same')

        self._check(actual, expected)
Example #16
    def test_no_padding(self):
        a = (Array.rand(256, 256) * 255).astype(np.float32)
        weights = (Array.rand(5, 5) * 2).astype(np.float32)
        actual = Array.zeros((254, 254), np.float32)
        convolve(a, weights, actual, (0, 0), (1, 1))

        expected = signal.convolve(a,
                                   np.fliplr(np.flipud(weights)),
                                   mode='same')[2:, 2:]
        self._check(actual, expected)
Example #17
    def test_simple(self):
        a = (Array.rand(256, 256) * 255).astype(np.float32)
        weights = (Array.rand(3, 3) * 2).astype(np.float32)
        actual = Array.zeros(a.shape, np.float32)
        convolve(a, weights, actual, (1, 1), (1, 1))

        expected = signal.convolve(a,
                                   np.fliplr(np.flipud(weights)),
                                   mode='same')

        self._check(actual, expected)
Example #18
        def matrix_mult_sample():

            A = Array.array([[1, 2], [3, 4]])
            B = Array.array([[10, 3], [7, 4]])

            M = Array.transpose(A)

            M = Array.array([[1, 1], [3, 0]])

            C = dot(M, B)
            return C
Example #19
        def matrix_mult_sample():

            A = Array.array([[1, 2], [3, 4]])
            B = Array.array([[10, 3], [7, 4]])

            # double transposition
            A = Array.transpose(A)
            B = Array.transpose(B)

            C = dot(A, B)
            return C
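The transposition samples in Examples #12, #13, #18, #19, #42 and #43 all exercise the same algebra: a product of transposes is the reversed product, transposed. A quick NumPy check of the identity behind this example:

import numpy as np

A = np.array([[1, 2], [3, 4]])
B = np.array([[10, 3], [7, 4]])
np.testing.assert_array_equal(np.dot(A.T, B.T), np.dot(B, A).T)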
Example #20
 def test_forward_simple(self):
     channels = 12
     height = 3
     width = 5
     bottom = Array.rand(
         5, channels, height, width).astype(np.float32)
     bottom = bottom * 256 - 128
     layer = ReluLayer(self.layer[3])
     actual = Array.zeros(layer.get_top_shape(bottom), np.float32)
     layer.forward(bottom, actual)
     expected = np.clip(bottom, 0.0, float('inf')).astype(np.float32)
     np.testing.assert_allclose(actual, expected)
Example #21
    def test_multiple_nested(self):
        def matrix_mult_complex(A, B, C):
            return dot(dot(A, B), C)

        A = Array.rand(3, 3)
        B = Array.rand(3, 3)
        C = Array.rand(3, 3)

        expected = matrix_mult_complex(A, B, C)
        actual = dgemmify(matrix_mult_complex)(A, B, C)

        self._check(actual, expected)
Example #22
    def _forward_test(self, param, in_shape):
        conv_param = param.convolution_param
        in_batch = Array.rand(*in_shape).astype(np.float32) * 255
        conv = ConvLayer(param)
        top_shape = conv.get_top_shape(in_batch)
        expected_conv = NaiveConv(conv_param)
        actual = Array.zeros(top_shape, np.float32)
        expected = Array.zeros(top_shape, np.float32)

        conv.setup(in_batch, actual)
        conv.forward(in_batch, actual)
        expected_conv(in_batch, conv.weights, conv.bias, expected)
        self._check(actual, expected)
Example #23
    def test_simple(self):
        A = Array.rand(256, 256).astype(np.float32)
        x = Array.rand(256, 256).astype(np.float32)
        b = Array.rand(256, 256).astype(np.float32)

        @blasc
        def fn(A, x, b):
            v1 = T(A)
            v2 = dot(v1, x)
            v3 = v2 - b
            return v3

        self._check(fn(A, x, b), np.transpose(A).dot(x) - b)
Example #24
    def test_simple(self):
        a = Array.rand(256, 256).astype(np.float32) * 255
        actual_mask = Array.zeros((254, 254), np.float32)
        actual = Array.zeros((254, 254), np.float32)
        actual.fill(float('-inf'))
        expected_mask = Array.zeros((254, 254), np.float32)
        expected = Array.zeros((254, 254), np.float32)
        expected.fill(float('-inf'))

        max_pool(a, actual, actual_mask, (2, 2))
        py_max_pool(a, expected, expected_mask, (2, 2), (1, 1), (0, 0))
        self._check(actual, expected)
        self._check(actual_mask, expected_mask)
Example #25
    def _forward_test(self, param, in_shape):
        conv_param = param.convolution_param
        in_batch = Array.rand(*in_shape).astype(np.float32) * 255
        conv = ConvLayer(param)
        top_shape = conv.get_top_shape(in_batch)
        expected_conv = NaiveConv(conv_param)
        actual = Array.zeros(top_shape, np.float32)
        expected = Array.zeros(top_shape, np.float32)

        conv.setup(in_batch, actual)
        conv.forward(in_batch, actual)
        expected_conv(in_batch, conv.weights, conv.bias, expected)
        self._check(actual, expected)
Example #26
    def test_simple(self):
        a = Array.rand(256, 256).astype(np.float32) * 255
        actual_mask = Array.zeros((254, 254), np.float32)
        actual = Array.zeros((254, 254), np.float32)
        actual.fill(float('-inf'))
        expected_mask = Array.zeros((254, 254), np.float32)
        expected = Array.zeros((254, 254), np.float32)
        expected.fill(float('-inf'))

        max_pool(a, actual, actual_mask, (2, 2))
        py_max_pool(a, expected, expected_mask,
                    (2, 2), (1, 1), (0, 0))
        self._check(actual, expected)
        self._check(actual_mask, expected_mask)
Example #27
    def forward(self, bottom, top):
        # initialize scale to constant value
        self.scale.fill(self.k)

        padded_square = Array.zeros((bottom.shape[1] + self.size - 1,
                                     bottom.shape[2], bottom.shape[3]),
                                    bottom.dtype)

        alpha_over_size = self.alpha / self.size

        for n in range(bottom.shape[0]):
            padded_square[self.pre_pad:bottom.shape[1] + self.pre_pad] = \
                np.square(bottom[n])

            for c in range(self.size):
                self.scale[n] += alpha_over_size * padded_square[c]

            # Running sum over the window: each channel reuses the previous
            # channel's scale, adding the square entering the window and
            # subtracting the one leaving it.
            for c in range(1, bottom.shape[1]):
                self.scale[n, c] = self.scale[n, c - 1] + \
                    alpha_over_size * padded_square[c + self.size - 1] - \
                    alpha_over_size * padded_square[c - 1]

        top[:] = np.power(self.scale, -self.beta) * bottom
Example #28
def main():

    # Smaller Dataset
    h = 1200  # height (number of rows, or column length)
    w = 1000  # width (number of columns, or row length)
    TOTAL_SIZE = h * w
    length = 100

    # Larger Dataset
    # TOTAL_SIZE = 500000000
    # h = 500000             # height (number of rows, or column length)
    # w = 1000               # width (number of columns, or row length)
    # length = 5000

    block_set = Array.array(list(range(TOTAL_SIZE)))  # sample dataset
    block_set = block_set.reshape(h, w)
    block_set = block_set.astype(np.float32)

    start_time = time.time()
    result = np.array(dcRemoval(block_set.flatten(), length, h).reshape(h, w))
    time_total = time.time() - start_time

    print "SEJITS dcRemoval Time: ", time_total, " seconds"
    print "RESULT: ", result
    return result
Example #29
    def test_range_of_fifty(self):

        TOTAL_SIZE = 50
        height = 25             # height: (number of rows, or column length)
        width = 2               # width: (number of columns, or row length)
        pfov_length = 5

        # Creating a sample dataset for the SEJITS tests
        sejits_block_set = Array.array(list(range(TOTAL_SIZE)))
        sejits_block_set = sejits_block_set.reshape(height, width)
        sejits_block_set = sejits_block_set.astype(np.float32)

        # Creating a sample dataset for the Python tests
        pyop_block_set = np.array(list(range(TOTAL_SIZE)))
        pyop_block_set = pyop_block_set.reshape(height, width)
        pyop_block_set = pyop_block_set.astype(np.float32)

        python_result = dcRemPython(pyop_block_set.flatten(order='F'), height,
                                    pfov_length).astype(np.float32).reshape(
                                        (height, width), order='F').astype(np.int32)
        sejits_result = np.array(
            dcRemSejits(sejits_block_set.flatten(), pfov_length, height)).astype(
            np.float32).reshape((height, width)).astype(np.int32)

        print(sejits_result)

        self._check(python_result, sejits_result)
Example #30
    def test_two_inputs(self):
        a = Array.rand(256, 256).astype(np.float32) * 255.0 - 128.0
        b = Array.rand(256, 256).astype(np.float32) * 255.0 - 128.0
        negative_slope = 0.0

        @smap2
        def fn(x, y):
            if x > 0:
                return y
            else:
                return negative_slope * y

        actual = fn(a, b)
        expected = b
        expected[a <= 0] *= negative_slope
        self._check(actual, expected)
Example #31
    def test_simple(self):
        a = Array.rand(256, 256).astype(np.float32)

        actual = transpose(a)
        expected = np.transpose(a)

        self._check(actual, expected)
Example #32
    def test_simple_mul_scalar(self):
        a = Array.rand(256, 256).astype(np.float32)

        actual = a * 3.0
        expected = np.multiply(a, 3.0)
        self._check(actual, expected)

        actual = 3.0 * a
        self._check(actual, expected)
Example #33
    def test_simple_add_scalar(self):
        a = Array.rand(256, 256).astype(np.float32)

        actual = a + 3.0
        expected = np.add(a, 3.0)
        self._check(actual, expected)

        actual = 3.0 + a
        self._check(actual, expected)
Example #34
 def setup(self, bottom, top):
     weights_shape = (self.num_output, bottom.shape[0])
     weight_filler = self.layer_param.inner_product_param.weight_filler
     if weight_filler.type == 'gaussian':
         self.weights = weight_filler.mean + weight_filler.std * \
             Array.standard_normal(
                 weights_shape).astype(np.float32)
     else:
         raise Exception("Filler not implemented for weight filler"
                         "type {}".format(weight_filler.type))
Example #35
 def test_simple(self):
     channels = 12
     height = 3
     width = 5
     bottom = Array.zeros((5, channels, height, width), np.int32)
     for n in range(5):
         for c in range(channels):
             bottom[n, c] = Array.array(
                 [[1, 2, 5, 2, 3],
                  [9, 4, 1, 4, 8],
                  [1, 2, 5, 2, 3]]).astype(np.int32)
     param = self.layer[5]
     param.pooling_param.kernel_size = 2
     param.pooling_param.stride = 1
     layer = PoolingLayer(param)
     actual_shape = layer.get_top_shape(bottom)
     actual = Array.zeros(actual_shape, np.int32)
     layer.setup(bottom, actual)
     layer.forward(bottom, actual)
     for n in range(5):
         for c in range(channels):
             np.testing.assert_array_equal(
                 actual[n, c],
                 np.array([
                     [9, 5, 5, 8],
                     [9, 5, 5, 8]
                 ]).astype(np.int32))
     bottom = Array.zeros_like(bottom)
     for n in range(5):
         for c in range(channels):
             actual[n, c] = Array.array(
                 [[1, 1, 1, 1],
                  [1, 1, 1, 1]]).astype(np.int32)
     layer.backward(bottom, actual)
     for n in range(5):
         for c in range(channels):
             np.testing.assert_array_equal(
                 bottom[n, c],
                 np.array([[0, 0, 2, 0, 0],
                           [2, 0, 0, 0, 2],
                           [0, 0, 2, 0, 0]]).astype(np.int32))
Example #36
 def __init__(self, layer_param):
     super(InnerProductLayer, self).__init__(layer_param)
     param = self.layer_param.inner_product_param
     self.num_output = param.num_output
     self.bias_term = param.bias_term
     if self.bias_term:
         self.bias = Array.zeros(self.num_output)
         filler = param.bias_filler
         if filler.type == 'constant':
             self.bias.fill(filler.value)
         else:
             raise Exception("Filler not implemented for bias filler \
                 type {}".format(filler.type))
Example #37
    def test_simple(self):
        a = Array.rand(256, 256).astype(np.float32)

        @smap
        def fn(x):
            if x > 0:
                return x
            else:
                return 0

        actual = fn(a)
        expected = np.copy(a)
        expected[expected < 0] = 0
        self._check(actual, expected)
Example #38
    def test_backward_simple(self):
        channels = 12
        height = 3
        width = 5
        bottom = Array.rand(
            5, channels, height, width).astype(np.float32)
        bottom = bottom * 256 - 128

        top_diff = Array.rand(
            5, channels, height, width).astype(np.float32)
        top_diff = top_diff * 256 - 128

        top = np.zeros(top_diff.shape, np.float32)
        actual = Array.zeros(bottom.shape, np.float32)

        layer = ReluLayer(self.layer[3])
        layer.backward(bottom, actual, top, top_diff)

        expected = np.multiply(top_diff,
                               np.greater(bottom, Array.zeros(bottom.shape,
                                                              np.float32)))

        np.testing.assert_allclose(actual, expected)
Example #39
 def test_simple(self):
     bottom = Array.rand(3, 8, 32, 32).astype(np.float32)
     actual = Array.zeros_like(bottom)
     layer = LRNLayer(self.layer[4])
     param = layer.layer_param.lrn_param
     alpha = param.alpha
     size = param.local_size
     beta = param.beta
     layer.setup(bottom, actual)
     layer.forward(bottom, actual)
     for n in range(bottom.shape[0]):
         for c in range(bottom.shape[1]):
             for h in range(bottom.shape[2]):
                 for w in range(bottom.shape[3]):
                      c_start = c - (size - 1) // 2
                      c_end = min(c_start + size, bottom.shape[1])
                      c_start = max(c_start, 0)
                     scale = 1
                     for i in range(c_start, c_end):
                         value = bottom[n, i, h, w]
                         scale += value * value * alpha / size
                     expected = bottom[n, c, h, w] / pow(scale, beta)
                     self.assertTrue(
                         abs(actual[n, c, h, w] - expected) < 1e-4)
Example #40
    def forward(self, bottom_data, bottom_label, top):
        accuracy = 0
        data = bottom_data
        label = bottom_label
        dim = np.prod(data.shape) // data.shape[0]

        # Perform a partial sort to find top_k
        for i in range(data.shape[0]):
            # Index the flattened scores for example i.
            vec = Array.array([[data.flat[i * dim + j], j]
                               for j in range(dim)])
            vec.partition((0, self.top_k))

            # If label is in top_k increase accuracy
            for k in range(self.top_k):
                if vec[k][1] == label[i]:
                    accuracy += 1

        top[0] = accuracy / data.shape[0]
Example #41
    def forward(self, bottom_data, bottom_label, top):
        accuracy = 0
        data = bottom_data
        label = bottom_label
        dim = np.prod(data.shape) // data.shape[0]

        # Perform a partial sort to find top_k
        for i in range(data.shape[0]):
            # Index the flattened scores for example i.
            vec = Array.array(
                [[data.flat[i * dim + j], j] for j in range(dim)])
            vec.partition((0, self.top_k))

            # If label is in top_k increase accuracy
            for k in range(self.top_k):
                if vec[k][1] == label[i]:
                    accuracy += 1

        top[0] = accuracy / data.shape[0]
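Both accuracy layers above flatten each example's scores and count a hit when the label's index lands among the top_k entries. A vectorized NumPy reference for the same measurement, assuming data has shape (batch, dim), label has shape (batch,), and top_k means the k largest scores:

import numpy as np

def topk_accuracy(data, label, top_k):
    # Indices of the k largest scores per row, in no particular order.
    topk = np.argpartition(data, -top_k, axis=1)[:, -top_k:]
    hits = (topk == label[:, None]).any(axis=1)
    return hits.mean()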
Example #42
        def matrix_mult_sample():

            A = Array.array([[1, 2], [3, 4]])
            B = Array.array([[10, 3], [7, 4]])

            M = Array.transpose(A)
            N = Array.transpose(B)

            C = dot(M, N)

            # the double assignment after the dot call
            M = Array.array([[1, 1], [3, 0]])
            N = Array.array([[1, 0], [7, 0]])

            return C
Example #43
        def matrix_mult_sample():

            A = Array.array([[1, 2], [3, 4]])
            B = Array.array([[10, 3], [7, 4]])

            A = Array.transpose(A)
            B = Array.transpose(B)

            C = dot(A, B)

            # the double transpose in place after the dot call
            A = Array.transpose(A)
            B = Array.transpose(B)

            return C
Example #44
    def test_range_of_ten(self):

        TOTAL_SIZE = 10
        height = 10             # height: (number of rows, or column length)
        width = 1               # width: (number of columns, or row length)
        pfov_length = 5

        # Creating a sample dataset for the SEJITS tests
        sejits_block_set = Array.array(list(range(TOTAL_SIZE)))
        sejits_block_set = sejits_block_set.reshape(height, width)
        sejits_block_set = sejits_block_set.astype(np.float32)

        # Creating a sample dataset for the Python tests
        pyop_block_set = np.array(list(range(TOTAL_SIZE)))
        pyop_block_set = pyop_block_set.reshape(height, width)
        pyop_block_set = pyop_block_set.astype(np.float32)

        python_result = dcRemPython(pyop_block_set, height, pfov_length)
        sejits_result = np.array(dcRemSejits(sejits_block_set, pfov_length, height))

        self._check(python_result, sejits_result)
Example #45
 def add_blob(self, blob, shape):
     self.blobs[blob] = Array.zeros(shape, np.float32)
Example #46
 def setup(self, bottom, top):
     self.sum_multiplier = Array.ones((bottom.shape[1]), np.float32)
Example #47
 def setup(self, bottom_data, bottom_label, top):
     self.prob = Array.zeros_like(bottom_data)
     self.softmax_layer.setup(bottom_data, self.prob)
Example #48
class ConvLayer(BaseLayer):
    def __init__(self, param):
        super(ConvLayer, self).__init__(param)

        conv_param = param.convolution_param

        if conv_param.kernel_size:
            self.kernel_h = conv_param.kernel_size
            self.kernel_w = conv_param.kernel_size
        else:
            self.kernel_h = conv_param.kernel_h
            self.kernel_w = conv_param.kernel_w
        assert self.kernel_h > 0 and self.kernel_w > 0, \
            "Filter dimensions cannot be zero."

        self.padding = (conv_param.pad, conv_param.pad)
        self.stride = (conv_param.stride, conv_param.stride)

        assert conv_param.num_output > 0, "Layer must have at least one output"

        self.group = conv_param.group

        self.weights = None
        self.bias_term = None
        self.bias = None
        self.kernel_size = self.kernel_h, self.kernel_w
        self.im2col = Im2Col(self.kernel_size, self.stride, self.padding)

    def get_top_shape(self, bottom):
        conv_param = self.layer_param.convolution_param
        height_out = (bottom.shape[2] + 2 * self.padding[0] - self.kernel_h) // \
            self.stride[0] + 1
        width_out = (bottom.shape[3] + 2 * self.padding[1] - self.kernel_w) // \
            self.stride[1] + 1
        return bottom.shape[0], conv_param.num_output, height_out, width_out

    def setup(self, bottom, top):
        conv_param = self.layer_param.convolution_param

        channels, height, width = bottom[0].shape
        num_output = conv_param.num_output
        assert channels % self.group == 0, \
            "Number of channels should be a multiple of group."
        assert num_output % self.group == 0, \
            "Number of outputs should be a multiple of group."

        self.bias_term = conv_param.bias_term
        if self.weights is not None:
            logging.debug("Skipping parameter initialization")
        else:
            weights_shape = (num_output, channels // self.group,
                             self.kernel_h, self.kernel_w)
            weight_filler = conv_param.weight_filler
            if weight_filler.type == 'gaussian':
                self.weights = weight_filler.mean + weight_filler.std * \
                    Array.standard_normal(
                        weights_shape).astype(np.float32)
                self.weight_diff = Array.empty_like(self.weights)
            else:
                raise Exception("Filler not implemented for weight filler \
                    type {}".format(weight_filler.type))
            if self.bias_term:
                self.bias = Array((num_output, ), np.float32)
                self.bias_diff = Array.empty_like(self.bias)
                filler = conv_param.bias_filler
                if filler.type == 'constant':
                    self.bias.fill(filler.value)
                else:
                    raise Exception("Filler not implemented for bias filler \
                        type {}".format(filler.type))

    # @meta
    def forward(self, bottom, top):
        weights = self.weights.reshape(self.weights.shape[0],
                                       np.prod(self.weights.shape[1:]))
        for bottom_data, top_data in zip(bottom, top):
            self.col_data = self.im2col(
                bottom_data, self.kernel_size, self.padding, self.stride)
            col_offset = self.col_data.shape[0] // self.group
            weight_offset = top_data.shape[0] // self.group
            top_offset = top_data.shape[0] // self.group
            for g in range(self.group):
                top_data[g * top_offset:(g + 1) * top_offset] = (
                    weights[g * weight_offset:(g + 1) * weight_offset].dot(
                        self.col_data[g * col_offset:(g + 1) * col_offset])
                ).reshape(top_data[g * top_offset:(g + 1) * top_offset].shape)

            if self.bias_term:
                for output_data, bias in zip(top_data, self.bias):
                    output_data += bias
        # out_groups = top.shape[1] // self.group
        # in_groups = bottom.shape[1] // self.group
        # for i in range(len(top)):
        #     for group in range(self.group):
        #         for out_group in range(out_groups):
        #             for in_group in range(in_groups):
        #                 convolve(
        #                     bottom[i, in_group + group * in_groups],
        #                     self.weights[out_group + group * out_groups,
        #                                  in_group],
        #                     top[i, out_group + group * out_groups],
        #                     self.padding, self.stride)
        #     if self.bias_term:
        #         for j in range(len(self.bias)):
        #             # TODO: Add support for sugar
        #             # top[i, j] += self.bias[j]
        #             top[i, j] += self.bias[j]

    def backward(self, top_data, top_diff, bottom_data, bottom_diff):
        if self.propagate_down:
            self.weight_diff.fill(0)
            if self.bias is not None:
                self.bias_diff.fill(0)

        # 2-D views of the filters and their gradient, as in forward().
        weights = self.weights.reshape(self.weights.shape[0],
                                       np.prod(self.weights.shape[1:]))
        weight_diff = self.weight_diff.reshape(weights.shape)

        for i in range(top_data.shape[0]):
            if self.bias is not None and self.propagate_down:
                # Gradient w.r.t. the bias: sum the top gradient over the
                # spatial dimensions of each output channel.
                for n in range(top_data.shape[1]):
                    self.bias_diff[n] += top_diff[i, n].sum()
            if self.propagate_down:
                self.col_data = self.im2col(bottom_data[i], self.kernel_size,
                                            self.padding, self.stride)
                col_offset = self.col_data.shape[0] // self.group
                weight_offset = weights.shape[0] // self.group
                top_offset = top_data.shape[1] // self.group
                # 2-D view of this image's top gradient.
                diff = top_diff[i].reshape(top_diff.shape[1],
                                           np.prod(top_diff.shape[2:]))
                # TODO: Clean this up with helper functions
                for g in range(self.group):
                    weight_diff[g * weight_offset:(g + 1) * weight_offset] += \
                        diff[g * top_offset:(g + 1) * top_offset].dot(
                            self.col_data[g * col_offset:
                                          (g + 1) * col_offset].T)

                for g in range(self.group):
                    self.col_data[g * col_offset:(g + 1) * col_offset] = \
                        weights[g * weight_offset:
                                (g + 1) * weight_offset].T.dot(
                            diff[g * top_offset:(g + 1) * top_offset])
                bottom_diff[i] = self.col2im(self.col_data,
                                             self.kernel_size,
                                             self.padding, self.stride)
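A worked instance of the get_top_shape arithmetic above, with made-up numbers: height_out = (height + 2 * pad - kernel_h) // stride + 1, and likewise for the width.

height, pad, kernel_h, stride = 32, 1, 5, 2
height_out = (height + 2 * pad - kernel_h) // stride + 1
print(height_out)  # (32 + 2 - 5) // 2 + 1 == 15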
Example #49
 def test_simple(self):
     bottom = Array.rand(256, 256).astype(np.float32) * 255
     actual = Array.zeros(bottom.shape, np.float32)
     relu(bottom, bottom, actual, 0.0)
     expected = np.clip(bottom, 0.0, float('inf'))
     self._check(actual, expected)
Example #50
 def setup(self, bottom, top):
     self.scale = Array.zeros_like(bottom)
Example #51
 def __call__(self, *args):
     output = Array.zeros(self.out_shape, args[0].dtype)
     self._c_function(args[0], output)
     return output
Example #52
 def __call__(self, *args):
     output = Array.empty(self.out_shape, np.float32)
     self._c_function(args[0], output)
     return output