Example #1
    def test_find_input_match(self):
        """ Test to determine input rectangle match given pixel ranges for input data and input match"""

        batch_index = 0
        input_data = np.array(range(8 * 8)).reshape([1, 1, 8, 8])
        kernel_size = (3, 3)

        layer_attributes = (kernel_size, )

        # bottom right corner
        pixel_range_for_data = ((6, 9), (6, 9))
        pixel_range_for_match = ((0, 2), (0, 2))
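        # note: the pixel ranges appear to be half-open (start, end) and clipped to the 8x8 frame,
        # so only the 2x2 patch at rows/cols 6-7 (values 54, 55, 62, 63) is matched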

        input_match = InputMatchSearch._find_input_match(
            input_data[batch_index], layer_attributes, pixel_range_for_data,
            pixel_range_for_match)

        self.assertEqual(np.sum(input_match), 54 + 55 + 62 + 63)

        # bottom left corner
        pixel_range_for_data = ((6, 9), (0, 2))
        pixel_range_for_match = ((0, 2), (1, 3))
        input_match = InputMatchSearch._find_input_match(
            input_data[batch_index], layer_attributes, pixel_range_for_data,
            pixel_range_for_match)
        self.assertEqual(np.sum(input_match), 48 + 49 + 56 + 57)

        # top right corner
        pixel_range_for_data = ((0, 2), (6, 9))
        pixel_range_for_match = ((1, 3), (0, 2))
        input_match = InputMatchSearch._find_input_match(
            input_data[batch_index], layer_attributes, pixel_range_for_data,
            pixel_range_for_match)

        self.assertEqual(np.sum(input_match), 6 + 7 + 14 + 15)

        # top left corner
        pixel_range_for_data = ((0, 2), (0, 2))
        pixel_range_for_match = ((1, 3), (1, 3))
        input_match = InputMatchSearch._find_input_match(
            input_data[batch_index], layer_attributes, pixel_range_for_data,
            pixel_range_for_match)

        self.assertEqual(np.sum(input_match), 0 + 1 + 8 + 9)

        # middle
        pixel_range_for_data = ((3, 6), (3, 6))
        pixel_range_for_match = ((0, 3), (0, 3))
        input_match = InputMatchSearch._find_input_match(
            input_data[batch_index], layer_attributes, pixel_range_for_data,
            pixel_range_for_match)

        self.assertEqual(np.sum(input_match),
                         27 + 28 + 29 + 35 + 36 + 37 + 43 + 44 + 45)
Example #2
    def test_subsample_data(self, np_choice_function):
        """Test to subsample input match for random output pixel (1, 1) and corresponding input match"""
        # randomly selected output pixel (height, width) is fixed here and it is (1, 1)
        np_choice_function.return_value = [1]

        model = TestNet()

        input_data = np.arange(0, 1440).reshape((2, 5, 12, 12))
        output_data = np.arange(0, 1280).reshape((2, 10, 8, 8))
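        # NCHW format: two images with 5 input channels of 12x12 and 10 output channels of 8x8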

        conv2 = model.conv2
        layer_attributes = (conv2.kernel_size, conv2.stride, conv2.padding)

        sub_sample_input, sub_sample_output = InputMatchSearch.subsample_data(
            layer_attributes=layer_attributes,
            input_data=input_data,
            output_data=output_data,
            samples_per_image=1)

        # compare the inputs for both batches
        self.assertEqual(sub_sample_input.shape, (2, 5, 5, 5))
        self.assertTrue(
            np.array_equal(sub_sample_input[0, :, :, :], input_data[0, :, 1:6, 1:6]))
        self.assertTrue(
            np.array_equal(sub_sample_input[1, :, :, :], input_data[1, :, 1:6, 1:6]))

        # compare the output for batches
        output_pixel = (1, 1)
        self.assertEqual(sub_sample_output.shape, (2, 10))
        self.assertTrue(
            np.array_equal(sub_sample_output,
                           output_data[:, :, output_pixel[0],
                                       output_pixel[1]]))
Example #3
    def test__determine_output_pixel_height_width_range_for_random_selection(
            self):
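        """ Test that the output pixel (height, width) ranges available for random selection are computed
        correctly for various kernel_size, stride and padding combinations """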

        strides = [[1, 1], [2, 2], [1, 2], [2, 1]]
        kernel_size_options = [[1, 1], [2, 2], [3, 3], [1, 3], [3, 1]]
        padding_options = [[0, 0], [1, 1], [2, 2], [1, 2], [2, 1], [3, 3]]

        all_options = [kernel_size_options, strides, padding_options]
        output_data_shape = (0, 1, 8, 8)
        for kernel_size, stride, padding in itertools.product(*all_options):

            layer_attributes = kernel_size, stride, padding
            height_range, width_range = \
                InputMatchSearch._determine_output_pixel_height_width_range_for_random_selection(
                    layer_attributes, output_data_shape)
            start, end = height_range
            if kernel_size[0] >= padding[0]:
                assert start == 0 and end == output_data_shape[2]
            else:
                assert start == padding[0] and end == (output_data_shape[2] -
                                                       padding[0])

            start, end = width_range
            if kernel_size[1] >= padding[1]:
                assert start == 0 and end == output_data_shape[3]
            else:
                assert start == padding[1] and end == (output_data_shape[3] -
                                                       padding[1])
Example #4
    def test_subsample_data_channels_last(self, np_choice_function):
        """
        Test to subsample input match for random output pixel (1, 1) and corresponding input match
        """
        tf.compat.v1.reset_default_graph()

        # randomly selected output pixel (height, width) is fixed here and it is (1, 1)
        np_choice_function.return_value = [1]

        # input_data and output_data are in channels_last format, similar to TensorFlow's NHWC format
        input_data = np.arange(0, 1440).reshape((2, 12, 12, 5))
        output_data = np.arange(0, 1280).reshape((2, 8, 8, 10))

        g = tf.Graph()
        with g.as_default():
            inp_tensor = tf.Variable(initial_value=input_data,
                                     name='inp_tensor',
                                     dtype=tf.float32)
            filter_tensor = tf.compat.v1.get_variable(
                'filter_tensor',
                shape=[5, 5, 5, 10],
                initializer=tf.random_normal_initializer())
            conv1 = tf.nn.conv2d(input=inp_tensor,
                                 filter=filter_tensor,
                                 strides=[1, 1, 1, 1],
                                 padding='VALID',
                                 data_format="NHWC",
                                 name='Conv2D_1')

        conv1_op = g.get_operation_by_name('Conv2D_1')
        layer_attributes = aimet_tensorflow.utils.op.conv.get_layer_attributes(
            sess=None, op=conv1_op, input_op_names=None, input_shape=None)

        # reshape input_data and output_data; the subsample_data function expects activations in channels_first format
        input_data = input_data.reshape(2, 5, 12, 12)
        output_data = output_data.reshape(2, 10, 8, 8)

        sub_sample_input, sub_sample_output = InputMatchSearch.subsample_data(
            layer_attributes=layer_attributes,
            input_data=input_data,
            output_data=output_data,
            samples_per_image=1)
        # compare the inputs for both batches
        self.assertEqual(sub_sample_input.shape, (2, 5, 5, 5))
        self.assertTrue(
            np.array_equal(sub_sample_input[0, :, :, :], input_data[0, :, 1:6, 1:6]))
        self.assertTrue(
            np.array_equal(sub_sample_input[1, :, :, :], input_data[1, :, 1:6, 1:6]))

        # compare the output for batches
        output_pixel = (1, 1)
        self.assertEqual(sub_sample_output.shape, (2, 10))
        self.assertTrue(
            np.array_equal(sub_sample_output,
                           output_data[:, :, output_pixel[0],
                                       output_pixel[1]]))
Example #5
    def get_sub_sampled_data(
            self, orig_layer: torch.nn.Module, input_data: np.ndarray,
            output_data: np.ndarray,
            samples_per_image: int) -> Tuple[np.ndarray, np.ndarray]:
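        """
        Get sub-sampled input and output data for the given layer

        :param orig_layer: original layer (torch.nn.Module) whose kernel_size, stride and padding are used
        :param input_data: input data of the layer
        :param output_data: output data of the layer
        :param samples_per_image: number of samples to pick per image
        :return: sub-sampled input and output data
        """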

        layer_attributes = (orig_layer.kernel_size, orig_layer.stride,
                            orig_layer.padding)

        # get the sub sampled input and output data
        sub_sampled_inp_data, sub_sampled_out_data = InputMatchSearch.subsample_data(
            layer_attributes, input_data, output_data, samples_per_image)

        return sub_sampled_inp_data, sub_sampled_out_data
Example #6
    def test_find_input_match_for_pixel_from_output_data_baseline_channels_last(
            self):
        """
        Test find input match for output pixel implementation with channels_last (NHWC) format
        """
        tf.compat.v1.reset_default_graph()

        strides = [[1, 1], [2, 2], [1, 2], [2, 1]]
        kernel_size_options = [[1, 1], [2, 2], [3, 3], [1, 3], [3, 1]]
        padding_options = ['SAME', 'VALID']
        # test middle and border values
        size_options = [[5, 5], [0, 0], [3, 3]]

        all_options = [
            kernel_size_options, padding_options, size_options, strides
        ]

        for kernel_size, padding, size_opt, stride in itertools.product(
                *all_options):

            if isinstance(kernel_size, int):
                kernel_size = [kernel_size, kernel_size]

            if isinstance(stride, list) and len(stride) == 2:
                height, width = [
                    size_opt[0] // stride[0], size_opt[1] // stride[1]
                ]

            else:
                height, width = [size // stride for size in size_opt]

            output_data_pixel = (height, width)

            input_data = np.array(range(8 * 8)).reshape([1, 8, 8, 1])
            filter_data = np.ones([kernel_size[0], kernel_size[1], 1, 1],
                                  dtype=np.float32)
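            # with all-ones filter weights, the conv output at a pixel equals the sum of the matched input patch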

            g = tf.Graph()
            with g.as_default():
                inp_tensor = tf.Variable(initial_value=input_data,
                                         name='inp_tensor',
                                         dtype=tf.float32)
                filter_tensor = tf.Variable(initial_value=filter_data,
                                            name='filter_tensor',
                                            dtype=tf.float32)
                _ = tf.nn.conv2d(input=inp_tensor,
                                 filter=filter_tensor,
                                 strides=[1, stride[0], stride[1], 1],
                                 padding=padding,
                                 data_format="NHWC",
                                 name='Conv2D_1')
                init = tf.compat.v1.global_variables_initializer()

            conv1_op = g.get_operation_by_name('Conv2D_1')
            layer_attributes = aimet_tensorflow.utils.op.conv.get_layer_attributes(
                sess=None, op=conv1_op, input_op_names=None, input_shape=None)

            # reshape input_data; the function expects activations in channels_first format
            input_data = input_data.reshape(1, 1, 8, 8)
            input_match = InputMatchSearch._find_input_match_for_output_pixel(
                input_data[0], layer_attributes, output_data_pixel)

            sess = tf.compat.v1.Session(graph=g)
            sess.run(init)

            conv2d_out = sess.run(conv1_op.outputs[0])

            predicted_output = np.sum(input_match).astype(dtype='float32')
            generated_output = conv2d_out[0, height, width, 0]
            print('generated output: ', generated_output)
            print('predicted output: ', predicted_output)

            self.assertAlmostEqual(generated_output,
                                   predicted_output,
                                   places=2)
            self.assertTrue(
                np.prod(input_match.shape) == kernel_size[0] * kernel_size[1])

            sess.close()
Example #7
    def get_sub_sampled_data(
            cls, orig_layer: Layer, pruned_layer: Layer, inp_op_names: List,
            orig_layer_db: LayerDatabase, comp_layer_db: LayerDatabase,
            data_set: tf.data.Dataset, batch_size: int,
            num_reconstruction_samples: int) -> Tuple[np.ndarray, np.ndarray]:

        # pylint: disable=too-many-arguments
        # pylint: disable=too-many-locals
        """
        Get all the input data from pruned model and output data from original model

        :param orig_layer: layer in original model database
        :param pruned_layer: layer in pruned model database
        :param inp_op_names: input op names; should be the same in both models
        :param orig_layer_db: original (unpruned) model database, used to provide the actual outputs
        :param comp_layer_db: compressed model database; potentially already pruned in the layers upstream of the
         given layer
        :param data_set: tf.data.Dataset object
        :param batch_size : batch size
        :param num_reconstruction_samples: The number of reconstruction samples
        :return: input_data, output_data
        """

        # Grow GPU memory as needed at the cost of fragmentation.
        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True  # pylint: disable=no-member

        # create an iterator and iterator.get_next() Op in the same graph as dataset
        # TODO: currently dataset (user provided) and iterator are in the same graph, and the iterator is
        #  being created every time this function is called. Use re-initialize iterator
        sess = tf.compat.v1.Session(graph=data_set._graph, config=config)  # pylint: disable=protected-access

        with sess.graph.as_default():

            iterator = data_set.make_one_shot_iterator()
            next_element = iterator.get_next()

        # hard-coded number of samples drawn per image
        samples_per_image = 10

        total_num_of_images = int(num_reconstruction_samples /
                                  samples_per_image)

        # number of possible batches - round up
        num_of_batches = math.ceil(total_num_of_images / batch_size)

        all_sub_sampled_inp_data = list()
        all_sub_sampled_out_data = list()

        for _ in range(num_of_batches):

            try:
                # get the data
                batch_data = sess.run(next_element)

                # output data from original model
                feed_dict = aimet_tensorflow.utils.common.create_input_feed_dict(
                    orig_layer_db.model.graph, inp_op_names, batch_data)
                output_data = orig_layer_db.model.run(
                    orig_layer.module.outputs[0], feed_dict=feed_dict)

                # input data from compressed model
                feed_dict = aimet_tensorflow.utils.common.create_input_feed_dict(
                    comp_layer_db.model.graph, inp_op_names, batch_data)
                input_data = comp_layer_db.model.run(
                    pruned_layer.module.inputs[0], feed_dict=feed_dict)

                # get the layer attributes (kernel_size, stride, padding)
                layer_attributes = aimet_tensorflow.utils.op.conv.get_layer_attributes(
                    sess=orig_layer_db.model,
                    op=orig_layer.module,
                    input_op_names=orig_layer_db.starting_ops,
                    input_shape=orig_layer_db.input_shape)

                # channels_last (NHWC) to channels_first data format (NCHW - Common format)
                input_data = np.transpose(input_data, (0, 3, 1, 2))
                output_data = np.transpose(output_data, (0, 3, 1, 2))

                # get the sub sampled input and output data
                sub_sampled_inp_data, sub_sampled_out_data = InputMatchSearch.subsample_data(
                    layer_attributes, input_data, output_data,
                    samples_per_image)
                all_sub_sampled_inp_data.append(sub_sampled_inp_data)
                all_sub_sampled_out_data.append(sub_sampled_out_data)

            except tf.errors.OutOfRangeError:

                raise StopIteration(
                    "There are insufficient batches of data in the provided dataset for the purpose of"
                    " weight reconstruction! Either reduce number of reconstruction samples or increase"
                    " data in dataset")

        # close the session
        sess.close()

        # accumulate total sub sampled input and output data
        return np.vstack(all_sub_sampled_inp_data), np.vstack(
            all_sub_sampled_out_data)
Example #8
    def test_find_input_data_pixel_indices(self):
        """ Test utility to determine input data pixel height and width ranges are calculated correctly or not
        for given set of kernel_size, padding and stride combination"""
        in_channels = 1
        out_channels = 10
        input_data = np.random.rand(1, 1, 8, 8)
        strides = [[1, 1], [2, 2], [1, 2], [2, 1]]
        kernel_size_options = [[1, 1], [2, 2], [3, 3], [1, 3], [3, 1]]
        padding_options = [[0, 0], [1, 1], [2, 2], [1, 2], [2, 1]]
        all_options = [kernel_size_options, padding_options, strides]

        for kernel_size, padding, stride in itertools.product(*all_options):

            # we don't consider padding larger than the kernel size, so zero the padding wherever the kernel size is 1
            for i, ks in enumerate(kernel_size):
                if ks == 1:
                    padding = copy.deepcopy(padding)
                    padding[i] = 0

            layer = nn.Conv2d(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding)

            input_h, input_w = input_data.shape[2], input_data.shape[3]
            filter_h, filter_w = layer.kernel_size[0], layer.kernel_size[1]
            stride_h, stride_w = layer.stride[0], layer.stride[1]
            padding_h, padding_w = layer.padding[0], layer.padding[1]

            check_h = (input_h - filter_h + 2 * padding_h) / stride_h
            check_w = (input_w - filter_w + 2 * padding_w) / stride_w
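            # standard conv output size is (input - filter + 2 * padding) / stride + 1,
            # computed below as check_h / check_w plus one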

            # if the condition is not satisfied, ignore that particular combination of kernel_size, padding and stride
            if check_h % layer.stride[0] != 0 or check_w % layer.stride[1] != 0:
                continue

            # calculate output height and width max values
            out_height_max = int(check_h) + 1
            out_width_max = int(check_w) + 1

            size_options = [[x, y] for x in range(out_height_max)
                            for y in range(out_width_max)]

            layer_attributes = (layer.kernel_size, layer.stride, layer.padding)

            # iterate over all the output pixels
            for size_opt in size_options:

                height = size_opt[0]
                width = size_opt[1]
                in_data_height_range, in_data_width_range = \
                    InputMatchSearch._find_pixel_range_for_input_data(input_data_shape=input_data.shape[1:],
                                                                      layer_attributes=layer_attributes,
                                                                      pixel=(height, width))

                # check input data height indices range
                self.assertEqual(
                    in_data_height_range[0],
                    max(0, (height * layer.stride[0]) - layer.padding[0]))
                self.assertEqual(in_data_height_range[1],
                                 (height * layer.stride[0]) -
                                 layer.padding[0] + layer.kernel_size[0])

                # check input data width indices range
                self.assertEqual(
                    in_data_width_range[0],
                    max(0, (width * layer.stride[1]) - layer.padding[1]))
                self.assertEqual(in_data_width_range[1],
                                 (width * layer.stride[1]) - layer.padding[1] +
                                 layer.kernel_size[1])
Example #9
    def test_find_input_match_for_pixel_from_output_data_baseline(self):
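        """ Baseline test: compare _find_input_match_for_output_pixel against an actual Conv2d forward pass """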

        batch_num = 0
        strides = [[1, 1], [2, 2], [1, 2], [2, 1]]
        kernel_size_options = [[1, 1], [2, 2], [3, 3], [1, 3], [3, 1]]
        padding_options = [[0, 0], [1, 1], [2, 2], [1, 2], [2, 1]]

        max_height = 8
        max_width = 8

        input_frame = np.array(range(8 * 8)).reshape(
            [1, 1, max_height, max_width])
        num_output_pixels = 5

        # randomly pick sample coordinates in the height and width dimensions
        heights = np.random.choice(range(2, (max_height - 2)),
                                   size=[num_output_pixels],
                                   replace=True)
        widths = np.random.choice(range(2, (max_width - 2)),
                                  size=[num_output_pixels],
                                  replace=True)

        size_options = [[a, b] for a, b in zip(heights, widths)]
        print("Size Options", size_options)

        all_options = [
            kernel_size_options, padding_options, size_options, strides
        ]

        for kernel_size, padding, size_opt, stride in itertools.product(
                *all_options):

            if isinstance(kernel_size, int):
                kernel_size = [kernel_size, kernel_size]
            if isinstance(stride, list) and len(stride) == 2:
                height, width = [
                    size_opt[0] // stride[0], size_opt[1] // stride[1]
                ]
            else:
                height, width = [size // stride for size in size_opt]

            output_data_pixel = (height, width)

            conv_filter = nn.Conv2d(1,
                                    1,
                                    kernel_size=kernel_size,
                                    stride=stride,
                                    padding=padding)

            layer_attributes = (conv_filter.kernel_size, conv_filter.stride,
                                conv_filter.padding)

            conv_filter.weight.data =\
                torch.FloatTensor(np.ones([1, 1, kernel_size[0], kernel_size[1]], dtype=np.float32))
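            # with all-ones weights, the conv output at (height, width) equals the sum of the matched input patch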

            input_match = InputMatchSearch._find_input_match_for_output_pixel(
                input_frame[batch_num], layer_attributes, output_data_pixel)
            conv2d_out = functional.conv2d(torch.FloatTensor(input_frame),
                                           conv_filter.weight.data,
                                           stride=stride,
                                           padding=padding)

            predicted_output = np.sum(input_match)
            generated_output = conv2d_out[0, 0, height, width].detach().numpy()

            assert generated_output == predicted_output
            assert np.prod(
                input_match.shape) == kernel_size[0] * kernel_size[1]