Example No. 1
    def test_inception_model_conv_below_avgpool(self):
        """ Test winnowing inception model with conv below avgpool """
        model = models.Inception3()
        model.eval()
        input_shape = [1, 3, 299, 299]
        input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]
        list_of_modules_to_winnow = [(model.Mixed_5b.branch_pool.conv,
                                      input_channels_to_prune)]
        print(model.Mixed_5b.branch_pool.conv)
        print(model.Mixed_5b.branch_pool.conv.out_channels,
              model.Mixed_5b.branch_pool.conv.in_channels)

        # Call the Winnow API.
        new_model, _ = winnow_model(model,
                                    input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True,
                                    in_place=False,
                                    verbose=True)

        self.assertEqual(new_model.Mixed_5b.branch_pool.conv[1].out_channels,
                         32)
        self.assertEqual(
            list(new_model.Mixed_5b.branch_pool.conv[1].weight.shape),
            [32, 184, 1, 1])
        del model
        del new_model
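All of the examples on this page follow the same calling pattern: build a model, pair each target module with the list of input channels to prune, and pass that list to winnow_model together with the input shape. Below is a minimal, self-contained sketch of that pattern; the import path aimet_torch.winnow.winnow is an assumption, since the snippets on this page do not show their imports.

# Minimal sketch of the winnow_model calling pattern used throughout these examples.
# Assumption: winnow_model comes from aimet_torch.winnow.winnow (imports are not
# shown in the scraped snippets above).
import torch
from torchvision import models
from aimet_torch.winnow.winnow import winnow_model

model = models.resnet18(pretrained=False)
model.eval()

input_shape = [1, 3, 224, 224]
# Each entry pairs a module with the input channels to remove from it.
list_of_modules_to_winnow = [(model.layer1[0].conv2, [2, 7, 13])]

new_model, modified_modules = winnow_model(model,
                                           input_shape,
                                           list_of_modules_to_winnow,
                                           reshape=True,
                                           in_place=False,
                                           verbose=True)

# Sanity check: the winnowed model still runs and produces an output.
with torch.no_grad():
    _ = new_model(torch.rand(input_shape))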
Example No. 2
    def test_mask_propagation_through_concat(self):
        """ After the graph is constructed, the Op should have default masks and connectivity for all module types. """
        logger.debug("Test default mask and connectivity.")
        model = SingleConcat()

        # Test forward pass on the copied model before zeroing out channels of layers.
        input_shape = [1, 3, 224, 224]
        module_zero_channels_list = []
        module = model.conv4
        input_channels_to_prune = [
            1, 3, 5, 7, 9, 19, 21, 23, 25, 43, 45, 47, 49, 51, 57, 59, 61, 63
        ]

        module_mask_pair = (module, input_channels_to_prune)
        module_zero_channels_list.append(module_mask_pair)

        print("Order of modules in in the API:",
              [get_layer_name(model, m) for m, _ in module_zero_channels_list])
        # API version 2.
        winnowed_model, _ = winnow_model(model,
                                         input_shape,
                                         module_zero_channels_list,
                                         in_place=True,
                                         verbose=True)

        # validate winnowed net
        # input_tensor = torch.rand(input_shape).double()
        input_tensor = torch.rand(input_shape)
        # winnowed_model.double()
        winnowed_model.eval()
        _ = winnowed_model(input_tensor)
        self.assertEqual(0, 0)
Example No. 3
    def test_inception_model_conv_below_conv(self):
        """ Test winnowing inception model conv below conv """
        model = models.Inception3()
        model.eval()
        input_shape = [1, 3, 299, 299]
        input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]

        list_of_modules_to_winnow = [(model.Mixed_5b.branch3x3dbl_2.conv,
                                      input_channels_to_prune)]

        print(model.Mixed_5b.branch3x3dbl_1.conv.out_channels)

        new_model, _ = winnow_model(model,
                                    input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True,
                                    in_place=False,
                                    verbose=True)

        self.assertEqual(new_model.Mixed_5b.branch3x3dbl_1.conv.out_channels,
                         56)
        self.assertEqual(
            list(new_model.Mixed_5b.branch3x3dbl_1.conv.weight.shape),
            [56, 192, 1, 1])
        del model
        del new_model
Example No. 4
    def test_inception_model_conv_below_split(self):
        """ Test winnowing inception model with conv below split """
        model = models.Inception3()
        model.eval()
        input_shape = [1, 3, 299, 299]
        input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]

        list_of_modules_to_winnow = [(model.Mixed_5b.branch3x3dbl_1.conv,
                                      input_channels_to_prune)]

        print(model.Mixed_5b.branch3x3dbl_1.conv.out_channels)

        # Call the Winnow API.
        new_model, _ = winnow_model(model,
                                    input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True,
                                    in_place=False,
                                    verbose=True)

        del model
        del new_model

        model = models.Inception3()
        model.eval()
        input_shape = [1, 3, 299, 299]
        input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]
        list_of_modules_to_winnow = [(model.Mixed_5b.branch1x1.conv,
                                      input_channels_to_prune)]
        print(model.Mixed_5b.branch3x3dbl_1.conv.out_channels)

        # Call the Winnow API.
        new_model, _ = winnow_model(model,
                                    input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True,
                                    in_place=False,
                                    verbose=True)

        del model
        del new_model
        self.assertEqual(0, 0)
Example No. 5
    def _winnow_and_reconstruct_layer(self, orig_layer_db: LayerDatabase,
                                      comp_layer_db: LayerDatabase,
                                      layer: Layer, comp_ratio: float,
                                      perform_reconstruction: bool):
        """
        Replaces a given layer within the comp_layer_db with a pruned version of the layer

        :param orig_layer_db: original Layer database
        :param comp_layer_db: Layer database, will be modified
        :param layer: Layer to prune
        :param comp_ratio: compression ratio
        :param perform_reconstruction: if True, perform data sub-sampling and reconstruction after winnowing
        :return:
        """
        # 1) channel selection
        prune_indices = self._select_inp_channels(layer.module, comp_ratio)

        # 2) winnow - in place API
        _, module_list = winnow_model(
            comp_layer_db.model,
            self._input_shape, [(layer.module, prune_indices)],
            reshape=self._allow_custom_downsample_ops,
            in_place=True)

        # 3) data sub sampling and reconstruction
        if perform_reconstruction:
            # get original layer reference
            orig_layer = orig_layer_db.find_layer_by_name(layer.name)
            self._data_subsample_and_reconstruction(orig_layer.module,
                                                    layer.module,
                                                    orig_layer_db.model,
                                                    comp_layer_db.model)

        # 4) update layer database
        if module_list:
            self._update_layer_database_after_winnowing(
                comp_layer_db, module_list)
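Step 1 of the helper above calls self._select_inp_channels, whose body is not included in this snippet. Purely as an illustration (not the actual AIMET selection logic), a magnitude-based input-channel selector for a Conv2d could look like the following sketch.

import torch

def select_input_channels_by_magnitude(conv: torch.nn.Conv2d, comp_ratio: float):
    """Hypothetical channel selector: prune the lowest-magnitude input channels.

    Only an illustration of what a selector could do; this is not the
    implementation behind self._select_inp_channels in the example above.
    """
    # Conv2d weight layout: [out_channels, in_channels, kernel_height, kernel_width]
    per_channel_norm = conv.weight.detach().abs().sum(dim=(0, 2, 3))
    num_to_prune = int(conv.in_channels * (1.0 - comp_ratio))
    # Mark the channels with the smallest aggregate weight magnitude for pruning.
    prune_indices = torch.argsort(per_channel_norm)[:num_to_prune]
    return sorted(prune_indices.tolist())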
Example No. 6
    def test_mask_propagation_through_single_chunk(self):
        """ After the graph is constructed, the Op should have default masks and connectivity for all module types. """
        logger.debug("Test default mask and connectivity.")
        model = SingleChunk()

        # Test forward pass on the copied model before zeroing out channels of layers.
        input_shape = [1, 3, 224, 224]

        module_zero_channels_list = []
        module = model.conv4
        input_channels_to_prune = [5, 9]

        module_mask_pair = (module, input_channels_to_prune)
        module_zero_channels_list.append(module_mask_pair)

        print("Order of modules in in the API:",
              [get_layer_name(model, m) for m, _ in module_zero_channels_list])
        # API version 2.
        _, _ = winnow_model(model,
                            input_shape,
                            module_zero_channels_list,
                            in_place=True,
                            verbose=True)
        self.assertEqual(0, 0)
Example No. 7
    def test_conv_to_conv_mask_propagation(self):
        """ After the graph is constructed, the Op should have default masks and connectivity for all module types. """
        logger.debug("Test default mask and connectivity.")
        model = SingleResidual()

        # Test forward pass on the copied model before winnowing.
        input_shape = [1, 3, 224, 224]
        input_tensor = torch.rand(input_shape).double()
        model.double()
        model.eval()
        print(
            "test_conv_to_conv_mask_propagation(): Testing forward pass before winnowing."
        )
        validation_output = model(input_tensor)

        # Convert the model back to float.
        model.float()

        module_zero_channels_list = []
        module = model.conv3
        input_channels_to_prune = [1, 3]

        module_mask_pair = (module, input_channels_to_prune)
        module_zero_channels_list.append(module_mask_pair)

        print("Order of modules in in the API:",
              [get_layer_name(model, m) for m, _ in module_zero_channels_list])
        # API version 2.
        winnowed_model, _ = winnow_model(model,
                                         input_shape,
                                         module_zero_channels_list,
                                         in_place=True,
                                         verbose=True)

        # validate winnowed net
        input_tensor = torch.rand(input_shape).double()
        winnowed_model.double()
        winnowed_model.eval()
        test_output = winnowed_model(input_tensor)

        self.assertTrue(test_output.shape == validation_output.shape)
        # self.assertTrue(test_output.allclose(validation_output)) # TBD. Why is this failing?
        print(test_output)
        print(validation_output)

        # In the winnowed model, conv3 has in_channels = 62, out_channels = 64
        self.assertTrue(winnowed_model.conv3.in_channels == 62)
        self.assertTrue(winnowed_model.conv3.out_channels == 64)

        # The winnowed model's bn2 layer has 62 num_features
        self.assertEqual(winnowed_model.bn2.num_features, 62)
        self.assertEqual(list(winnowed_model.bn2.weight.shape), [62])
        self.assertEqual(list(winnowed_model.bn2.bias.shape), [62])
        self.assertEqual(list(winnowed_model.bn2.running_mean.shape), [62])
        self.assertEqual(list(winnowed_model.bn2.running_var.shape), [62])

        # In the winnowed model, conv2 has in_channels = 64, out_channels = 62 (impacted by conv3 pruning)
        self.assertTrue(winnowed_model.conv2.in_channels == 64)
        self.assertTrue(winnowed_model.conv2.out_channels == 62)

        print(
            "test_conv_to_conv_mask_propagation(): Successfully validated winnowed model."
        )
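What the assertions above check can also be reproduced by hand: removing input channels [1, 3] from conv3 implies removing the matching output channels of the upstream conv2 and the matching features of bn2. The sketch below is only a conceptual illustration of that propagation, not the AIMET implementation.

import torch

# conv2 -> bn2 -> conv3, mirroring the channel counts asserted above.
conv2 = torch.nn.Conv2d(64, 64, kernel_size=3, padding=1)
bn2 = torch.nn.BatchNorm2d(64)
conv3 = torch.nn.Conv2d(64, 64, kernel_size=3, padding=1)

input_channels_to_prune = [1, 3]
keep = [c for c in range(64) if c not in input_channels_to_prune]

# conv3 keeps only the surviving input channels (dim 1 of its weight).
conv3_weight = conv3.weight.detach()[:, keep, :, :]   # [64, 62, 3, 3]
# conv2 drops the corresponding output channels (dim 0 of its weight).
conv2_weight = conv2.weight.detach()[keep, :, :, :]   # [62, 64, 3, 3]
# bn2 drops the corresponding features.
bn2_num_features = len(keep)                           # 62

print(conv3_weight.shape, conv2_weight.shape, bn2_num_features)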
Example No. 8
    def test_winnowing_multiple_zeroed_resnet34(self):
        """ Tests winnowing resnet18 with multiple layers with zero planes. """

        model = models.resnet34(pretrained=False)
        model.eval()

        # Test forward pass on the copied model before zeroing out channels in any layer.
        input_shape = [1, 3, 224, 224]

        list_of_modules_to_winnow = []

        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append(
            (model.layer4[1].conv2, input_channels_to_prune))

        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append(
            (model.layer4[0].conv1, input_channels_to_prune))

        input_channels_to_prune = [15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9]
        list_of_modules_to_winnow.append(
            (model.layer3[1].conv2, input_channels_to_prune))

        input_channels_to_prune = [33, 44, 55]
        list_of_modules_to_winnow.append(
            (model.layer2[1].conv2, input_channels_to_prune))

        input_channels_to_prune = [11, 12, 13, 14, 15]
        list_of_modules_to_winnow.append(
            (model.layer2[0].conv2, input_channels_to_prune))

        input_channels_to_prune = [55, 56, 57, 58, 59]
        list_of_modules_to_winnow.append(
            (model.layer1[1].conv1, input_channels_to_prune))

        input_channels_to_prune = [42, 44, 46]
        list_of_modules_to_winnow.append(
            (model.layer1[0].conv2, input_channels_to_prune))

        # Call the Winnow API.
        new_model, _ = winnow_model(model,
                                    input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True,
                                    in_place=False,
                                    verbose=True)

        # compare zeroed out and pruned model output
        # use double precision for lower absolute error
        input_tensor = torch.rand(input_shape).double()
        model.double()
        model.eval()
        validation_output = model(input_tensor)

        # validate winnowed net
        new_model.double()
        new_model.eval()
        test_output = new_model(input_tensor)

        self.assertTrue(test_output.shape == validation_output.shape)

        # layer1.0.conv2 input channels pruned from 64 --> 61
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[0].conv2.in_channels, 61)
        self.assertEqual(list(new_model.layer1[0].conv2.weight.shape),
                         [64, 61, 3, 3])
        self.assertEqual(new_model.layer1[0].conv1.out_channels, 61)

        # layer1.1.conv1 input channels pruned from 64 --> 59
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[1].conv1[1].in_channels, 59)
        self.assertEqual(list(new_model.layer1[1].conv1[1].weight.shape),
                         [64, 59, 3, 3])

        # layer2.0.conv2 input channels pruned from 128 --> 123
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[0].conv2.in_channels, 123)
        self.assertEqual(list(new_model.layer2[0].conv2.weight.shape),
                         [128, 123, 3, 3])
        self.assertEqual(new_model.layer2[0].conv1.out_channels, 123)

        # layer2.1.conv2 input channels pruned from 128 --> 125
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[1].conv2.in_channels, 125)
        self.assertEqual(list(new_model.layer2[1].conv2.weight.shape),
                         [128, 125, 3, 3])
        self.assertEqual(new_model.layer2[1].conv1.out_channels, 125)

        # layer3.1.conv2 input channels pruned from 256 --> 245
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer3[1].conv2.in_channels, 245)
        self.assertEqual(list(new_model.layer3[1].conv2.weight.shape),
                         [256, 245, 3, 3])
        self.assertEqual(new_model.layer3[1].conv1.out_channels, 245)

        # layer4.0.conv1 input channels pruned from 256 --> 245
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[0].conv1[1].in_channels, 245)
        self.assertEqual(list(new_model.layer4[0].conv1[1].weight.shape),
                         [512, 245, 3, 3])

        # layer4.1.conv2 input channels pruned from 512 --> 501
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[1].conv2.in_channels, 501)
        self.assertEqual(list(new_model.layer4[1].conv2.weight.shape),
                         [512, 501, 3, 3])
        self.assertEqual(new_model.layer4[1].conv1.out_channels, 501)
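Note that the assertions above index layer1[1].conv1[1] and layer4[0].conv1[1]: with reshape=True the winnowed convolution evidently ends up as the second element of a container module rather than replacing the original Conv2d directly. A small hedged sketch for inspecting that after the call (again assuming the aimet_torch.winnow.winnow import path):

import torch
from torchvision import models
from aimet_torch.winnow.winnow import winnow_model  # assumed import path

model = models.resnet34(pretrained=False)
model.eval()

new_model, _ = winnow_model(model,
                            [1, 3, 224, 224],
                            [(model.layer1[1].conv1, [55, 56, 57, 58, 59])],
                            reshape=True,
                            in_place=False,
                            verbose=True)

# Inspect what layer1[1].conv1 became after winnowing and report the reduced
# Conv2d's channel counts.
winnowed = new_model.layer1[1].conv1
print(type(winnowed))
conv = winnowed[1] if isinstance(winnowed, torch.nn.Sequential) else winnowed
print(conv.in_channels, conv.out_channels)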
Example No. 9
    def test_winnowing_multiple_zeroed_resnet101(self):
        """ Tests winnowing resnet18 with multiple layers  with zero planes. """

        model = models.resnet101(pretrained=False)
        model.eval()

        input_shape = [1, 3, 224, 224]

        list_of_modules_to_winnow = []

        # For layer4[1].conv2 layer, zero out input channels 5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54
        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append(
            (model.layer4[1].conv2, input_channels_to_prune))

        # For layer4[0].conv1 layer, zero out input channels 5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54
        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append(
            (model.layer4[0].conv1, input_channels_to_prune))

        # For layer3[1].conv2 layer, zero out input channels 15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9
        input_channels_to_prune = [15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9]
        list_of_modules_to_winnow.append(
            (model.layer3[1].conv2, input_channels_to_prune))

        # For layer2[1].conv2 layer, zero out input channels 33, 44, 55
        input_channels_to_prune = [33, 44, 55]
        list_of_modules_to_winnow.append(
            (model.layer2[1].conv2, input_channels_to_prune))

        # For layer2[0].conv2 layer, zero out input channels 11, 12, 13, 14, 15
        input_channels_to_prune = [11, 12, 13, 14, 15]
        list_of_modules_to_winnow.append(
            (model.layer2[0].conv2, input_channels_to_prune))

        # For layer1[1].conv1 layer, zero out input channels 55, 56, 57, 58, 59
        input_channels_to_prune = [55, 56, 57, 58, 59]
        list_of_modules_to_winnow.append(
            (model.layer1[1].conv1, input_channels_to_prune))

        # For layer1[0].conv2 layer, zero out input channels 42, 44, 46
        input_channels_to_prune = [42, 44, 46]
        list_of_modules_to_winnow.append(
            (model.layer1[0].conv2, input_channels_to_prune))

        # Call the Winnow API.
        new_model, _ = winnow_model(model,
                                    input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True,
                                    in_place=False,
                                    verbose=True)

        input_tensor = torch.rand(input_shape).double()
        model.double()
        model.eval()
        validation_output = model(input_tensor)

        # validate winnowed net
        new_model.double()
        new_model.eval()
        test_output = new_model(input_tensor)

        self.assertTrue(test_output.shape == validation_output.shape)

        # layer1.0.conv2 input channels pruned from 64 --> 61
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[0].conv2.in_channels, 61)
        self.assertEqual(list(new_model.layer1[0].conv2.weight.shape),
                         [64, 61, 3, 3])
        self.assertEqual(new_model.layer1[0].conv1.out_channels, 61)

        # layer1.1.conv1 input channels pruned from 256 --> 251
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[1].conv1[1].in_channels, 251)
        self.assertEqual(list(new_model.layer1[1].conv1[1].weight.shape),
                         [64, 251, 1, 1])

        # layer2.0.conv2 input channels pruned from 128 --> 123
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[0].conv2.in_channels, 123)
        self.assertEqual(list(new_model.layer2[0].conv2.weight.shape),
                         [128, 123, 3, 3])
        self.assertEqual(new_model.layer2[0].conv1.out_channels, 123)

        # layer2.1.conv2 input channels pruned from 128 --> 125
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[1].conv2.in_channels, 125)
        self.assertEqual(list(new_model.layer2[1].conv2.weight.shape),
                         [128, 125, 3, 3])
        self.assertEqual(new_model.layer2[1].conv1.out_channels, 125)

        # layer3.1.conv2 input channels pruned from 256 --> 245
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer3[1].conv2.in_channels, 245)
        self.assertEqual(list(new_model.layer3[1].conv2.weight.shape),
                         [256, 245, 3, 3])
        self.assertEqual(new_model.layer3[1].conv1.out_channels, 245)

        # layer4.0.conv1 input channels pruned from 1024 --> 1013
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[0].conv1[1].in_channels, 1013)
        self.assertEqual(list(new_model.layer4[0].conv1[1].weight.shape),
                         [512, 1013, 1, 1])

        # layer4.1.conv2 input channels pruned from 512 --> 501
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[1].conv2.in_channels, 501)
        self.assertEqual(list(new_model.layer4[1].conv2.weight.shape),
                         [512, 501, 3, 3])
        self.assertEqual(new_model.layer4[1].conv1.out_channels, 501)
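Because resnet101 is built from Bottleneck blocks (expansion 4), the upstream channel counts differ from the resnet34 example: layer1[1].conv1 receives 256 input channels and layer4[0].conv1 receives 1024, so pruning 5 and 11 channels yields the 251 and 1013 asserted above. That arithmetic can be sanity-checked without AIMET:

# Plain-arithmetic check of the channel counts asserted in this example.
# Bottleneck expansion in resnet101 is 4: layer1 outputs 64 * 4 = 256 channels
# and layer3 outputs 256 * 4 = 1024 channels.
layer1_out_channels = 64 * 4
layer3_out_channels = 256 * 4

layer1_1_conv1_in = layer1_out_channels - len([55, 56, 57, 58, 59])
layer4_0_conv1_in = layer3_out_channels - len(
    [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54])

assert layer1_1_conv1_in == 251
assert layer4_0_conv1_in == 1013
print(layer1_1_conv1_in, layer4_0_conv1_in)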