def generate_and_add_module_winnow_list_with_names(
        model: torch.nn.Module,
        list_of_modules_to_winnow: List[Tuple[torch.nn.Module, List[int]]]):
    """
    Generates the module name for each module to winnow, creates a tuple of
    (module, list of channels to winnow, module name), and collects the
    tuples in a list for later use.

    :param model: model containing the modules to winnow
    :param list_of_modules_to_winnow: list of (module, channels to winnow) tuples
    :return: list of (module, channels to winnow, module name) tuples
    """

    list_of_module_info = []
    if list_of_modules_to_winnow is not None:

        model_name = type(model).__name__
        logger.debug("Model name: %s", model_name)

        for module, list_of_channels_to_winnow in list_of_modules_to_winnow:
            name = get_layer_name(model, module)

            # This name doesn't contain the model's name.
            # Prepend the model's name to the module's name.
            name = '.'.join([model_name, name])

            mod_tuple = (module, list_of_channels_to_winnow, name)
            list_of_module_info.append(mod_tuple)

    return list_of_module_info
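
A minimal usage sketch for the function above (the model, module, and channel indices here are illustrative; get_layer_name and logger are assumed to be the same helpers the function itself uses):

import torch


class TwoConvNet(torch.nn.Module):
    """ Hypothetical two-conv model, for illustration only. """

    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(3, 16, kernel_size=3)
        self.conv2 = torch.nn.Conv2d(16, 8, kernel_size=3)

    def forward(self, x):
        return self.conv2(self.conv1(x))


model = TwoConvNet()
# Winnow input channels 1 and 3 of conv2.
module_info = generate_and_add_module_winnow_list_with_names(
    model, [(model.conv2, [1, 3])])
# Each entry is (module, channels to winnow, "TwoConvNet.conv2").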
Example No. 2
    def test_find_layer_groups_to_scale_for_network_with_residuals(self):

        torch.manual_seed(10)
        model = MockMobileNetV2()
        model.eval()

        fold_all_batch_norms(model, (1, 3, 224, 224))
        graph_search = GraphSearchUtils(model, (1, 3, 224, 224))
        layer_groups = graph_search.find_layer_groups_to_scale()
        self.assertEqual(4, len(layer_groups))
        self.assertIn([
            model.features[3].conv[0], model.features[3].conv[3],
            model.features[3].conv[6]
        ], layer_groups)
        self.assertIn([
            model.features[4].conv[0], model.features[4].conv[3],
            model.features[4].conv[6]
        ], layer_groups)
        self.assertIn([
            model.features[5].conv[0], model.features[5].conv[3],
            model.features[5].conv[6], model.features[6][0]
        ], layer_groups)

        for layer_group in layer_groups:
            print("Group ------- ")
            for module in layer_group:
                print("   " + get_layer_name(model, module))
Example No. 3
    def test_mask_propagation_through_concat(self):
        """ After the graph is constructed, the Op should have default masks and connectivity for all module types. """
        logger.debug("Test default mask and connectivity.")
        model = SingleConcat()

        # Test forward pass on the copied model before zeroing out channels of layers.
        input_shape = [1, 3, 224, 224]
        module_zero_channels_list = []
        module = model.conv4
        input_channels_to_prune = [
            1, 3, 5, 7, 9, 19, 21, 23, 25, 43, 45, 47, 49, 51, 57, 59, 61, 63
        ]

        module_mask_pair = (module, input_channels_to_prune)
        module_zero_channels_list.append(module_mask_pair)

        print("Order of modules in in the API:",
              [get_layer_name(model, m) for m, _ in module_zero_channels_list])
        # API version 2.
        winnowed_model, _ = winnow_model(model,
                                         input_shape,
                                         module_zero_channels_list,
                                         in_place=True,
                                         verbose=True)

        # validate winnowed net
        # input_tensor = torch.rand(input_shape).double()
        input_tensor = torch.rand(input_shape)
        # winnowed_model.double()
        winnowed_model.eval()
        _ = winnowed_model(input_tensor)
        # The test passes if the forward pass on the winnowed model completes without error.
        self.assertEqual(0, 0)
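
The SingleConcat model used above is not defined in this snippet. A plausible minimal stand-in (hypothetical, shown only to make the shape constraints concrete: conv4 must accept at least 64 input channels, since the pruned indices go up to 63):

import torch
import torch.nn as nn


class SingleConcat(nn.Module):
    """ Hypothetical stand-in: two conv branches concatenated before conv4. """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3)
        self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
        self.conv4 = nn.Conv2d(64, 8, kernel_size=3)

    def forward(self, x):
        x = self.conv1(x)
        # Winnowing conv4's input channels must split the mask across both branches.
        return self.conv4(torch.cat([self.conv2(x), self.conv3(x)], dim=1))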
Example No. 4
    def test_data_sub_sampling_and_reconstruction_without_bias(self):
        """Test end to end data sub sampling and reconstruction for MNIST conv2 layer (without bias)"""

        orig_model = mnist_model()
        # set bias to None
        orig_model.conv2.bias = None

        comp_model = copy.deepcopy(orig_model)

        dataset_size = 100
        batch_size = 10
        # max out number of batches
        number_of_batches = 10
        samples_per_image = 10
        num_reconstruction_samples = number_of_batches * batch_size * samples_per_image
        # create fake data loader with image size (1, 28, 28)
        data_loader = create_fake_data_loader(dataset_size=dataset_size,
                                              batch_size=batch_size)

        conv2_pr_layer_name = get_layer_name(comp_model, comp_model.conv2)

        sampled_inp_data, sampled_out_data = DataSubSampler.get_sub_sampled_data(
            orig_layer=orig_model.conv2,
            pruned_layer=comp_model.conv2,
            orig_model=orig_model,
            comp_model=comp_model,
            data_loader=data_loader,
            num_reconstruction_samples=num_reconstruction_samples)

        conv_layer = get_layer_by_name(model=comp_model,
                                       layer_name=conv2_pr_layer_name)

        assert conv_layer == comp_model.conv2
        # original weight before reconstruction
        orig_weight = conv_layer.weight.data
        WeightReconstructor.reconstruct_params_for_conv2d(
            layer=conv_layer,
            input_data=sampled_inp_data,
            output_data=sampled_out_data)
        # new weight after reconstruction
        new_weight = conv_layer.weight.data
        new_bias = conv_layer.bias

        self.assertEqual(new_weight.shape, orig_weight.shape)
        self.assertEqual(new_bias, None)
        # if you increase the amount of data (dataset size, number of batches, or samples per image),
        # reduce the absolute tolerance
        self.assertTrue(
            np.allclose(to_numpy(new_weight), to_numpy(orig_weight),
                        atol=1e-0))
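
Conceptually, the reconstruction step fits the pruned layer's parameters so that it reproduces the original layer's outputs on the sampled data, i.e. it solves a linear least-squares problem. A minimal numpy sketch of that idea (illustrative shapes, inputs already unrolled im2col-style; this is not AIMET's actual implementation):

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((1000, 16))    # sampled inputs, one row per sample
W_true = rng.standard_normal((16, 8))  # "original" layer weights
Y = X @ W_true                         # sampled outputs of the original layer

# Recover weights that best map the sampled inputs to the sampled outputs.
W_hat, *_ = np.linalg.lstsq(X, Y, rcond=None)
assert np.allclose(W_hat, W_true, atol=1e-8)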
Example No. 5
    def test_mask_propagation_through_single_chunk(self):
        """ After the graph is constructed, the Op should have default masks and connectivity for all module types. """
        logger.debug("Test default mask and connectivity.")
        model = SingleChunk()

        # Test forward pass on the copied model before zeroing out channels of layers.
        input_shape = [1, 3, 224, 224]

        module_zero_channels_list = []
        module = model.conv4
        input_channels_to_prune = [5, 9]

        module_mask_pair = (module, input_channels_to_prune)
        module_zero_channels_list.append(module_mask_pair)

        print("Order of modules in in the API:",
              [get_layer_name(model, m) for m, _ in module_zero_channels_list])
        # API version 2.
        _, _ = winnow_model(model,
                            input_shape,
                            module_zero_channels_list,
                            in_place=True,
                            verbose=True)
        # The test passes if the forward pass on the winnowed model completes without error.
        self.assertEqual(0, 0)
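
As with SingleConcat above, the SingleChunk model is not defined in this snippet. A hypothetical minimal stand-in in which conv4 consumes the halves of a torch.chunk split (conv4 needs at least 10 input channels for the pruned indices [5, 9]):

import torch
import torch.nn as nn


class SingleChunk(nn.Module):
    """ Hypothetical stand-in: conv1's output is chunked before conv4. """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
        self.conv4 = nn.Conv2d(16, 8, kernel_size=3)

    def forward(self, x):
        first_half, second_half = torch.chunk(self.conv1(x), 2, dim=1)
        # Winnowing conv4's input channels must propagate through the chunk op.
        return self.conv4(first_half) + self.conv4(second_half)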
Example No. 6
    def test_conv_to_conv_mask_propagation(self):
        """ After the graph is constructed, the Op should have default masks and connectivity for all module types. """
        logger.debug("Test default mask and connectivity.")
        model = SingleResidual()

        # Test forward pass on the copied model before winnowing.
        input_shape = [1, 3, 224, 224]
        input_tensor = torch.rand(input_shape).double()
        model.double()
        model.eval()
        print(
            "test_conv_to_conv_mask_propagation(): Testing forward pass before winnowing."
        )
        validation_output = model(input_tensor)

        # Convert the model back to float.
        model.float()

        module_zero_channels_list = []
        module = model.conv3
        input_channels_to_prune = [1, 3]

        module_mask_pair = (module, input_channels_to_prune)
        module_zero_channels_list.append(module_mask_pair)

        print("Order of modules in in the API:",
              [get_layer_name(model, m) for m, _ in module_zero_channels_list])
        # API version 2.
        winnowed_model, _ = winnow_model(model,
                                         input_shape,
                                         module_zero_channels_list,
                                         in_place=True,
                                         verbose=True)

        # validate winnowed net
        input_tensor = torch.rand(input_shape).double()
        winnowed_model.double()
        winnowed_model.eval()
        test_output = winnowed_model(input_tensor)

        self.assertTrue(test_output.shape == validation_output.shape)
        # self.assertTrue(test_output.allclose(validation_output)) # TBD. Why is this failing?
        print(test_output)
        print(validation_output)

        # In the winnowed model, conv3 has in_channels = 62, out_channels = 64
        self.assertTrue(winnowed_model.conv3.in_channels == 62)
        self.assertTrue(winnowed_model.conv3.out_channels == 64)

        # The winnowed model's bn2 layer has 62 num_features
        self.assertEqual(winnowed_model.bn2.num_features, 62)
        self.assertEqual(list(winnowed_model.bn2.weight.shape), [62])
        self.assertEqual(list(winnowed_model.bn2.bias.shape), [62])
        self.assertEqual(list(winnowed_model.bn2.running_mean.shape), [62])
        self.assertEqual(list(winnowed_model.bn2.running_var.shape), [62])

        # In the winnowed model, conv2 has in_channels = 64, out_channels = 62 (impacted by conv3 pruning)
        self.assertTrue(winnowed_model.conv2.in_channels == 64)
        self.assertTrue(winnowed_model.conv2.out_channels == 62)

        print(
            "test_conv_to_conv_mask_propagation(): Successfully validated winnowed  model."
        )
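
The channel bookkeeping these assertions check can be sketched by hand: removing input channels [1, 3] from conv3 forces conv2 and bn2 upstream to drop the matching output channels and features. An illustrative sketch in plain PyTorch (not AIMET's implementation), reproducing the 64 -> 62 shapes asserted above:

import torch.nn as nn

conv2 = nn.Conv2d(64, 64, kernel_size=3)
bn2 = nn.BatchNorm2d(64)
conv3 = nn.Conv2d(64, 64, kernel_size=3)

# Channels that survive after pruning conv3's input channels 1 and 3.
keep = [i for i in range(64) if i not in (1, 3)]

# conv3 loses input channels 1 and 3 ...
conv3.weight.data = conv3.weight.data[:, keep, :, :]
conv3.in_channels = 62

# ... so conv2 must drop the matching output channels ...
conv2.weight.data = conv2.weight.data[keep, :, :, :]
conv2.bias.data = conv2.bias.data[keep]
conv2.out_channels = 62

# ... and bn2 the matching features.
bn2.weight.data = bn2.weight.data[keep]
bn2.bias.data = bn2.bias.data[keep]
bn2.running_mean = bn2.running_mean[keep]
bn2.running_var = bn2.running_var[keep]
bn2.num_features = 62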