def test_select_per_layer_comp_ratios_with_spatial_svd_pruner(self):

        pruner = SpatialSvdPruner()
        eval_func = unittest.mock.MagicMock()
        rounding_algo = unittest.mock.MagicMock()
        eval_func.side_effect = [
            10, 20, 30, 40, 50, 60, 70, 80, 90, 11, 21, 31, 35, 40, 45, 50, 55,
            60
        ]
        rounding_algo.round.side_effect = [
            0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.3, 0.4,
            0.5, 0.6, 0.7, 0.8, 0.9
        ]
        model = mnist_torch_model.Net()
        layer_db = LayerDatabase(model, input_shape=(1, 1, 28, 28))

        selected_layers = [
            layer for layer in layer_db if isinstance(layer.module, nn.Conv2d)
        ]
        layer_db.mark_picked_layers(selected_layers)

        # Instantiate child
        greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo(
            layer_db,
            pruner,
            SpatialSvdCostCalculator(),
            eval_func,
            20,
            CostMetric.mac,
            Decimal(0.4),
            10,
            True,
            None,
            rounding_algo,
            False,
            bokeh_session=None)
        layer_comp_ratio_list, stats = greedy_algo.select_per_layer_comp_ratios()

        original_cost = SpatialSvdCostCalculator.compute_model_cost(layer_db)

        for layer in layer_db:
            if layer not in selected_layers:
                layer_comp_ratio_list.append(LayerCompRatioPair(layer, None))
        compressed_cost = SpatialSvdCostCalculator.calculate_compressed_cost(
            layer_db, layer_comp_ratio_list, CostMetric.mac)

        actual_compression_ratio = compressed_cost.mac / original_cost.mac
        self.assertTrue(
            math.isclose(Decimal(0.3), actual_compression_ratio, abs_tol=0.8))

        print('\n')
        for pair in layer_comp_ratio_list:
            print(pair)
    def test_per_layer_eval_scores(self):

        url, process = start_bokeh_server_session(8006)
        bokeh_session = BokehServerSession(url=url, session_id="compression")

        pruner = unittest.mock.MagicMock()
        eval_func = unittest.mock.MagicMock()

        model = mnist_torch_model.Net().to('cpu')

        layer_db = LayerDatabase(model, input_shape=(1, 1, 28, 28))
        layer1 = layer_db.find_layer_by_name('conv1')
        layer_db.mark_picked_layers([layer1])

        eval_func.side_effect = [90, 80, 70, 60, 50, 40, 30, 20, 10]

        # Instantiate child
        greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo(
            layer_db,
            pruner,
            SpatialSvdCostCalculator(),
            eval_func,
            20,
            CostMetric.mac,
            0.5,
            10,
            True,
            None,
            None,
            False,
            bokeh_session=None)
        progress_bar = ProgressBar(1,
                                   "eval scores",
                                   "green",
                                   bokeh_session=bokeh_session)
        data_table = DataTable(num_columns=3,
                               num_rows=1,
                               column_names=[
                                   '0.1', '0.2', '0.3', '0.4', '0.5', '0.6',
                                   '0.7', '0.8', '0.9'
                               ],
                               row_index_names=[layer1.name],
                               bokeh_session=bokeh_session)
        pruner.prune_model.return_value = layer_db
        eval_dict = greedy_algo._compute_layerwise_eval_score_per_comp_ratio_candidate(
            data_table, progress_bar, layer1)

        self.assertEqual(90, eval_dict[Decimal('0.1')])
        bokeh_session.server_session.close("test complete")
        os.killpg(os.getpgid(process.pid), signal.SIGTERM)
# Example 3
    def test_pick_compression_layers_top_n_layers(self):

        # Memory
        logger.debug(self.id())
        model = MnistModel().to("cpu")

        layer_database = LayerDatabase(model=model, input_shape=(1, 1, 28, 28))

        with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated._perform_layer_selection'):
            layer_selector = ls.LayerSelectorDeprecated(
                aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, None, layer_database,
                num_layers=2)

        picked_layers = layer_selector._pick_compression_layers(run_model=mnist_model.evaluate,
                                                                cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory,
                                                                layer_select_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers,
                                                                num_layers=2)

        self.assertEqual(picked_layers[0].module, model.fc1)
        self.assertEqual(picked_layers[1].module, model.conv2)
        self.assertEqual(2, len(picked_layers))

        # MAC
        picked_layers = layer_selector._pick_compression_layers(run_model=mnist_model.evaluate, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.mac,
                                                                layer_select_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers,
                                                                num_layers=2)

        self.assertEqual(picked_layers[0].module, model.conv2)
        self.assertEqual(picked_layers[1].module, model.fc1)
        self.assertEqual(2, len(picked_layers))
# Example 4
    def testSpatialSvd(self):

        torch.manual_seed(1)

        model = mnist_torch_model.Net()

        rounding_algo = unittest.mock.MagicMock()
        rounding_algo.round.side_effect = [
            0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.3, 0.4,
            0.5, 0.6, 0.7, 0.8, 0.9
        ]

        mock_eval = unittest.mock.MagicMock()
        mock_eval.side_effect = [
            100, 90, 80, 70, 60, 50, 40, 30, 20, 10, 90, 80, 70, 60, 50, 40,
            30, 20, 10, 50
        ]

        layer_db = LayerDatabase(model, input_shape=(1, 1, 28, 28))
        pruner = SpatialSvdPruner()
        comp_ratio_select_algo = GreedyCompRatioSelectAlgo(
            layer_db,
            pruner,
            SpatialSvdCostCalculator(),
            mock_eval,
            20,
            CostMetric.mac,
            Decimal(0.5),
            10,
            True,
            None,
            rounding_algo,
            True,
            bokeh_session=None)

        layer_selector = ConvNoDepthwiseLayerSelector()
        spatial_svd_algo = CompressionAlgo(
            layer_db,
            comp_ratio_select_algo,
            pruner,
            mock_eval,
            layer_selector,
            modules_to_ignore=[],
            cost_calculator=SpatialSvdCostCalculator(),
            use_cuda=next(model.parameters()).is_cuda)

        compressed_layer_db, stats = spatial_svd_algo.compress_model(
            CostMetric.mac, trainer=None)

        self.assertTrue(
            isinstance(compressed_layer_db.model.conv1, torch.nn.Sequential))
        self.assertTrue(
            isinstance(compressed_layer_db.model.conv2, torch.nn.Sequential))
        self.assertTrue(stats.per_layer_stats[0].compression_ratio <= 0.5)
        self.assertEqual(0.3, stats.per_layer_stats[1].compression_ratio)

        print("Compressed model:")
        print(compressed_layer_db.model)

        print(stats)
# Example 5
    def test_pick_compression_layers_top_x_percent(self):

        logger.debug(self.id())
        model = MnistModel().to("cpu")

        layer_database = LayerDatabase(model=model, input_shape=(1, 1, 28, 28))

        with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated._perform_layer_selection'):
            layer_selector = ls.LayerSelectorDeprecated(
                aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent, None, layer_database, percent_thresh=None)

        # 100 % threshold
        picked_layers = layer_selector._pick_compression_layers(run_model=mnist_model.evaluate, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory,
                                                                layer_select_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent,
                                                                percent_thresh=100)

        self.assertEqual(model.fc1, picked_layers[0].module)
        self.assertEqual(model.conv2, picked_layers[1].module)
        self.assertEqual(model.fc2, picked_layers[2].module)
        self.assertEqual(3, len(picked_layers))

        # 80% criterion

        picked_layers = layer_selector._pick_compression_layers(run_model=mnist_model.evaluate, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory,
                                                                layer_select_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent,
                                                                percent_thresh=80)

        self.assertEqual(model.conv2, picked_layers[0].module)
        self.assertEqual(model.fc2, picked_layers[1].module)
        self.assertEqual(2, len(picked_layers))
# Example 6
    def test_choose_best_ranks(self):

        model = MnistModel().to("cpu")
        layer_database = LayerDatabase(model=model, input_shape=(1, 1, 28, 28))
        run_model_return_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
        run_model = unittest.mock.Mock(side_effect=run_model_return_values)

        with unittest.mock.patch('aimet_torch.layer_database.LayerDatabase'):
            with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated'):
                svd = s.SvdImpl(model=model, run_model=run_model, run_model_iterations=1, input_shape=(1, 1, 28, 28),
                                compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory,
                                layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=2)

        svd._network_cost = (500, 500)

        svd._svd_lib_ref = create_autospec(pymo.Svd, instance=True)
        with unittest.mock.patch('aimet_torch.svd.model_stats_calculator.ModelStats.compute_compression_ratio') as compute_compression_ratio:
            with unittest.mock.patch('aimet_torch.svd.svd_pruner_deprecated.ModelPruner.create_compressed_model') as create_compressed_model:
                with unittest.mock.patch('aimet_torch.svd.rank_selector.RankSelector._select_candidate_ranks') as select_candidate_ranks:
                    select_candidate_ranks.return_value = 20
                    compute_compression_ratio.side_effect = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
                    create_compressed_model.return_value = None, None, None
                    rank_selector = rank_select.RankSelector(svd_lib_ref=svd._svd_lib_ref)
                    rank_selector.choose_best_rank(model=model, run_model=run_model, run_model_iterations=1,
                                                   use_cuda=False, metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, error_margin=1,
                                                   baseline_perf=0.5, num_rank_indices=20, database=layer_database)
# Example 7
    def test_prune_layer(self):

        model = mnist_model.Net()

        # Create a layer database
        orig_layer_db = LayerDatabase(model, input_shape=(1, 1, 28, 28))
        # Copy the db
        comp_layer_db = copy.deepcopy(orig_layer_db)

        conv2 = comp_layer_db.find_layer_by_name('conv2')
        weight_svd_pruner = WeightSvdPruner()
        weight_svd_pruner._prune_layer(orig_layer_db, comp_layer_db, conv2, 0.5, aimet_common.defs.CostMetric.mac)

        conv2_a = comp_layer_db.find_layer_by_name('conv2.0')
        conv2_b = comp_layer_db.find_layer_by_name('conv2.1')

        self.assertEqual((1, 1), conv2_a.module.kernel_size)
        self.assertEqual(32, conv2_a.module.in_channels)
        self.assertEqual(15, conv2_a.module.out_channels)

        self.assertEqual((5, 5), conv2_b.module.kernel_size)
        self.assertEqual(15, conv2_b.module.in_channels)
        self.assertEqual(64, conv2_b.module.out_channels)

        self.assertTrue(isinstance(comp_layer_db.model.conv2, nn.Sequential))

        for layer in comp_layer_db:
            print("Layer: " + layer.name)
            print("   Module: " + str(layer.module))

        print(comp_layer_db.model)
# Example 8
    def test_split_manual_rank(self):
        model = MnistModel().to("cpu")
        run_model = mnist_model.evaluate
        logger.debug(self.id())

        intf_defs = aimet_torch.svd.svd_intf_defs_deprecated

        layer_database = LayerDatabase(model=model, input_shape=(1, 1, 28, 28))
        with unittest.mock.patch('aimet_torch.layer_database.LayerDatabase'):
            with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated'):
                svd = s.SvdImpl(model=model, run_model=None, run_model_iterations=1, input_shape=(1, 1, 28, 28),
                                compression_type=intf_defs.CompressionTechnique.svd,
                                cost_metric=intf_defs.CostMetric.memory,
                                layer_selection_scheme=intf_defs.LayerSelectionScheme.manual,
                                layers_to_compress=[model.fc1])
        layer_rank_list = [[model.fc1, 9]]
        with unittest.mock.patch('aimet_common.cost_calculator.CostCalculator.compute_network_cost') as compute_network_cost:
            compute_network_cost.return_value = cc.Cost(100, 200)
            svd._svd_lib_ref = create_autospec(pymo.Svd, instance=True)
            split_weights = [np.zeros((400, model.fc1.in_features)).flatten().tolist(),
                             np.zeros((model.fc1.out_features, 400)).flatten().tolist()]
            svd._svd_lib_ref.SplitLayerWeights.return_value = split_weights

            split_biases = [np.zeros(400).flatten().tolist(),
                            np.zeros(model.fc1.out_features).flatten().tolist()]
            svd._svd_lib_ref.SplitLayerBiases.return_value = split_biases
            rank_selector = rank_select.RankSelector(svd_lib_ref=svd._svd_lib_ref)
            rank_data_list, svd_rank_pair_dict = rank_selector.split_manual_rank(model=model, run_model=run_model,
                                                                                 run_model_iterations=1, use_cuda=False,
                                                                                 metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory,
                                                                                 database=layer_database,
                                                                                 layer_rank_list=layer_rank_list)
            self.assertEqual(len(svd_rank_pair_dict), 1)
    def test_prune_layer(self):

        orig_model = mnist_torch_model.Net()
        orig_model.eval()
        # Create a layer database
        orig_layer_db = LayerDatabase(orig_model, input_shape=(1, 1, 28, 28))
        # Copy the db
        comp_layer_db = copy.deepcopy(orig_layer_db)

        dataset_size = 100
        batch_size = 10
        # max out number of batches
        number_of_batches = 10
        samples_per_image = 10
        num_reconstruction_samples = number_of_batches * batch_size * samples_per_image
        # create fake data loader with image size (1, 28, 28)
        data_loader = create_fake_data_loader(dataset_size=dataset_size,
                                              batch_size=batch_size)

        input_channel_pruner = InputChannelPruner(
            data_loader=data_loader,
            input_shape=(1, 1, 28, 28),
            num_reconstruction_samples=num_reconstruction_samples,
            allow_custom_downsample_ops=True)

        conv2 = comp_layer_db.find_layer_by_name('conv2')
        input_channel_pruner._prune_layer(orig_layer_db, comp_layer_db, conv2,
                                          0.5, CostMetric.mac)

        self.assertEqual(16, comp_layer_db.model.conv2.in_channels)
        self.assertEqual(64, comp_layer_db.model.conv2.out_channels)
# Example 10
    def test_set_parent_attribute_two_deep(self):
        """With a two-deep model"""
        class SubNet(nn.Module):
            def __init__(self):
                super(SubNet, self).__init__()
                self.conv1 = nn.Conv2d(30, 40, 5)
                self.conv2 = nn.Conv2d(40, 50, kernel_size=5)

            def forward(self, *inputs):
                pass

        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = nn.Conv2d(1, 10, 5)
                self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
                self.conv2_drop = nn.Dropout2d()
                self.subnet1 = SubNet()
                self.fc1 = nn.Linear(320, 50)
                self.subnet2 = SubNet()
                self.fc2 = nn.Linear(50, 10)

            def forward(self, *inputs):
                pass

        net = Net()
        model = net.to("cpu")

        # create layer attribute
        output_activation_shape = None

        layers = {id(model.subnet1.conv2): Layer(model.subnet1.conv2, id(model.subnet1.conv2),
                                                 output_activation_shape),
                  id(model.subnet2.conv1): Layer(model.subnet2.conv1, id(model.subnet2.conv1),
                                                 output_activation_shape),
                  id(model.fc2):           Layer(model.fc2, id(model.fc2),
                                                 output_activation_shape)}

        LayerDatabase.set_reference_to_parent_module(model, layers)

        # child : model.subnet1.conv2 --> parent : model.subnet1
        self.assertEqual(model.subnet1, layers[id(model.subnet1.conv2)].parent_module)
        # child : model.subnet2.conv1 --> parent : model.subnet2
        self.assertEqual(model.subnet2, layers[id(model.subnet2.conv1)].parent_module)
        # child : model.fc2 --> parent : model
        self.assertEqual(model, layers[id(model.fc2)].parent_module)
# Example 11
    def test_set_attributes_with_sequentials(self):
        """With a one-deep model"""
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = nn.Conv2d(1, 10, 5)
                self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
                self.conv2_drop = nn.Dropout2d()
                self.subnet1 = nn.Sequential(
                    nn.Conv2d(1, 10, 5),
                    nn.ReLU(),
                    nn.Conv2d(10, 20, 5)
                )
                self.fc1 = nn.Linear(320, 50)
                self.subnet2 = nn.Sequential(
                    nn.Conv2d(1, 10, 5),
                    nn.ReLU(),
                    nn.Conv2d(1, 10, 5)
                )
                self.fc2 = nn.Linear(50, 10)

            def forward(self, *inputs):
                pass

        net = Net()
        model = net.to("cpu")

        # create layer attribute
        output_activation_shape = None

        layers = {id(model.subnet1[2]): Layer(model.subnet1[2], id(model.subnet1[2]),
                                              output_activation_shape),
                  id(model.subnet2[0]): Layer(model.subnet2[0], id(model.subnet2[0]),
                                              output_activation_shape),
                  id(model.fc2): Layer(model.fc2, id(model.fc2),
                                       output_activation_shape)}

        LayerDatabase.set_reference_to_parent_module(model, layers)

        # child : model.subnet1.2 --> parent : model.subnet1
        self.assertEqual(model.subnet1, layers[id(model.subnet1[2])].parent_module)
        # child : model.subnet2.0 --> parent : model.subnet2
        self.assertEqual(model.subnet2, layers[id(model.subnet2[0])].parent_module)
        # child : model.fc2 --> parent : model
        self.assertEqual(model, layers[id(model.fc2)].parent_module)
# Example 12
    @staticmethod
    def _get_layer_pairs(layer_db: LayerDatabase, module_comp_ratio_pairs: List[ModuleCompRatioPair]):
        """Convert (module, comp-ratio) pairs into (Layer, comp-ratio) pairs using the layer database"""
        layer_comp_ratio_pairs = []

        for pair in module_comp_ratio_pairs:
            layer_comp_ratio_pair = LayerCompRatioPair(layer_db.find_layer_by_module(pair.module),
                                                       pair.comp_ratio)
            layer_comp_ratio_pairs.append(layer_comp_ratio_pair)

        return layer_comp_ratio_pairs
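
    # --- Usage sketch (illustrative; not part of the original listing) ----------
    # Maps user-facing (module, comp-ratio) pairs onto the Layer objects tracked by
    # a LayerDatabase via the helper above. Assumptions: the helper lives on a
    # factory class referred to here as "CompressionFactory", and
    # ModuleCompRatioPair takes (module, comp_ratio); imports are omitted, as
    # elsewhere in this listing.
    def example_get_layer_pairs(model):
        layer_db = LayerDatabase(model, input_shape=(1, 1, 28, 28))
        module_pairs = [ModuleCompRatioPair(model.conv2, Decimal(0.5))]
        # Each returned entry is a LayerCompRatioPair for the Layer wrapping model.conv2
        return CompressionFactory._get_layer_pairs(layer_db, module_pairs)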
# Example 13
    @classmethod
    def create_spatial_svd_algo(cls, model: torch.nn.Module, eval_callback: EvalFunction, eval_iterations,
                                input_shape: Tuple, cost_metric: CostMetric,
                                params: SpatialSvdParameters, bokeh_session: BokehServerSession) -> CompressionAlgo:
        """
        Factory method to construct SpatialSvdCompressionAlgo

        :param model: Model to compress
        :param eval_callback: Evaluation callback for the model
        :param eval_iterations: Evaluation iterations
        :param input_shape: Shape of the input tensor for model
        :param cost_metric: Cost metric (mac or memory)
        :param params: Spatial SVD compression parameters
        :param bokeh_session: The Bokeh Session to display plots
        :return: An instance of SpatialSvdCompressionAlgo
        """

        # pylint: disable=too-many-locals
        # Rationale: Factory functions unfortunately need to deal with a lot of parameters

        # Create a layer database
        layer_db = LayerDatabase(model, input_shape)
        use_cuda = next(model.parameters()).is_cuda

        # Create a pruner
        pruner = SpatialSvdPruner()
        cost_calculator = SpatialSvdCostCalculator()
        comp_ratio_rounding_algo = RankRounder(params.multiplicity, cost_calculator)

        # Create a comp-ratio selection algorithm
        if params.mode == SpatialSvdParameters.Mode.auto:
            greedy_params = params.mode_params.greedy_params
            comp_ratio_select_algo = GreedyCompRatioSelectAlgo(layer_db, pruner, cost_calculator, eval_callback,
                                                               eval_iterations, cost_metric,
                                                               greedy_params.target_comp_ratio,
                                                               greedy_params.num_comp_ratio_candidates,
                                                               greedy_params.use_monotonic_fit,
                                                               greedy_params.saved_eval_scores_dict,
                                                               comp_ratio_rounding_algo, use_cuda,
                                                               bokeh_session=bokeh_session)
            layer_selector = ConvNoDepthwiseLayerSelector()
            modules_to_ignore = params.mode_params.modules_to_ignore
        else:
            # Convert (module,comp-ratio) pairs to (layer,comp-ratio) pairs
            layer_comp_ratio_pairs = cls._get_layer_pairs(layer_db, params.mode_params.list_of_module_comp_ratio_pairs)

            comp_ratio_select_algo = ManualCompRatioSelectAlgo(layer_db,
                                                               layer_comp_ratio_pairs,
                                                               comp_ratio_rounding_algo, cost_metric=cost_metric)

            layer_selector = ManualLayerSelector(layer_comp_ratio_pairs)
            modules_to_ignore = []

        # Create the overall Spatial SVD compression algorithm
        spatial_svd_algo = CompressionAlgo(layer_db, comp_ratio_select_algo, pruner, eval_callback,
                                           layer_selector, modules_to_ignore, cost_calculator, use_cuda)

        return spatial_svd_algo
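
    # --- Usage sketch (illustrative; not part of the original listing) ----------
    # Call shape for the factory above. Assumptions: the classmethod is exposed on
    # a factory class called "CompressionFactory" here, "evaluate" is a
    # user-supplied eval callback, and "spatial_svd_params" is a fully built
    # SpatialSvdParameters object; imports are omitted, as elsewhere in this listing.
    def example_run_spatial_svd(model, evaluate, spatial_svd_params):
        algo = CompressionFactory.create_spatial_svd_algo(
            model=model,
            eval_callback=evaluate,
            eval_iterations=20,
            input_shape=(1, 1, 28, 28),
            cost_metric=CostMetric.mac,
            params=spatial_svd_params,
            bokeh_session=None)
        # compress_model() is driven the same way as in testSpatialSvd above
        compressed_layer_db, stats = algo.compress_model(CostMetric.mac, trainer=None)
        return compressed_layer_db, stats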
    def test_eval_scores_with_spatial_svd_pruner(self):

        pruner = SpatialSvdPruner()
        eval_func = unittest.mock.MagicMock()
        eval_func.side_effect = [
            90, 80, 70, 60, 50, 40, 30, 20, 10, 91, 81, 71, 61, 51, 41, 31, 21,
            11
        ]

        model = mnist_torch_model.Net()

        # Create a layer database
        layer_db = LayerDatabase(model, input_shape=(1, 1, 28, 28))

        layer1 = layer_db.find_layer_by_name('conv1')
        layer2 = layer_db.find_layer_by_name('conv2')
        layer_db.mark_picked_layers([layer1, layer2])

        # Instantiate child
        greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo(
            layer_db,
            pruner,
            SpatialSvdCostCalculator(),
            eval_func,
            20,
            CostMetric.mac,
            0.5,
            10,
            True,
            None,
            None,
            True,
            bokeh_session=None)
        eval_dict = greedy_algo._compute_eval_scores_for_all_comp_ratio_candidates()

        print()
        print(eval_dict)
        self.assertEqual(90, eval_dict['conv1'][Decimal('0.1')])

        self.assertEqual(51, eval_dict['conv2'][Decimal('0.5')])
        self.assertEqual(21, eval_dict['conv2'][Decimal('0.8')])
# Example 15
    def test_prune_model_2_layers(self):

        model = mnist_torch_model.Net()

        # Create a layer database
        orig_layer_db = LayerDatabase(model, input_shape=(1, 1, 28, 28))
        # Copy the db
        comp_layer_db = copy.deepcopy(orig_layer_db)

        conv1 = comp_layer_db.find_layer_by_name('conv1')
        conv2 = comp_layer_db.find_layer_by_name('conv2')
        pruner = SpatialSvdPruner()

        layer_db = pruner.prune_model(orig_layer_db, [
            LayerCompRatioPair(conv1, Decimal(0.5)),
            LayerCompRatioPair(conv2, Decimal(0.5))
        ],
                                      CostMetric.mac,
                                      trainer=None)

        conv1_a = layer_db.find_layer_by_name('conv1.0')
        conv1_b = layer_db.find_layer_by_name('conv1.1')

        self.assertEqual((5, 1), conv1_a.module.kernel_size)
        self.assertEqual(1, conv1_a.module.in_channels)
        self.assertEqual(2, conv1_a.module.out_channels)

        self.assertEqual((1, 5), conv1_b.module.kernel_size)
        self.assertEqual(2, conv1_b.module.in_channels)
        self.assertEqual(32, conv1_b.module.out_channels)

        conv2_a = layer_db.find_layer_by_name('conv2.0')
        conv2_b = layer_db.find_layer_by_name('conv2.1')

        self.assertEqual((5, 1), conv2_a.module.kernel_size)
        self.assertEqual(32, conv2_a.module.in_channels)
        self.assertEqual(53, conv2_a.module.out_channels)

        self.assertEqual((1, 5), conv2_b.module.kernel_size)
        self.assertEqual(53, conv2_b.module.in_channels)
        self.assertEqual(64, conv2_b.module.out_channels)

        self.assertTrue(isinstance(layer_db.model.conv1, torch.nn.Sequential))
        self.assertTrue(isinstance(layer_db.model.conv2, torch.nn.Sequential))

        for layer in layer_db:
            print("Layer: " + layer.name)
            print("   Module: " + str(layer.module))

        print(layer_db.model)
# Example 16
    def test_split_conv_layer_with_mo(self):

        logger.debug(self.id())
        model = mnist_model.Net().to("cpu")

        layer_database = LayerDatabase(model=model, input_shape=(1, 1, 28, 28))

        with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated'):
            svd = s.SvdImpl(model=model, run_model=mnist_model.evaluate, run_model_iterations=1,
                            input_shape=(1, 1, 28, 28),
                            compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory,
                            layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=2)

        conv2 = layer_database.find_layer_by_module(model.conv2)
        pymo_utils.PymoSvdUtils.configure_layers_in_pymo_svd([conv2], aimet_common.defs.CostMetric.mac, svd._svd_lib_ref)

        split_layer = svd_pruner_deprecated.DeprecatedSvdPruner
        seq, conv_a, conv_b = split_layer.prune_layer(conv2, 28, svd._svd_lib_ref)

        print('\n')
        weight_arr = conv_a.module.weight.detach().numpy().flatten()
        weight_arr = weight_arr[0:10]
        print(weight_arr)

        self.assertEqual((28, model.conv2.in_channels, 1, 1), conv_a.module.weight.shape)
        self.assertEqual([28], list(conv_a.module.bias.shape))
        self.assertEqual((model.conv2.out_channels, 28, 5, 5), conv_b.module.weight.shape)
        self.assertEqual([model.conv2.out_channels], list(conv_b.module.bias.shape))

        self.assertEqual(model.conv2.stride, conv_a.module.stride)
        self.assertEqual(model.conv2.stride, conv_b.module.stride)

        self.assertEqual((0, 0), conv_a.module.padding)
        self.assertEqual(model.conv2.padding, conv_b.module.padding)

        self.assertEqual((1, 1), conv_a.module.kernel_size)
        self.assertEqual(model.conv2.kernel_size, conv_b.module.kernel_size)
# Example 17
    def test_prune_layer(self):

        model = mnist_torch_model.Net()

        # Create a layer database
        orig_layer_db = LayerDatabase(model, input_shape=(1, 1, 28, 28))
        # Copy the db
        comp_layer_db = copy.deepcopy(orig_layer_db)

        conv1 = comp_layer_db.find_layer_by_name('conv1')
        spatial_svd_pruner = SpatialSvdPruner()
        spatial_svd_pruner._prune_layer(orig_layer_db, comp_layer_db, conv1,
                                        0.5, CostMetric.mac)

        conv1_a = comp_layer_db.find_layer_by_name('conv1.0')
        conv1_b = comp_layer_db.find_layer_by_name('conv1.1')

        self.assertEqual((5, 1), conv1_a.module.kernel_size)
        self.assertEqual(1, conv1_a.module.in_channels)
        self.assertEqual(2, conv1_a.module.out_channels)

        self.assertEqual((1, 5), conv1_b.module.kernel_size)
        self.assertEqual(2, conv1_b.module.in_channels)
        self.assertEqual(32, conv1_b.module.out_channels)

        self.assertTrue(
            isinstance(comp_layer_db.model.conv1, torch.nn.Sequential))

        for layer in comp_layer_db:
            print("Layer: " + layer.name)
            print("   Module: " + str(layer.module))

        print(comp_layer_db.model)

        # check the output shapes of two newly created split layers
        # first split layer output
        conv1_a_output = comp_layer_db.model.conv1[0](torch.rand(1, 1, 28, 28))

        # second split layer output
        conv1_b_output = comp_layer_db.model.conv1[1](conv1_a_output)

        self.assertEqual(conv1_a.output_shape, list(conv1_a_output.shape))
        self.assertEqual(conv1_b.output_shape, list(conv1_b_output.shape))
    def test_prune_layer_with_seq(self):
        """ Test end to end prune layer with resnet18"""

        batch_size = 2
        dataset_size = 1000
        number_of_batches = 1
        samples_per_image = 10
        num_reconstruction_samples = number_of_batches * batch_size * samples_per_image

        resnet18_model = models.resnet18(pretrained=True)
        # Create a layer database
        orig_layer_db = LayerDatabase(resnet18_model,
                                      input_shape=(1, 3, 224, 224))
        # Copy the db
        comp_layer_db = copy.deepcopy(orig_layer_db)

        data_loader = create_fake_data_loader(dataset_size=dataset_size,
                                              batch_size=batch_size,
                                              image_size=(3, 224, 224))

        input_channel_pruner = InputChannelPruner(
            data_loader=data_loader,
            input_shape=(1, 3, 224, 224),
            num_reconstruction_samples=num_reconstruction_samples,
            allow_custom_downsample_ops=True)

        conv_below_split = comp_layer_db.find_layer_by_name('layer1.1.conv1')
        input_channel_pruner._prune_layer(orig_layer_db, comp_layer_db,
                                          conv_below_split, 0.25,
                                          CostMetric.mac)

        # 64 * 0.25 = 16
        self.assertEqual(comp_layer_db.model.layer1[1].conv1[1].in_channels,
                         16)
        self.assertEqual(comp_layer_db.model.layer1[1].conv1[1].out_channels,
                         64)
        self.assertEqual(
            list(comp_layer_db.model.layer1[1].conv1[1].weight.shape),
            [64, 16, 3, 3])
# Example 19
    def _winnow_and_reconstruct_layer(self, orig_layer_db: LayerDatabase,
                                      comp_layer_db: LayerDatabase,
                                      layer: Layer, comp_ratio: float,
                                      perform_reconstruction: bool):
        """
        Replaces a given layer within the comp_layer_db with a pruned version of the layer

        :param orig_layer_db: original Layer database
        :param comp_layer_db: Layer database, will be modified
        :param layer: Layer to prune
        :param comp_ratio: compression - ratio
        :return:
        """
        # 1) channel selection
        prune_indices = self._select_inp_channels(layer.module, comp_ratio)

        # 2) winnow - in place API
        _, module_list = winnow_model(
            comp_layer_db.model,
            self._input_shape, [(layer.module, prune_indices)],
            reshape=self._allow_custom_downsample_ops,
            in_place=True)

        # 3) data sub sampling and reconstruction
        if perform_reconstruction:
            # get original layer reference
            orig_layer = orig_layer_db.find_layer_by_name(layer.name)
            self._data_subsample_and_reconstruction(orig_layer.module,
                                                    layer.module,
                                                    orig_layer_db.model,
                                                    comp_layer_db.model)

        # 4) update layer database
        if module_list:
            self._update_layer_database_after_winnowing(
                comp_layer_db, module_list)
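
    # --- Illustrative winnow step (not part of the original listing) ------------
    # Isolates step 2 above: removing chosen input channels of a single conv in
    # place via winnow_model(), using the same call shape as the method above.
    # The indices here are arbitrary placeholders; in the pruner they come from
    # _select_inp_channels(). Imports (winnow_model) are omitted, as elsewhere in
    # this listing.
    def example_winnow_single_conv(model, conv_module, input_shape=(1, 1, 28, 28)):
        prune_indices = [0, 2, 4]  # hypothetical input channels to remove
        _, module_list = winnow_model(
            model,
            input_shape, [(conv_module, prune_indices)],
            reshape=True,
            in_place=True)
        # module_list describes the modules that were replaced during winnowing
        return module_list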
# Example 20
    def test_prune_model_2_layers(self):

        model = mnist_model.Net()

        # Create a layer database
        layer_db = LayerDatabase(model, input_shape=(1, 1, 28, 28))

        fc1 = layer_db.find_layer_by_name('fc1')
        conv2 = layer_db.find_layer_by_name('conv2')
        pruner = WeightSvdPruner()

        layer_db = pruner.prune_model(layer_db, [LayerCompRatioPair(fc1, Decimal(0.5)),
                                                 LayerCompRatioPair(conv2, Decimal(0.5))], aimet_common.defs.CostMetric.mac,
                                      trainer=None)

        fc1_a = layer_db.find_layer_by_name('fc1.0')
        fc1_b = layer_db.find_layer_by_name('fc1.1')

        self.assertEqual(3136, fc1_a.module.in_features)
        self.assertEqual(1024, fc1_b.module.out_features)

        conv2_a = layer_db.find_layer_by_name('conv2.0')
        conv2_b = layer_db.find_layer_by_name('conv2.1')

        self.assertEqual((1, 1), conv2_a.module.kernel_size)
        self.assertEqual(32, conv2_a.module.in_channels)
        self.assertEqual(15, conv2_a.module.out_channels)

        self.assertEqual((5, 5), conv2_b.module.kernel_size)
        self.assertEqual(15, conv2_b.module.in_channels)
        self.assertEqual(64, conv2_b.module.out_channels)

        self.assertTrue(isinstance(layer_db.model.fc1, nn.Sequential))
        self.assertTrue(isinstance(layer_db.model.conv2, nn.Sequential))

        for layer in layer_db:
            print("Layer: " + layer.name)
            print("   Module: " + str(layer.module))

        print(layer_db.model)
    def test_comp_ratio_select_tar(self):

        compute_model_cost = unittest.mock.MagicMock()
        pruner = unittest.mock.MagicMock()

        eval_func = unittest.mock.MagicMock()
        eval_func.side_effect = [
            0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65,
            0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.97, 1.0, 0.1, 0.15, 0.2, 0.25,
            0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85,
            0.9, 0.95, 0.97, 1.0, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45,
            0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.97, 1.0,
            0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65,
            0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.97, 1.0
        ]

        compute_model_cost.return_value = (500, 500)

        compute_network_cost = unittest.mock.MagicMock()
        compute_network_cost.return_value = (500, 500)

        model = mnist_torch_model.Net().to('cpu')

        layer_db = LayerDatabase(model, input_shape=(1, 1, 28, 28))
        layer1 = layer_db.find_layer_by_name('conv2')
        layer_db.mark_picked_layers([layer1])
        layer2 = layer_db.find_layer_by_name('fc2')
        layer_db.mark_picked_layers([layer2])
        layer3 = layer_db.find_layer_by_name('fc1')
        layer_db.mark_picked_layers([layer3])

        # Instantiate child
        tar_algo = comp_ratio_select.TarRankSelectAlgo(
            layer_db=layer_db,
            pruner=pruner,
            cost_calculator=WeightSvdCostCalculator(),
            eval_func=eval_func,
            eval_iterations=20,
            cost_metric=CostMetric.mac,
            num_rank_indices=20,
            use_cuda=False,
            pymo_utils_lib=pymo_utils)

        tar_algo._svd_lib_ref = create_autospec(pymo.Svd, instance=True)

        tar_algo._svd_lib_ref.SetCandidateRanks = unittest.mock.MagicMock()
        tar_algo._svd_lib_ref.SetCandidateRanks.return_value = 20

        tar_algo._num_rank_indices = 20
        with unittest.mock.patch(
                'aimet_common.cost_calculator.CostCalculator.calculate_comp_ratio_given_rank'
        ) as calculate_comp_ratio_given_rank:
            calculate_comp_ratio_given_rank.side_effect = [
                0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2,
                0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4,
                0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6,
                0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
                0.9, 1.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0
            ]

            layer_comp_ratio_list, stats = tar_algo.select_per_layer_comp_ratios()

            self.assertEqual(layer_comp_ratio_list[2].eval_score, 0.97)
            self.assertEqual(layer_comp_ratio_list[2].comp_ratio, 1.0)
# Example 22
    @classmethod
    def create_weight_svd_algo(cls, model: torch.nn.Module, eval_callback: EvalFunction, eval_iterations,
                               input_shape: Tuple, cost_metric: CostMetric,
                               params: WeightSvdParameters, bokeh_session) -> CompressionAlgo:
        """
        Factory method to construct WeightSvdCompressionAlgo

        :param model: Model to compress
        :param eval_callback: Evaluation callback for the model
        :param eval_iterations: Evaluation iterations
        :param input_shape: Shape of the input tensor for model
        :param cost_metric: Cost metric (mac or memory)
        :param params: Weight SVD compression parameters (including the rank-selection scheme)
        :param bokeh_session: The Bokeh session to display plots
        :return: An instance of WeightSvdCompressionAlgo
        """

        # pylint: disable=too-many-locals
        # Rationale: Factory functions unfortunately need to deal with a lot of parameters

        # Create a layer database
        layer_db = LayerDatabase(model, input_shape)
        use_cuda = next(model.parameters()).is_cuda

        # Create a pruner
        pruner = WeightSvdPruner()
        cost_calculator = WeightSvdCostCalculator()
        comp_ratio_rounding_algo = RankRounder(params.multiplicity, cost_calculator)

        # Create a comp-ratio selection algorithm
        if params.mode == WeightSvdParameters.Mode.auto:
            # greedy
            if params.mode_params.rank_select_scheme is RankSelectScheme.greedy:
                greedy_params = params.mode_params.select_params
                comp_ratio_select_algo = GreedyCompRatioSelectAlgo(layer_db=layer_db,
                                                                   pruner=pruner,
                                                                   cost_calculator=cost_calculator,
                                                                   eval_func=eval_callback,
                                                                   eval_iterations=eval_iterations,
                                                                   cost_metric=cost_metric,
                                                                   target_comp_ratio=greedy_params.target_comp_ratio,
                                                                   num_candidates=greedy_params.num_comp_ratio_candidates,
                                                                   use_monotonic_fit=greedy_params.use_monotonic_fit,
                                                                   saved_eval_scores_dict=greedy_params.saved_eval_scores_dict,
                                                                   comp_ratio_rounding_algo=comp_ratio_rounding_algo,
                                                                   use_cuda=use_cuda,
                                                                   bokeh_session=bokeh_session)
            # TAR method
            elif params.mode_params.rank_select_scheme is RankSelectScheme.tar:
                tar_params = params.mode_params.select_params
                comp_ratio_select_algo = TarRankSelectAlgo(layer_db=layer_db, pruner=pruner,
                                                           cost_calculator=cost_calculator,
                                                           eval_func=eval_callback,
                                                           eval_iterations=eval_iterations,
                                                           cost_metric=cost_metric,
                                                           num_rank_indices=tar_params.num_rank_indices,
                                                           use_cuda=use_cuda, pymo_utils_lib=pymo_utils)
            else:
                raise ValueError("Unknown rank selection scheme: {}".format(params.mode_params.rank_select_scheme))

            layer_selector = ConvFcLayerSelector()
            modules_to_ignore = params.mode_params.modules_to_ignore

        else:
            # Convert (module,comp-ratio) pairs to (layer,comp-ratio) pairs
            layer_comp_ratio_pairs = cls._get_layer_pairs(layer_db, params.mode_params.list_of_module_comp_ratio_pairs)

            comp_ratio_select_algo = ManualCompRatioSelectAlgo(layer_db,
                                                               layer_comp_ratio_pairs,
                                                               comp_ratio_rounding_algo, cost_metric=cost_metric)

            layer_selector = ManualLayerSelector(layer_comp_ratio_pairs)
            modules_to_ignore = []

        # Create the overall Weight SVD compression algorithm
        weight_svd_algo = CompressionAlgo(layer_db, comp_ratio_select_algo, pruner, eval_callback,
                                          layer_selector, modules_to_ignore, cost_calculator, use_cuda)

        return weight_svd_algo
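
    # --- Usage sketch (illustrative; not part of the original listing) ----------
    # Same call shape as the spatial-SVD sketch earlier, applied to the weight-SVD
    # factory above. "CompressionFactory", "evaluate" and "weight_svd_params" are
    # stand-ins for the owning class, the eval callback and a fully built
    # WeightSvdParameters object; imports are omitted, as elsewhere in this listing.
    def example_run_weight_svd(model, evaluate, weight_svd_params):
        algo = CompressionFactory.create_weight_svd_algo(
            model=model,
            eval_callback=evaluate,
            eval_iterations=20,
            input_shape=(1, 1, 28, 28),
            cost_metric=CostMetric.mac,
            params=weight_svd_params,
            bokeh_session=None)
        compressed_layer_db, stats = algo.compress_model(CostMetric.mac, trainer=None)
        return compressed_layer_db, stats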
    def test_prune_model(self):
        """Test end to end prune model with Mnist"""
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = nn.Conv2d(1, 10, kernel_size=3)
                self.max_pool2d = nn.MaxPool2d(2)
                self.relu1 = nn.ReLU()
                self.conv2 = nn.Conv2d(10, 20, kernel_size=3)
                self.relu2 = nn.ReLU()
                self.conv3 = nn.Conv2d(20, 30, kernel_size=3)
                self.relu3 = nn.ReLU()
                self.conv4 = nn.Conv2d(30, 40, kernel_size=3)
                self.relu4 = nn.ReLU()
                self.fc1 = nn.Linear(7 * 7 * 40, 300)
                self.relu5 = nn.ReLU()
                self.fc2 = nn.Linear(300, 10)
                self.log_softmax = nn.LogSoftmax(dim=1)

            def forward(self, x):
                x = self.relu1(self.max_pool2d(self.conv1(x)))
                x = self.relu2(self.conv2(x))
                x = self.relu3(self.conv3(x))
                x = self.relu4(self.conv4(x))
                x = x.view(x.size(0), -1)
                x = self.relu5(self.fc1(x))
                x = self.fc2(x)
                return self.log_softmax(x)

        orig_model = Net()
        orig_model.eval()
        # Create a layer database
        orig_layer_db = LayerDatabase(orig_model, input_shape=(1, 1, 28, 28))
        dataset_size = 1000
        batch_size = 10
        # max out number of batches
        number_of_batches = 100
        samples_per_image = 10

        # create fake data loader with image size (1, 28, 28)
        data_loader = create_fake_data_loader(dataset_size=dataset_size,
                                              batch_size=batch_size)

        input_channel_pruner = InputChannelPruner(
            data_loader=data_loader,
            input_shape=(1, 1, 28, 28),
            num_reconstruction_samples=number_of_batches,
            allow_custom_downsample_ops=True)

        # keeping compression ratio = 0.5 for all layers
        layer_comp_ratio_list = [
            LayerCompRatioPair(Layer(orig_model.conv4, 'conv4', None), 0.5),
            LayerCompRatioPair(Layer(orig_model.conv3, 'conv3', None), 0.5),
            LayerCompRatioPair(Layer(orig_model.conv2, 'conv2', None), 0.5)
        ]

        comp_layer_db = input_channel_pruner.prune_model(orig_layer_db,
                                                         layer_comp_ratio_list,
                                                         CostMetric.mac,
                                                         trainer=None)

        self.assertEqual(comp_layer_db.model.conv2.in_channels, 5)
        self.assertEqual(comp_layer_db.model.conv2.out_channels, 10)

        self.assertEqual(comp_layer_db.model.conv3.in_channels, 10)
        self.assertEqual(comp_layer_db.model.conv3.out_channels, 15)

        self.assertEqual(comp_layer_db.model.conv4.in_channels, 15)
        self.assertEqual(comp_layer_db.model.conv4.out_channels, 40)
    def test_prune_model_with_seq(self):
        """Test end to end prune model with resnet18"""

        batch_size = 2
        dataset_size = 1000
        number_of_batches = 1
        samples_per_image = 10
        num_reconstruction_samples = number_of_batches * batch_size * samples_per_image

        resnet18_model = models.resnet18(pretrained=True)
        resnet18_model.eval()

        # Create a layer database
        orig_layer_db = LayerDatabase(resnet18_model,
                                      input_shape=(1, 3, 224, 224))

        data_loader = create_fake_data_loader(dataset_size=dataset_size,
                                              batch_size=batch_size,
                                              image_size=(3, 224, 224))

        input_channel_pruner = InputChannelPruner(
            data_loader=data_loader,
            input_shape=(1, 3, 224, 224),
            num_reconstruction_samples=num_reconstruction_samples,
            allow_custom_downsample_ops=True)

        # keeping compression ratio = 0.5 for all layers
        layer_comp_ratio_list = [
            LayerCompRatioPair(
                Layer(resnet18_model.layer4[1].conv1, 'layer4.1.conv1', None),
                0.5),
            LayerCompRatioPair(
                Layer(resnet18_model.layer3[1].conv1, 'layer3.1.conv1', None),
                0.5),
            LayerCompRatioPair(
                Layer(resnet18_model.layer2[1].conv1, 'layer2.1.conv1', None),
                0.5),
            LayerCompRatioPair(
                Layer(resnet18_model.layer1[1].conv1, 'layer1.1.conv1', None),
                0.5),
            LayerCompRatioPair(
                Layer(resnet18_model.layer1[0].conv2, 'layer1.0.conv2', None),
                0.5)
        ]

        comp_layer_db = input_channel_pruner.prune_model(orig_layer_db,
                                                         layer_comp_ratio_list,
                                                         CostMetric.mac,
                                                         trainer=None)

        # 1) not below split
        self.assertEqual(comp_layer_db.model.layer1[0].conv2.in_channels, 32)
        self.assertEqual(comp_layer_db.model.layer1[0].conv2.out_channels, 64)
        self.assertEqual(
            list(comp_layer_db.model.layer1[0].conv2.weight.shape),
            [64, 32, 3, 3])
        # impacted
        self.assertEqual(comp_layer_db.model.layer1[0].conv1.in_channels, 64)
        self.assertEqual(comp_layer_db.model.layer1[0].conv1.out_channels, 32)
        self.assertEqual(
            list(comp_layer_db.model.layer1[0].conv1.weight.shape),
            [32, 64, 3, 3])

        # 2) below split

        # 64 * .5
        self.assertEqual(comp_layer_db.model.layer1[1].conv1[1].in_channels,
                         32)
        self.assertEqual(comp_layer_db.model.layer1[1].conv1[1].out_channels,
                         64)
        self.assertEqual(
            list(comp_layer_db.model.layer1[1].conv1[1].weight.shape),
            [64, 32, 3, 3])

        # 128 * .5
        self.assertEqual(comp_layer_db.model.layer2[1].conv1[1].in_channels,
                         64)
        self.assertEqual(comp_layer_db.model.layer2[1].conv1[1].out_channels,
                         128)
        self.assertEqual(
            list(comp_layer_db.model.layer2[1].conv1[1].weight.shape),
            [128, 64, 3, 3])

        # 256 * .5
        self.assertEqual(comp_layer_db.model.layer3[1].conv1[1].in_channels,
                         128)
        self.assertEqual(comp_layer_db.model.layer3[1].conv1[1].out_channels,
                         256)
        self.assertEqual(
            list(comp_layer_db.model.layer3[1].conv1[1].weight.shape),
            [256, 128, 3, 3])

        # 512 * .5
        self.assertEqual(comp_layer_db.model.layer4[1].conv1[1].in_channels,
                         256)
        self.assertEqual(comp_layer_db.model.layer4[1].conv1[1].out_channels,
                         512)
        self.assertEqual(
            list(comp_layer_db.model.layer4[1].conv1[1].weight.shape),
            [512, 256, 3, 3])
    def test_select_per_layer_comp_ratios(self):

        pruner = unittest.mock.MagicMock()
        eval_func = unittest.mock.MagicMock()
        rounding_algo = unittest.mock.MagicMock()
        rounding_algo.round.side_effect = [
            0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.3, 0.4,
            0.5, 0.6, 0.7, 0.8, 0.9
        ]
        eval_func.side_effect = [
            10, 20, 30, 40, 50, 60, 70, 80, 90, 11, 21, 31, 35, 40, 45, 50, 55,
            60
        ]

        model = mnist_torch_model.Net()
        layer_db = LayerDatabase(model, input_shape=(1, 1, 28, 28))

        layer1 = layer_db.find_layer_by_name('conv1')
        layer2 = layer_db.find_layer_by_name('conv2')
        selected_layers = [layer1, layer2]
        layer_db.mark_picked_layers([layer1, layer2])

        try:
            os.remove('./data/greedy_selection_eval_scores_dict.pkl')
        except OSError:
            pass

        # Instantiate child
        greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo(
            layer_db,
            pruner,
            SpatialSvdCostCalculator(),
            eval_func,
            20,
            CostMetric.mac,
            Decimal(0.6),
            10,
            True,
            None,
            rounding_algo,
            False,
            bokeh_session=None)

        layer_comp_ratio_list, stats = greedy_algo.select_per_layer_comp_ratios()

        original_cost = SpatialSvdCostCalculator.compute_model_cost(layer_db)

        for layer in layer_db:
            if layer not in selected_layers:
                layer_comp_ratio_list.append(LayerCompRatioPair(layer, None))
        compressed_cost = SpatialSvdCostCalculator.calculate_compressed_cost(
            layer_db, layer_comp_ratio_list, CostMetric.mac)
        rounding_algo.round.side_effect = [
            0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.3, 0.4,
            0.5, 0.6, 0.7, 0.8, 0.9
        ]
        actual_compression_ratio = compressed_cost.mac / original_cost.mac
        self.assertTrue(
            math.isclose(Decimal(0.6), actual_compression_ratio, abs_tol=0.05))
        self.assertTrue(
            os.path.isfile('./data/greedy_selection_eval_scores_dict.pkl'))

        print('\n')
        for pair in layer_comp_ratio_list:
            print(pair)

        # lets repeat with a saved eval_dict
        greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo(
            layer_db,
            pruner,
            SpatialSvdCostCalculator(),
            eval_func,
            20,
            CostMetric.mac,
            Decimal(0.6),
            10,
            True,
            './data/greedy_selection_eval_scores_dict.pkl',
            rounding_algo,
            False,
            bokeh_session=None)
        layer_comp_ratio_list, stats = greedy_algo.select_per_layer_comp_ratios()

        original_cost = SpatialSvdCostCalculator.compute_model_cost(layer_db)

        for layer in layer_db:
            if layer not in selected_layers:
                layer_comp_ratio_list.append(LayerCompRatioPair(layer, None))
        compressed_cost = SpatialSvdCostCalculator.calculate_compressed_cost(
            layer_db, layer_comp_ratio_list, CostMetric.mac)

        actual_compression_ratio = compressed_cost.mac / original_cost.mac
        self.assertTrue(
            math.isclose(Decimal(0.6), actual_compression_ratio, abs_tol=0.05))

        print('\n')
        for pair in layer_comp_ratio_list:
            print(pair)