Example No. 1
def channel_pruning_auto_mode():

    # Load trained MNIST model
    model = torch.load(os.path.join('../', 'data', 'mnist_trained_on_GPU.pth'))

    # Specify the necessary parameters
    greedy_params = GreedySelectionParameters(target_comp_ratio=Decimal(0.8),
                                              num_comp_ratio_candidates=10)
    auto_params = ChannelPruningParameters.AutoModeParams(
        greedy_params, modules_to_ignore=[model.conv1])

    data_loader = mnist_torch_model.DataLoaderMnist(cuda=True,
                                                    seed=1,
                                                    shuffle=True)
    params = ChannelPruningParameters(data_loader=data_loader.train_loader,
                                      num_reconstruction_samples=500,
                                      allow_custom_downsample_ops=True,
                                      mode=ChannelPruningParameters.Mode.auto,
                                      params=auto_params)

    # Single call to compress the model
    results = ModelCompressor.compress_model(
        model,
        eval_callback=evaluate_model,
        eval_iterations=1000,
        input_shape=(1, 1, 28, 28),
        compress_scheme=CompressionScheme.channel_pruning,
        cost_metric=CostMetric.mac,
        parameters=params)

    compressed_model, stats = results
    print(compressed_model)
    print(stats)  # Stats object can be pretty-printed easily
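These examples hand compress_model an evaluate_model callback together with eval_iterations, but the callback itself is defined elsewhere. AIMET expects an evaluation callback of the form evaluate(model, iterations, use_cuda) that returns a single accuracy number. Below is a minimal sketch of what such a callback might look like; the torchvision MNIST loader is used here purely for self-containment and is an assumption, not part of the original example (which uses mnist_torch_model.DataLoaderMnist).

import torch
from torchvision import datasets, transforms

# Assumed MNIST test loader (not part of the original example)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, download=True, transform=transforms.ToTensor()),
    batch_size=100, shuffle=False)

def evaluate_model(model: torch.nn.Module, eval_iterations: int, use_cuda: bool = True) -> float:
    # Measure top-1 accuracy over at most eval_iterations batches.
    device = torch.device('cuda' if use_cuda else 'cpu')
    model = model.to(device).eval()
    correct, total = 0, 0
    with torch.no_grad():
        for batch_idx, (images, labels) in enumerate(test_loader):
            if eval_iterations is not None and batch_idx >= eval_iterations:
                break
            images, labels = images.to(device), labels.to(device)
            predictions = model(images).argmax(dim=1)
            correct += (predictions == labels).sum().item()
            total += labels.numel()
    return correct / max(total, 1)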
Example No. 2
def channel_pruning_manual_mode():

    # Load a trained MNIST model
    model = torch.load(os.path.join('../', 'data', 'mnist_trained_on_GPU.pth'))

    # Specify the necessary parameters
    manual_params = ChannelPruningParameters.ManualModeParams(
        [ModuleCompRatioPair(model.conv2, 0.4)])

    data_loader = mnist_torch_model.DataLoaderMnist(cuda=True,
                                                    seed=1,
                                                    shuffle=True)
    params = ChannelPruningParameters(
        data_loader=data_loader.train_loader,
        num_reconstruction_samples=500,
        allow_custom_downsample_ops=True,
        mode=ChannelPruningParameters.Mode.manual,
        params=manual_params)

    # Single call to compress the model
    results = ModelCompressor.compress_model(
        model,
        eval_callback=evaluate_model,
        eval_iterations=1000,
        input_shape=(1, 1, 28, 28),
        compress_scheme=CompressionScheme.channel_pruning,
        cost_metric=CostMetric.mac,
        parameters=params)

    compressed_model, stats = results
    print(compressed_model)
    print(stats)  # Stats object can be pretty-printed easily
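The compressed model returned by compress_model is an ordinary torch.nn.Module, so accuracy lost to channel pruning is typically recovered by fine-tuning afterwards (one of the spatial SVD tests below passes a Trainer to compress_model for the same purpose). The loop below is only a sketch: the optimizer, learning rate, and epoch count are illustrative assumptions, not values from the original examples.

import torch
import torch.nn.functional as F

def finetune(model, train_loader, epochs=2, lr=1e-3, use_cuda=True):
    # Plain supervised fine-tuning of the compressed model.
    device = torch.device('cuda' if use_cuda else 'cpu')
    model = model.to(device).train()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    for _ in range(epochs):
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            # Swap in F.nll_loss if the model already returns log-probabilities.
            loss = F.cross_entropy(model(images), labels)
            loss.backward()
            optimizer.step()
    return model

# e.g. compressed_model = finetune(compressed_model, data_loader.train_loader)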
Example No. 3
def weight_svd_manual_mode():

    # Load a trained MNIST model
    model = torch.load(os.path.join('../', 'data', 'mnist_trained_on_GPU.pth'))

    # Specify the necessary parameters
    manual_params = WeightSvdParameters.ManualModeParams([
        ModuleCompRatioPair(model.conv1, 0.5),
        ModuleCompRatioPair(model.conv2, 0.4)
    ])
    params = WeightSvdParameters(mode=WeightSvdParameters.Mode.manual,
                                 params=manual_params,
                                 multiplicity=8)

    # Single call to compress the model
    results = ModelCompressor.compress_model(
        model,
        eval_callback=evaluate_model,
        eval_iterations=1000,
        input_shape=(1, 1, 28, 28),
        compress_scheme=CompressionScheme.weight_svd,
        cost_metric=CostMetric.mac,
        parameters=params)

    compressed_model, stats = results
    print(compressed_model)
    print(stats)  # Stats object can be pretty-printed easily
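Weight SVD replaces each selected layer with a two-stage torch.nn.Sequential, and the tests further down this page assert exactly that and read the chosen rank from the first stage (for example, compressed_model.layer1[0].conv1[0].out_channels). Below is a small inspection sketch under that assumption; list_decomposed_layers is a hypothetical helper, not part of the AIMET API.

import torch

def list_decomposed_layers(compressed_model: torch.nn.Module):
    # Report every module that was split into a two-stage Sequential of the
    # same layer type, along with the intermediate rank AIMET selected.
    for name, module in compressed_model.named_modules():
        if (isinstance(module, torch.nn.Sequential) and len(module) == 2
                and type(module[0]) is type(module[1])):
            first = module[0]
            rank = getattr(first, 'out_channels', None) or getattr(first, 'out_features', None)
            print(f'{name}: decomposed, intermediate rank = {rank}')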
Example No. 4
def weight_svd_auto_mode():

    # Load trained MNIST model
    model = torch.load(os.path.join('../', 'data', 'mnist_trained_on_GPU.pth'))

    # Specify the necessary parameters
    greedy_params = GreedySelectionParameters(target_comp_ratio=Decimal(0.8),
                                              num_comp_ratio_candidates=10)
    rank_select = RankSelectScheme.greedy
    auto_params = WeightSvdParameters.AutoModeParams(
        rank_select_scheme=rank_select,
        select_params=greedy_params,
        modules_to_ignore=[model.conv1])

    params = WeightSvdParameters(mode=WeightSvdParameters.Mode.auto,
                                 params=auto_params)

    # Single call to compress the model
    results = ModelCompressor.compress_model(
        model,
        eval_callback=evaluate_model,
        eval_iterations=1000,
        input_shape=(1, 1, 28, 28),
        compress_scheme=CompressionScheme.weight_svd,
        cost_metric=CostMetric.mac,
        parameters=params)

    compressed_model, stats = results
    print(compressed_model)
    print(stats)  # Stats object can be pretty-printed easily
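Example No. 5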
    def test_channel_pruning_manual(self):

        torch.cuda.empty_cache()
        torch.manual_seed(1)
        numpy.random.seed(1)
        torch.backends.cudnn.deterministic = True

        AimetLogger.set_level_for_all_areas(logging.DEBUG)

        data_loader = ImageNetDataLoader(image_dir, image_size, batch_size, num_workers)
        input_shape = (1, 3, 224, 224)
        model = models.resnet18(pretrained=True).to(torch.device('cuda'))
        manual_params = ChannelPruningParameters.ManualModeParams([ModuleCompRatioPair(model.layer1[0].conv2, 0.3),
                                                                   ModuleCompRatioPair(model.layer2[1].conv1, 0.5)])
        params = ChannelPruningParameters(data_loader.train_loader, 5000,
                                          True,
                                          aimet_torch.defs.ChannelPruningParameters.Mode.manual,
                                          manual_params, multiplicity=8)

        compressed_model, stats = ModelCompressor.compress_model(model, evaluate, 10, input_shape,
                                                                 aimet_common.defs.CompressionScheme.channel_pruning,
                                                                 cost_metric=aimet_common.defs.CostMetric.mac,
                                                                 parameters=params, visualization_url=None)
        baseline_model_accuracy = stats.baseline_model_accuracy
        compressed_best_model_accuracy = stats.compressed_model_accuracy
        self.assertTrue(baseline_model_accuracy >= compressed_best_model_accuracy)
        self.assertEqual(24, compressed_model.layer1[0].conv2.in_channels)
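Example No. 6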
    def test_spatial_svd_with_fine_tuning(self):
        torch.cuda.empty_cache()
        torch.manual_seed(1)
        numpy.random.seed(1)
        torch.backends.cudnn.deterministic = True

        AimetLogger.set_level_for_all_areas(logging.DEBUG)
        # load trained MNIST model
        data_loader = DataLoaderMnist(cuda=True, seed=1, shuffle=False, train_batch_size=64, test_batch_size=100)
        input_shape = (1, 1, 28, 28)
        model = torch.load(os.path.join('./', 'data', 'mnist_trained_on_GPU.pth'))
        modules_to_ignore = [model.conv1]
        greedy_params = aimet_common.defs.GreedySelectionParameters(target_comp_ratio=Decimal(0.8),
                                                                    num_comp_ratio_candidates=10,
                                                                    use_monotonic_fit=True)
        auto_params = aimet_torch.defs.SpatialSvdParameters.AutoModeParams(greedy_params,
                                                                           modules_to_ignore=modules_to_ignore)

        params = aimet_torch.defs.SpatialSvdParameters(aimet_torch.defs.SpatialSvdParameters.Mode.auto, auto_params,
                                                       multiplicity=1)
        results = ModelCompressor.compress_model(model, mnist_model.evaluate, 10, input_shape,
                                                 aimet_common.defs.CompressionScheme.spatial_svd,
                                                 cost_metric=aimet_common.defs.CostMetric.mac, parameters=params,
                                                 trainer=Trainer(), visualization_url=None)

        compressed_model, stats = results
        print(compressed_model)
        print(stats)
        self.assertTrue(math.isclose(float(stats.mac_compression_ratio), 0.7, abs_tol=0.1))
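Example No. 7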
    def test_weight_svd_compress_manual(self):

        torch.cuda.empty_cache()
        torch.manual_seed(1)
        numpy.random.seed(1)
        torch.backends.cudnn.deterministic = True

        AimetLogger.set_level_for_all_areas(logging.DEBUG)

        input_shape = (1, 3, 224, 224)
        model = models.resnet18(pretrained=True).to(torch.device('cuda'))

        manual_params = aimet_torch.defs.WeightSvdParameters.ManualModeParams(
            [ModuleCompRatioPair(model.layer1[0].conv1, 0.5),
             ModuleCompRatioPair(model.layer2[1].conv2, 0.4), ModuleCompRatioPair(model.fc, 0.4)])

        params = aimet_torch.defs.WeightSvdParameters(aimet_torch.defs.WeightSvdParameters.Mode.manual, manual_params)

        results = ModelCompressor.compress_model(model, evaluate, 10, input_shape,
                                                 aimet_common.defs.CompressionScheme.weight_svd,
                                                 cost_metric=aimet_common.defs.CostMetric.mac, parameters=params,
                                                 visualization_url=None)

        compressed_model, stats = results
        print(compressed_model)
        print(stats)

        self.assertTrue(isinstance(compressed_model.layer1[0].conv1, torch.nn.Sequential))
        self.assertEqual(math.floor(64 * 64 * 3 * 3 * 0.5 / (64 + 64 * 3 * 3)),
                         compressed_model.layer1[0].conv1[0].out_channels)

        self.assertTrue(isinstance(compressed_model.fc, torch.nn.Sequential))
        self.assertEqual(math.floor(512 * 1000 * 0.4 / (512 + 1000)), compressed_model.fc[0].out_features)
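Example No. 8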
    def test_spatial_svd_compress_auto_multi_input_model(self):

        torch.manual_seed(1)
        numpy.random.seed(1)

        AimetLogger.set_level_for_all_areas(logging.DEBUG)

        input_shape = [(1, 1, 28, 28), (1, 1, 28, 28)]

        model = ModelWithTwoInputs()

        greedy_params = aimet_common.defs.GreedySelectionParameters(target_comp_ratio=Decimal(0.65),
                                                                    num_comp_ratio_candidates=4)

        auto_params = aimet_torch.defs.SpatialSvdParameters.AutoModeParams(greedy_params)

        params = aimet_torch.defs.SpatialSvdParameters(aimet_torch.defs.SpatialSvdParameters.Mode.auto, auto_params)

        mock_eval = FakeEvaluator(input_shape)
        mock_eval.return_values = [0.75, 0.50, 0.25, 0.75, 0.50, 0.25, 0.75, 0.50, 0.25,
                                   0.50, 0.50]

        results = ModelCompressor.compress_model(model=model, eval_callback=mock_eval, eval_iterations=5,
                                                 input_shape=input_shape,
                                                 compress_scheme=aimet_common.defs.CompressionScheme.spatial_svd,
                                                 cost_metric=aimet_common.defs.CostMetric.mac, parameters=params,
                                                 visualization_url=None)
        compressed_model, stats = results
        print(compressed_model)
        print(stats)

        self.assertEqual(8, compressed_model.conv2[0].out_channels)
        self.assertEqual((5, 1), compressed_model.conv2[0].kernel_size)
        self.assertTrue(math.isclose(float(stats.mac_compression_ratio), 0.25, abs_tol=0.01))
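Example No. 9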
    def test_pickled_compression_ratios(self):

        visualization_url, process = start_bokeh_server_session(8002)

        torch.cuda.empty_cache()
        torch.manual_seed(1)
        np.random.seed(1)
        torch.backends.cudnn.deterministic = True

        input_shape = (1, 3, 224, 224)
        model = models.resnet18(pretrained=True).to(torch.device('cuda'))

        modules_to_ignore = [model.conv1,
                             model.layer2[0].downsample[0],
                             model.layer3[0].downsample[0],
                             model.layer4[0].downsample[0],
                             model.layer4[1].conv1,
                             model.layer4[1].conv2
                             ]
        greedy_params = aimet_common.defs.GreedySelectionParameters(target_comp_ratio=Decimal(0.65),
                                                                    num_comp_ratio_candidates=10)

        auto_params = aimet_torch.defs.SpatialSvdParameters.AutoModeParams(greedy_params,
                                                                           modules_to_ignore=modules_to_ignore)

        params = aimet_torch.defs.SpatialSvdParameters(aimet_torch.defs.SpatialSvdParameters.Mode.auto, auto_params,
                                                       multiplicity=8)

        ModelCompressor.compress_model(model=model, eval_callback=evaluate, eval_iterations=5,
                                       input_shape=input_shape,
                                       compress_scheme=aimet_common.defs.CompressionScheme.spatial_svd,
                                       cost_metric=aimet_common.defs.CostMetric.mac, parameters=params,
                                       visualization_url=None)

        comp_ratios_file_path = './data/greedy_selection_comp_ratios_list.pkl'
        eval_scores_path = './data/greedy_selection_eval_scores_dict.pkl'
        self.assertTrue(os.path.exists(comp_ratios_file_path))
        layer_comp_ratio_list = CompressionAlgo.unpickle_comp_ratios_list(comp_ratios_file_path)
        self.assertEqual(type(layer_comp_ratio_list), list)
        compression_visualizations = VisualizeCompression(visualization_url)
        compression_visualizations.display_eval_scores(eval_scores_path)
        compression_visualizations.display_comp_ratio_plot(comp_ratios_file_path)
        os.killpg(os.getpgid(process.pid), signal.SIGTERM)
Example No. 10
def model_compression_with_visualization():
    """
    Code example for compressing a model with a visualization URL provided.
    """
    visualization_url, process = start_bokeh_server_session(8002)

    ImageNetDataLoader(image_dir, image_size, batch_size, num_workers)
    input_shape = (1, 3, 224, 224)
    model = models.resnet18(pretrained=True).to(torch.device('cuda'))

    modules_to_ignore = [model.conv1]

    greedy_params = aimet_common.defs.GreedySelectionParameters(
        target_comp_ratio=Decimal(0.65),
        num_comp_ratio_candidates=10,
        saved_eval_scores_dict='../data/resnet18_eval_scores.pkl')

    auto_params = aimet_torch.defs.SpatialSvdParameters.AutoModeParams(
        greedy_params, modules_to_ignore=modules_to_ignore)

    params = aimet_torch.defs.SpatialSvdParameters(
        aimet_torch.defs.SpatialSvdParameters.Mode.auto,
        auto_params,
        multiplicity=8)

    # If no visualization URL is provided, no visualizations will be published during model compression.
    ModelCompressor.compress_model(
        model=model,
        eval_callback=evaluate,
        eval_iterations=5,
        input_shape=input_shape,
        compress_scheme=aimet_common.defs.CompressionScheme.spatial_svd,
        cost_metric=aimet_common.defs.CostMetric.mac,
        parameters=params,
        visualization_url=None)

    comp_ratios_file_path = './data/greedy_selection_comp_ratios_list.pkl'
    eval_scores_path = '../data/resnet18_eval_scores.pkl'

    # A user can visualize the eval scores dictionary and optimal compression ratios by executing the following code.
    compression_visualizations = VisualizeCompression(visualization_url)
    compression_visualizations.display_eval_scores(eval_scores_path)
    compression_visualizations.display_comp_ratio_plot(comp_ratios_file_path)
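Example No. 11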
    def test_channel_pruning_compress_auto_resnet(self):

        torch.cuda.empty_cache()
        torch.manual_seed(1)
        numpy.random.seed(1)
        torch.backends.cudnn.deterministic = True

        AimetLogger.set_level_for_all_areas(logging.DEBUG)

        data_loader = ImageNetDataLoader(image_dir, image_size, batch_size, num_workers)
        input_shape = (1, 3, 224, 224)
        model = models.resnet18(pretrained=False).to(torch.device('cuda'))
        model.eval()

        modules_to_ignore = [model.conv1,
                             model.layer2[0].downsample[0],
                             model.layer3[0].downsample[0],
                             model.layer4[0].downsample[0],
                             model.layer4[1].conv1,
                             model.layer4[1].conv2
                             ]

        greedy_params = aimet_common.defs.GreedySelectionParameters(target_comp_ratio=Decimal(0.65),
                                                                    num_comp_ratio_candidates=10,
                                                                    use_monotonic_fit=True,
                                                                    saved_eval_scores_dict='./data/resnet18_eval_scores.pkl')
        auto_params = ChannelPruningParameters.AutoModeParams(greedy_params,
                                                              modules_to_ignore=modules_to_ignore)

        # Select a single batch for reconstruction:
        # num_reconstruction_samples = 50
        # 50 / 10 (samples_per_image) = 5 = batch size

        params = ChannelPruningParameters(data_loader=data_loader.train_loader,
                                          num_reconstruction_samples=50,
                                          allow_custom_downsample_ops=True,
                                          mode=aimet_torch.defs.ChannelPruningParameters.Mode.auto,
                                          params=auto_params, multiplicity=8)

        results = ModelCompressor.compress_model(model=model, eval_callback=evaluate, eval_iterations=5,
                                                 input_shape=input_shape,
                                                 compress_scheme=aimet_common.defs.CompressionScheme.channel_pruning,
                                                 cost_metric=aimet_common.defs.CostMetric.mac, parameters=params,
                                                 visualization_url=None)

        compressed_model, stats = results
        print(compressed_model)
        print(stats)
        self.assertNotEqual(model, compressed_model)
        self.assertTrue(0.6 < float(stats.mac_compression_ratio) < 0.65)
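Example No. 12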
    def test_spatial_svd_compress_auto_with_high_multiplicity(self):

        torch.manual_seed(1)
        numpy.random.seed(1)

        AimetLogger.set_level_for_all_areas(logging.DEBUG)

        input_shape = (1, 3, 224, 224)
        model = models.resnet18()

        modules_to_ignore = [model.conv1,
                             model.layer1[0].conv1, model.layer1[0].conv2,
                             model.layer1[1].conv1, model.layer1[1].conv2,
                             model.layer2[0].downsample[0],

                             model.layer3[0].conv1, model.layer3[0].conv2,
                             model.layer3[1].conv1, model.layer3[1].conv2,
                             model.layer3[0].downsample[0],

                             model.layer4[0].conv1, model.layer4[0].conv2,
                             model.layer4[0].downsample[0],
                             model.layer4[1].conv1,
                             model.layer4[1].conv2
                             ]

        greedy_params = aimet_common.defs.GreedySelectionParameters(target_comp_ratio=Decimal(0.65),
                                                                    num_comp_ratio_candidates=4)

        auto_params = aimet_torch.defs.SpatialSvdParameters.AutoModeParams(greedy_params,
                                                                           modules_to_ignore=modules_to_ignore)

        params = aimet_torch.defs.SpatialSvdParameters(aimet_torch.defs.SpatialSvdParameters.Mode.auto, auto_params,
                                                       multiplicity=64)

        mock_eval = FakeEvaluator(input_shape)
        mock_eval.return_values = [0.75, 0.50, 0.25, 0.75, 0.50, 0.25, 0.75, 0.50, 0.25, 0.75, 0.50, 0.25,
                                   0.50, 0.50]

        results = ModelCompressor.compress_model(model=model, eval_callback=mock_eval, eval_iterations=5,
                                                 input_shape=input_shape,
                                                 compress_scheme=aimet_common.defs.CompressionScheme.spatial_svd,
                                                 cost_metric=aimet_common.defs.CostMetric.mac, parameters=params,
                                                 visualization_url=None)
        compressed_model, stats = results
        print(compressed_model)
        print(stats)

        self.assertEqual(64, compressed_model.layer2[0].conv2[0].out_channels)
        self.assertEqual((3, 1), compressed_model.layer2[0].conv2[0].kernel_size)
        self.assertTrue(math.isclose(float(stats.mac_compression_ratio), 0.87, abs_tol=0.01))
Example No. 13
def weight_svd_auto_mode(model, comp_ratio=0.8, retrain=False):
    input_shape = coord_dataset.mgrid.shape

    # Specify the necessary parameters

    greedy_params = GreedySelectionParameters(target_comp_ratio=Decimal(comp_ratio),
                                              num_comp_ratio_candidates=20)
    # tar_params = TarRankSelectionParameters(num_rank_indices=2)
    # rank_select = RankSelectScheme.tar
    rank_select = RankSelectScheme.greedy
    auto_params = WeightSvdParameters.AutoModeParams(rank_select_scheme=rank_select,
                                                     select_params=greedy_params)
    # Optionally: modules_to_ignore=[model.conv1]

    params = WeightSvdParameters(mode=WeightSvdParameters.Mode.auto,
                                 params=auto_params)

    # Single call to compress the model
    results = ModelCompressor.compress_model(model,
                                             eval_callback=evaluate_model,
                                             eval_iterations=1,
                                             input_shape=input_shape,
                                             compress_scheme=CompressionScheme.weight_svd,
                                             cost_metric=CostMetric.memory,
                                             parameters=params)

    compressed_model, stats = results
    # torch.save(compressed_model,
    #            os.path.join(exp_folder, image_name + '/checkpoints/model_aimet_' + str(comp_ratio) + '.pth'))
    # print(compressed_model)
    print(stats)  # Stats object can be pretty-printed easily
    # print(os.path.join(exp_folder, image_name + '/checkpoints/model_aimet_.pth'))
    # res = check_metrics(dataloader, compressed_model, image_resolution)
    # print(res)
    loss_fn = partial(loss_functions.image_mse, None)
    if retrain:
        compressed_model = retrain_model(compressed_model, dataloader, 2000, loss_fn, 0.00005,
                                         TRAINING_FLAGS['l1_reg'])
        torch.save(compressed_model,
                   os.path.join(exp_folder, image_name + '/checkpoints/model_aimet_' + str(comp_ratio) + '_retrained.pth'))
        res = check_metrics(dataloader, compressed_model, image_resolution)
        print(res)
    return compressed_model
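Example No. 14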
    def test_weight_svd_compress_auto_high_multiplicity(self):

        torch.cuda.empty_cache()
        torch.manual_seed(1)
        numpy.random.seed(1)
        torch.backends.cudnn.deterministic = True

        AimetLogger.set_level_for_all_areas(logging.DEBUG)

        input_shape = (1, 3, 224, 224)
        model = models.resnet18(pretrained=True).to(torch.device('cuda'))
        modules_to_ignore = [model.conv1,
                             model.layer2[0].downsample[0],
                             model.layer3[0].downsample[0],
                             model.layer4[0].downsample[0],
                             model.layer4[1].conv1,
                             model.layer4[1].conv2,
                             model.fc
                             ]
        greedy_params = aimet_common.defs.GreedySelectionParameters(target_comp_ratio=Decimal(0.7),
                                                                    num_comp_ratio_candidates=4,
                                                                    saved_eval_scores_dict='./data/resnet18_eval_scores.pkl')
        rank_select = RankSelectScheme.greedy
        auto_params = aimet_torch.defs.WeightSvdParameters.AutoModeParams(rank_select_scheme=rank_select,
                                                                          select_params=greedy_params,
                                                                          modules_to_ignore=modules_to_ignore)
        params = aimet_torch.defs.WeightSvdParameters(aimet_torch.defs.WeightSvdParameters.Mode.auto, auto_params,
                                                      multiplicity=64)

        results = ModelCompressor.compress_model(model, evaluate, 10, input_shape,
                                                 aimet_common.defs.CompressionScheme.weight_svd,
                                                 cost_metric=aimet_common.defs.CostMetric.mac, parameters=params,
                                                 visualization_url=None)

        compressed_model, stats = results
        print(compressed_model)
        print(stats)

        self.assertFalse(isinstance(compressed_model.conv1, torch.nn.Sequential))
        self.assertFalse(isinstance(compressed_model.fc, torch.nn.Sequential))
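Example No. 15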
    def test_spatial_svd_compress_manual(self):

        torch.cuda.empty_cache()
        torch.manual_seed(1)
        numpy.random.seed(1)

        AimetLogger.set_level_for_all_areas(logging.DEBUG)

        input_shape = (1, 3, 224, 224)
        model = models.resnet18()

        manual_params = aimet_torch.defs.SpatialSvdParameters.ManualModeParams(
            [ModuleCompRatioPair(model.layer1[0].conv1, 0.5),
             ModuleCompRatioPair(model.layer2[1].conv2, 0.4)])

        params = aimet_torch.defs.SpatialSvdParameters(aimet_torch.defs.SpatialSvdParameters.Mode.manual, manual_params,
                                                       multiplicity=8)

        # Only used in this test for baseline and final accuracy - essentially a don't care
        mock_eval = MagicMock()
        mock_eval.return_value = 50

        results = ModelCompressor.compress_model(model=model, eval_callback=mock_eval, eval_iterations=5,
                                                 input_shape=input_shape,
                                                 compress_scheme=aimet_common.defs.CompressionScheme.spatial_svd,
                                                 cost_metric=aimet_common.defs.CostMetric.mac, parameters=params,
                                                 visualization_url=None)

        compressed_model, stats = results
        print(compressed_model)
        print(stats)

        # Check that spatial SVD was indeed applied to some layers
        self.assertTrue(isinstance(compressed_model.layer1[0].conv1, torch.nn.Sequential))
        self.assertEqual(48, compressed_model.layer1[0].conv1[0].out_channels)
        self.assertEqual((3, 1), compressed_model.layer1[0].conv1[0].kernel_size)
        self.assertEqual((1, 0), compressed_model.layer1[0].conv1[0].padding)

        self.assertTrue(isinstance(compressed_model.layer2[1].conv2, torch.nn.Sequential))
        self.assertEqual(80, compressed_model.layer2[1].conv2[0].out_channels)
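The expected channel counts in this last test follow from a simple cost model: spatial SVD splits a k x k convolution (in_ch -> out_ch) into a (k x 1) convolution (in_ch -> r) followed by a (1 x k) convolution (r -> out_ch), and r is chosen so the MAC count shrinks by the requested ratio, then rounded to the configured multiplicity. The sketch below reproduces the asserted values of 48 and 80; the exact rounding rule (up versus nearest) is an assumption here, though both give the same result for these layers.

import math

def spatial_svd_rank(in_ch, out_ch, k, comp_ratio, multiplicity=1):
    # MACs per output position: original k x k conv vs. the (k x 1) + (1 x k) pair.
    original_cost = in_ch * out_ch * k * k
    per_rank_cost = (in_ch + out_ch) * k
    rank = comp_ratio * original_cost / per_rank_cost
    return int(math.ceil(rank / multiplicity)) * multiplicity

print(spatial_svd_rank(64, 64, 3, 0.5, multiplicity=8))    # 48, as asserted for layer1[0].conv1[0].out_channels
print(spatial_svd_rank(128, 128, 3, 0.4, multiplicity=8))  # 80, as asserted for layer2[1].conv2[0].out_channels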