Example #1
    def test_pruning_scheduler(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]

        # one AGP iteration driving the L1-norm pruner through the scheduler
        task_generator = AGPTaskGenerator(1, model, config_list)
        pruner = L1NormPruner(model, config_list)
        scheduler = PruningScheduler(pruner, task_generator)

        scheduler.compress()
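
The examples on this page omit their imports. A minimal sketch of what they rely on, assuming the NNI 2.x module layout (the exact paths are an assumption; they moved between NNI releases):

# Imports assumed by the examples on this page (NNI 2.x layout; the exact
# module paths are an assumption -- they changed across NNI releases).
import torch
from nni.compression.pytorch.pruning import L1NormPruner
from nni.compression.pytorch.speedup import ModelSpeedup
from nni.algorithms.compression.v2.pytorch.pruning import PruningScheduler
from nni.algorithms.compression.v2.pytorch.pruning.tools import AGPTaskGenerator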
Example #2
    def test_l1_norm_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        # dependency-aware mode needs a dummy input to trace channel dependencies
        pruner = L1NormPruner(model=model, config_list=config_list, mode='dependency_aware',
                              dummy_input=torch.rand(10, 1, 28, 28))
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        # check that the realized sparsity is close to the configured 0.8
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
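
TorchModel and compute_sparsity_mask2compact come from NNI's test utilities and are not part of the excerpt. A hypothetical stand-in for TorchModel that fits the calls above: the config only targets Conv2d layers, and the model must accept the (10, 1, 28, 28) dummy input.

# Hypothetical stand-in for the TorchModel used in these tests (the real one
# lives in NNI's test utilities). Shapes are chosen to match the dummy input
# torch.rand(10, 1, 28, 28) above.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TorchModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3)   # 28x28 -> 26x26
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3)  # 26x26 -> 24x24
        self.fc = nn.Linear(32 * 24 * 24, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        return self.fc(x.flatten(1))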
Example #3
if __name__ == '__main__':
    model = VGG().to(device)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=5e-4)
    criterion = torch.nn.CrossEntropyLoss()

    print('\nPre-train the model:')
    for i in range(5):
        trainer(model, optimizer, criterion, i)
        evaluator(model)

    config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
    pruner = L1NormPruner(model, config_list)
    _, masks = pruner.compress()

    print('\nThe accuracy with masks:')
    evaluator(model)

    # Unwrap the pruner's module wrappers before running speedup.
    pruner._unwrap_model()
    # Speed up the model: physically remove the masked channels so the
    # pruned model really gets smaller and faster.
    ModelSpeedup(model,
                 dummy_input=torch.rand(10, 3, 32, 32).to(device),
                 masks_file=masks).speedup_model()

    print('\nThe accuracy after speed up:')
    evaluator(model)

    # Need a new optimizer because the modules in the model are replaced during speedup.
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=5e-4)
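
trainer and evaluator are helpers defined earlier in the original script and not shown in the excerpt. A hypothetical minimal pair matching the call sites above, assuming CIFAR-10 DataLoaders named train_loader and test_loader and the same device as the script:

# Hypothetical trainer/evaluator matching the calls above; the originals are
# defined elsewhere in the NNI example script. train_loader, test_loader and
# device are assumed to exist.
def trainer(model, optimizer, criterion, epoch):
    model.train()
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = criterion(model(data), target)
        loss.backward()
        optimizer.step()

def evaluator(model):
    model.eval()
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            correct += (model(data).argmax(dim=1) == target).sum().item()
    acc = correct / len(test_loader.dataset)
    print(f'Accuracy: {acc:.4f}')
    return acc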
Example #4

if __name__ == '__main__':
    model = VGG().to(device)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=5e-4)
    criterion = torch.nn.CrossEntropyLoss()

    # pre-train the model
    for i in range(5):
        trainer(model, optimizer, criterion, i)

    # No need to pass model and config_list to the pruner at initialization when using a scheduler.
    pruner = L1NormPruner(None, None)

    # You can specify log_dir; all intermediate results and the best result will be saved under this folder.
    # If you don't want to keep intermediate results, set `keep_intermediate_result=False`.
    config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
    task_generator = AGPTaskGenerator(10,
                                      model,
                                      config_list,
                                      log_dir='.',
                                      keep_intermediate_result=True)

    dummy_input = torch.rand(10, 3, 32, 32).to(device)

    # If you just want to keep the final result as the best result, pass evaluator as None;
    # otherwise the result with the highest score (given by the evaluator) is kept as the best result.
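
The excerpt is cut off here. The natural continuation builds the scheduler and runs it; a sketch assuming the NNI 2.x PruningScheduler signature (the finetuner/speedup/evaluator keyword names and the get_best_result return layout are assumptions based on that release line):

    # Sketch of the likely continuation (keyword names and the return layout
    # of get_best_result follow NNI 2.x; verify against your installed version).
    scheduler = PruningScheduler(pruner, task_generator, finetuner=None,
                                 speedup=True, dummy_input=dummy_input,
                                 evaluator=None)
    scheduler.compress()
    _, best_model, best_masks, _, _ = scheduler.get_best_result()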