Code example #1
from typing import Any, Dict, List

# NOTE: CompressionVessel is NNI's bundle of the model and its training
# utilities; its import is omitted in the original snippet.
def parse_basic_pruner(pruner_config: Dict[str, str],
                       config_list: List[Dict[str, Any]],
                       vessel: CompressionVessel):
    """
    Parse basic pruner and model-related objects used by pruning scheduler.
    """
    model, finetuner, evaluator, dummy_input, trainer, optimizer_helper, criterion, device = vessel.export()
    if pruner_config['pruner_type'] == 'L1NormPruner':
        from nni.compression.pytorch.pruning import L1NormPruner
        basic_pruner = L1NormPruner(model=model,
                                    config_list=config_list,
                                    mode=pruner_config['mode'],
                                    dummy_input=dummy_input)
    elif pruner_config['pruner_type'] == 'TaylorFOWeightPruner':
        from nni.compression.pytorch.pruning import TaylorFOWeightPruner
        basic_pruner = TaylorFOWeightPruner(
            model=model,
            config_list=config_list,
            trainer=trainer,
            traced_optimizer=optimizer_helper,
            criterion=criterion,
            training_batches=pruner_config['training_batches'],
            mode=pruner_config['mode'],
            dummy_input=dummy_input)
    else:
        raise ValueError('Unsupported basic pruner type {}'.format(
            pruner_config['pruner_type']))
    return basic_pruner, model, finetuner, evaluator, dummy_input, device
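
For orientation, a hypothetical call site for this function might look like the sketch below. The pruner_config keys mirror the ones read inside the function; `vessel` is assumed to be an already constructed CompressionVessel, whose setup the original snippet does not show.

# Hypothetical usage sketch; the config values are illustrative only.
pruner_config = {'pruner_type': 'L1NormPruner', 'mode': 'dependency_aware'}
config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
basic_pruner, model, finetuner, evaluator, dummy_input, device = \
    parse_basic_pruner(pruner_config, config_list, vessel)  # vessel built elsewhere
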
Code example #2
File: test_scheduler.py  Project: microsoft/nni
    def test_pruning_scheduler(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]

        # A single AGP iteration; the task generator produces the pruning
        # tasks that the scheduler feeds to the pruner.
        task_generator = AGPTaskGenerator(1, model, config_list)
        pruner = L1NormPruner(model, config_list)
        scheduler = PruningScheduler(pruner, task_generator)

        scheduler.compress()
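
For context, AGPTaskGenerator implements the Automated Gradual Pruning schedule (Zhu & Gupta, 2017), which ramps sparsity from an initial value to the target along a cubic curve over the scheduled iterations. A standalone sketch of that schedule (the helper name is illustrative, not NNI API):

def agp_sparsity(target: float, t: int, total: int, initial: float = 0.0) -> float:
    # Cubic AGP ramp: returns `initial` at t=0 and `target` at t=total.
    return target + (initial - target) * (1 - t / total) ** 3

With a single iteration as above, the one generated task effectively requests the full 0.8 sparsity right away.
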
Code example #3
    def test_l1_norm_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        # dependency_aware mode needs a dummy input to trace channel
        # dependencies between layers.
        pruner = L1NormPruner(model=model, config_list=config_list, mode='dependency_aware',
                              dummy_input=torch.rand(10, 1, 28, 28))
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        # The realized sparsity should land close to the configured 0.8.
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
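
As a cross-check, the total sparsity of a single weight mask can also be computed directly; a minimal sketch (this helper is illustrative, not part of NNI):

import torch

def mask_sparsity(mask: torch.Tensor) -> float:
    # Fraction of elements zeroed out by a binary mask.
    return 1.0 - mask.count_nonzero().item() / mask.numel()
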
Code example #4
File: test_pruning_wrapper.py  Project: microsoft/nni
    def test_pruner_module_wrapper(self):
        model = TorchModel()
        conv1_weight = model.conv1.weight.data.clone()
        conv2_weight = model.conv2.weight.data.clone()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = L1NormPruner(model, config_list)
        _, masks = pruner.compress()
        model(torch.rand(10, 1, 28, 28))
        # The wrappers keep the original weights intact...
        assert torch.equal(model.conv1.weight.data, conv1_weight)
        assert torch.equal(model.conv2.weight.data, conv2_weight)
        # ...while each wrapped module holds the masked weights.
        assert torch.equal(model.conv1.module.weight.data,
                           conv1_weight * masks['conv1']['weight'])
        assert torch.equal(model.conv2.module.weight.data,
                           conv2_weight * masks['conv2']['weight'])
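
The layout these assertions rely on can be pictured with a minimal wrapper sketch; this is illustrative, not NNI's actual PrunerModuleWrapper:

import torch
import torch.nn as nn

class PrunerWrapperSketch(nn.Module):
    # Illustrative only: the wrapper owns the original weight and writes the
    # masked weight into the wrapped module before each forward, so
    # `wrapper.weight` stays intact while `wrapper.module.weight` is masked.
    def __init__(self, module: nn.Module, weight_mask: torch.Tensor):
        super().__init__()
        self.module = module
        self.weight = nn.Parameter(module.weight.data.clone())
        self.register_buffer('weight_mask', weight_mask)

    def forward(self, x):
        self.module.weight.data = self.weight.data * self.weight_mask
        return self.module(x)
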
Code example #5
    print('Accuracy: {}%\n'.format(acc))
    return acc


if __name__ == '__main__':
    model = VGG().to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    criterion = torch.nn.CrossEntropyLoss()

    print('\nPre-train the model:')
    for i in range(5):
        trainer(model, optimizer, criterion, i)
        evaluator(model)

    config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
    pruner = L1NormPruner(model, config_list)
    _, masks = pruner.compress()

    print('\nThe accuracy with masks:')
    evaluator(model)

    # Unwrap the model before speedup: ModelSpeedup needs the original
    # modules and uses the masks to actually shrink the layer shapes.
    pruner._unwrap_model()
    ModelSpeedup(model, dummy_input=torch.rand(10, 3, 32, 32).to(device), masks_file=masks).speedup_model()

    print('\nThe accuracy after speedup:')
    evaluator(model)

    # Need a new optimizer because the modules in the model are replaced during speedup.
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    print('\nFinetune the model after speedup:')
    for i in range(5):
        trainer(model, optimizer, criterion, i)
        evaluator(model)
Code example #6
File: scheduler_torch.py  Project: yinfupai/nni

if __name__ == '__main__':
    model = VGG().to(device)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=5e-4)
    criterion = torch.nn.CrossEntropyLoss()

    # pre-train the model
    for i in range(5):
        trainer(model, optimizer, criterion, i)

    # No need to pass model and config_list to the pruner during initialization
    # when using a scheduler; the task generator supplies them for each task.
    pruner = L1NormPruner(None, None)

    # You can specify log_dir; all intermediate results and the best result will be saved under this folder.
    # If you don't want to keep intermediate results, set `keep_intermediate_result=False`.
    config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
    task_generator = AGPTaskGenerator(10,
                                      model,
                                      config_list,
                                      log_dir='.',
                                      keep_intermediate_result=True)

    dummy_input = torch.rand(10, 3, 32, 32).to(device)

    # If you just want to keep the final result as the best result, pass evaluator as None;
    # otherwise the result with the highest score (given by the evaluator) is kept as the best result.
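
The snippet is cut off here. Mirroring example #2, a plausible continuation (an assumption, not part of the original file) constructs the scheduler and runs it:

    # Assumed continuation: the `dummy_input` above suggests the original also
    # enables model speedup on the scheduler; those extra arguments are omitted.
    scheduler = PruningScheduler(pruner, task_generator)
    scheduler.compress()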