Example #1
def evaluator(model):  # the snippet begins mid-file; signature inferred from the calls below
    model.eval()
    correct = 0
    with torch.no_grad():
        for data, target in tqdm(iterable=test_loader, desc='Test'):
            data, target = data.to(device), target.to(device)
            output = model(data)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    acc = 100 * correct / len(test_loader.dataset)
    print('Accuracy: {}%\n'.format(acc))
    return acc
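
# The examples on this page also call a `trainer` helper that is not shown.
# A minimal sketch of what it plausibly looks like (assumed, not the original
# code; `train_loader` and `device` are likewise assumed defined elsewhere):
def trainer(model, optimizer, criterion, epoch):
    model.train()
    for data, target in tqdm(iterable=train_loader, desc='Epoch {}'.format(epoch)):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = criterion(model(data), target)
        loss.backward()
        optimizer.step()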


if __name__ == '__main__':
    model = VGG().to(device)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=5e-4)
    criterion = torch.nn.CrossEntropyLoss()

    print('\nPre-train the model:')
    for i in range(5):
        trainer(model, optimizer, criterion, i)
        evaluator(model)

    config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
    pruner = L1NormPruner(model, config_list)
    _, masks = pruner.compress()

    print('\nThe accuracy with masks:')
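    # The original snippet is truncated here. Mirroring Example #4 below
    # (a sketch, not the original code): evaluate the masked model, then
    # really shrink it with ModelSpeedup and evaluate again.
    evaluator(model)

    pruner._unwrap_model()
    ModelSpeedup(model, torch.randn([128, 3, 32, 32]).to(device), masks).speedup_model()
    print('\nThe accuracy after speedup:')
    evaluator(model)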
Example #2
    model.load_state_dict(best_state_dict)
    pre_flops, pre_params, _ = count_flops_params(
        model,
        torch.randn([128, 3, 32, 32]).to(device))
    g_epoch = 0

    # Start to prune and speedup
    print('\n' + '=' * 50 +
          ' START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50)
    config_list = [{
        'total_sparsity': 0.5,
        'op_types': ['Conv2d'],
    }]

    # Make sure you have used nni.trace to wrap the optimizer class before initializing it.
    traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(),
                                                  lr=0.01,
                                                  momentum=0.9,
                                                  weight_decay=5e-4)
    if 'apoz' in args.pruner:
        pruner = ActivationAPoZRankPruner(model,
                                          config_list,
                                          trainer,
                                          traced_optimizer,
                                          criterion,
                                          training_batches=20)
    else:
        # (call completed to mirror the APoZ branch above; the snippet was cut off here)
        pruner = ActivationMeanRankPruner(model,
                                          config_list,
                                          trainer,
                                          traced_optimizer,
                                          criterion,
                                          training_batches=20)
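    # The original snippet ends at this point. Mirroring Example #4 below
    # (a sketch, not the original code): compute the masks, apply ModelSpeedup,
    # and re-count FLOPs/params for comparison with pre_flops and pre_params.
    _, masks = pruner.compress()
    pruner._unwrap_model()
    ModelSpeedup(model, torch.randn([128, 3, 32, 32]).to(device), masks).speedup_model()
    flops, params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))
    print('FLOPs: {} -> {}, params: {} -> {}'.format(pre_flops, flops, pre_params, params))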
Example #3
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='PyTorch Iterative Example for model compression')
    parser.add_argument('--pretrain-epochs', type=int, default=10,
                        help='number of epochs to pretrain the model')
    parser.add_argument('--pruning-algo', type=str, default='l1',
                        choices=['level', 'l1', 'l2', 'fpgm', 'slim', 'apoz',
                                 'mean_activation', 'taylorfo', 'admm'],
                        help='algorithm to evaluate weights to prune')
    parser.add_argument('--cool-down-rate', type=float, default=0.9,
                        help='cool down rate of the temperature')

    args = parser.parse_args()

    model = VGG().to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    criterion = torch.nn.CrossEntropyLoss()

    # pre-train the model
    for i in range(args.pretrain_epochs):
        trainer(model, optimizer, criterion, i)
        evaluator(model)

    config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.8}]

    # The evaluator argument of 'SimulatedAnnealingPruner' must not be None.
    pruner = SimulatedAnnealingPruner(model, config_list, pruning_algorithm=args.pruning_algo,
                                      evaluator=evaluator, cool_down_rate=args.cool_down_rate, finetuner=finetuner)
    pruner.compress()
    _, model, masks, _, _ = pruner.get_best_result()
    evaluator(model)
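
# `finetuner`, passed to SimulatedAnnealingPruner above, is not shown in this
# snippet. A minimal sketch of a plausible implementation (assumed, not the
# original code), reusing the `trainer` helper for one epoch with a fresh
# optimizer:
def finetuner(model):
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01,
                                momentum=0.9, weight_decay=5e-4)
    trainer(model, optimizer, criterion, 0)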
Example #4
            pre_best_acc = acc
            best_state_dict = model.state_dict()
    print("Best accuracy: {}".format(pre_best_acc))
    model.load_state_dict(best_state_dict)
    pre_flops, pre_params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))
    g_epoch = 0

    # Start to prune and speedup
    print('\n' + '=' * 50 + ' START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50)
    config_list = [{
        'sparsity': 0.8,
        'op_types': ['Conv2d'],
    }]

    # Make sure you have used nni.trace to wrap the optimizer class before initializing it.
    traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
    pruner = ADMMPruner(model, config_list, trainer, traced_optimizer, criterion,
                        iterations=10, training_epochs=1, granularity='coarse-grained')
    _, masks = pruner.compress()
    pruner.show_pruned_weights()

    pruner._unwrap_model()
    ModelSpeedup(model, torch.randn([128, 3, 32, 32]).to(device), masks).speedup_model()

    print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER PRUNING ' + '=' * 50)
    evaluator(model)

    # The optimizer used in the pruner might have been patched, so it is recommended
    # to create a new optimizer for the fine-tuning stage.
    print('\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50)
    optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01, total_epoch=args.fine_tune_epochs)

    best_acc = 0.0
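    # The original snippet is truncated here. A plausible continuation (a
    # sketch, not the original code) fine-tunes for args.fine_tune_epochs
    # epochs and keeps the best checkpoint:
    for i in range(args.fine_tune_epochs):
        trainer(model, optimizer, criterion, i)
        scheduler.step()
        acc = evaluator(model)
        if acc > best_acc:
            best_acc = acc
            best_state_dict = model.state_dict()
    print('Best accuracy after fine-tuning: {}'.format(best_acc))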