# --- Example 1: convert a sparsity-trained model to a slim model ---
# Command-line configuration for the slimming step.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--sparsity', type=float, default=0.5, help='Sparsity ratio')
parser.add_argument(
    '--save_dir', type=str, default='./',
    help='Where to save retrained model')
# parse_known_args so unrelated flags (e.g. from a launcher) are ignored.
args, _ = parser.parse_known_args()

device = 'cuda'

if __name__ == '__main__':
    # Convert a sparsity-trained ("sparse") checkpoint into a physically
    # smaller ("slim") model and save its state_dict.
    sparse_model_path = os.path.join(args.save_dir, 'mynet_sparse.pth')
    # Fail fast with an explicit exception: `assert` is stripped when
    # Python runs with -O, so it must not guard required inputs.
    if not os.path.exists(sparse_model_path):
        raise FileNotFoundError(
            'No sparse model found at {}'.format(sparse_model_path))

    slim_model_path = os.path.join(args.save_dir, 'mynet_slim.pth')
    sparse_model = MyNet()
    sparse_model = load_weights(sparse_model, sparse_model_path)
    sparse_model.to(device)
    # Dummy input the pruning runner uses to trace the network graph.
    # NOTE(review): assumes 3x32x32 inputs (CIFAR-sized) — confirm.
    input_signature = torch.randn([1, 3, 32, 32], dtype=torch.float32)
    input_signature = input_signature.to(device)

    pruning_runner = get_pruning_runner(sparse_model, input_signature,
                                        'iterative')
    # mode='slim' produces a compact model with pruned channels removed.
    slim_model = pruning_runner.prune(removal_ratio=args.sparsity, mode='slim')
    torch.save(slim_model.state_dict(), slim_model_path)
    print('Convert sparse model to slim model done!')
# --- Example 2: sparse fine-tuning setup with DistributedDataParallel ---
                                      num_workers=args.num_workers,
                                      shuffle=True,
                                      train=True,
                                      download=download)
    # Validation loader (DDP-aware helper); no shuffling for evaluation.
    val_loader = get_dataloader_ddp(args.data_dir,
                                    batch_size,
                                    num_workers=args.num_workers,
                                    shuffle=False,
                                    train=False,
                                    download=download)

    # Build the model and load the pretrained (dense) weights.
    model = MyNet()
    model = load_weights(model, args.pretrained)
    # Dummy input the pruning runner uses to trace the network graph.
    # NOTE(review): assumes 3x32x32 inputs (CIFAR-sized) — confirm.
    input_signature = torch.randn([1, 3, 32, 32], dtype=torch.float32)
    input_signature = input_signature.to(device)
    model = model.to(device)
    pruning_runner = get_pruning_runner(model, input_signature, 'iterative')

    # mode='sparse' (vs. 'slim' elsewhere in this file): the pruned model
    # keeps its original architecture so it can be fine-tuned here.
    model = pruning_runner.prune(removal_ratio=args.sparsity, mode='sparse')
    # Wrap for distributed training; one process drives one local GPU.
    model = torch.nn.parallel.DistributedDataParallel(
        model,
        device_ids=[args.local_rank],
        output_device=args.local_rank,
        find_unused_parameters=True)
    criterion = torch.nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 weight_decay=args.weight_decay)
    # Cosine learning-rate decay over the full training run.
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, args.epochs)
    # Best top-1 accuracy seen so far (presumably updated in the training
    # loop that follows this chunk — not visible here).
    best_acc1 = 0
# --- Example 3: evaluate a pretrained model with DataParallel ---
# `parser` is defined earlier in the file; parse_known_args ignores
# unrelated flags (e.g. ones injected by a launcher).
args, _ = parser.parse_known_args()

device = 'cuda'
# Resolve the list of GPU ids from the --gpus argument (project helper).
gpus = get_gpus(args.gpus)

if __name__ == '__main__':
    # Evaluate a pretrained MyNet checkpoint on the validation set.
    model = MyNet()
    model = torch.nn.DataParallel(model, device_ids=gpus)
    # Scale the per-GPU batch size by the number of data-parallel replicas.
    batch_size = args.batch_size * len(gpus)

    # Download the dataset only when it is not already on disk.
    download = not os.path.exists(args.data_dir)

    val_loader = get_dataloader(args.data_dir,
                                batch_size,
                                num_workers=args.num_workers,
                                shuffle=False,
                                train=False,
                                download=download)

    model.to(device)
    criterion = torch.nn.CrossEntropyLoss().cuda()
    # Fail fast: the original only printed a warning and then crashed
    # inside torch.load with a less descriptive error.
    if not os.path.exists(args.pretrained):
        raise FileNotFoundError(
            'Pretrained model does not exist: {}'.format(args.pretrained))
    model.load_state_dict(torch.load(args.pretrained))
    acc1, acc5 = evaluate(val_loader, model, criterion)
    print('acc1={},acc5={}'.format(acc1, acc5))