Example #1
0
     # --- Prune + Quant branch: rebuild a pruned network from its checkpoint,
     # restore its weights, then insert quantization ops in place.
     print('******Prune Quant model******')
     #checkpoint = torch.load('../prune/models_save/nin_refine.pth')
     checkpoint = torch.load(args.prune_quant)
     # 'cfg' is the architecture description saved by the pruning step
     # (presumably per-layer channel counts -- confirm against nin.Net).
     cfg = checkpoint['cfg']
     if args.model_type == 0:
         model = nin.Net(cfg=checkpoint['cfg'])
     else:
         model = nin_gc.Net(cfg=checkpoint['cfg'])
     model.load_state_dict(checkpoint['state_dict'])
     # Reset the best-accuracy tracker for the new training run.
     best_acc = 0
     print('***ori_model***\n', model)
     # Rewrite the model in place with quantization wrappers; bit widths,
     # quantizer type/level, BN-fusion and PTQ/QAFT options all come from
     # the CLI args.
     quantize.prepare(model, inplace=True, a_bits=args.a_bits,
                      w_bits=args.w_bits, q_type=args.q_type,
                      q_level=args.q_level, device=device,
                      weight_observer=args.weight_observer,
                      bn_fuse=args.bn_fuse,
                      bn_fuse_cali=args.bn_fuse_cali,
                      pretrained_model=args.pretrained_model,
                      qaft=args.qaft,
                      ptq=args.ptq,
                      percentile=args.percentile)
     print('\n***quant_model***\n', model)
 elif args.prune_qaft:
     # --- Prune + QAFT branch: same pruned rebuild; note that no
     # state_dict load or quantize.prepare() is visible in this chunk --
     # presumably both happen below this excerpt.
     print('******Prune QAFT model******')
     #checkpoint = torch.load('models_save/nin_bn_fused.pth')
     checkpoint = torch.load(args.prune_qaft)
     cfg = checkpoint['cfg']
     if args.model_type == 0:
         model = nin.Net(cfg=checkpoint['cfg'])
     else:
         model = nin_gc.Net(cfg=checkpoint['cfg'])
     print('***ori_model***\n', model)
Example #2
0
        print('******Prune Quant model******')
        # Pruned checkpoints carry a 'cfg' describing the slimmed architecture.
        if args.model_type == 0:
            checkpoint = torch.load('../models_save/nin.pth')
            quant_model_train = nin.Net(cfg=checkpoint['cfg'])
        else:
            checkpoint = torch.load('../models_save/nin_gc.pth')
            quant_model_train = nin_gc.Net(cfg=checkpoint['cfg'])
    else:
        # Unpruned path: build the default (full-width) architecture.
        if args.model_type == 0:
            checkpoint = torch.load('../models_save/nin.pth')
            quant_model_train = nin.Net()
        else:
            checkpoint = torch.load('../models_save/nin_gc.pth')
            quant_model_train = nin_gc.Net()
    # Keep two copies of the network: one prepared for training and one
    # prepared for inference (quant_inference=True); both get the same weights.
    quant_bn_fused_model_inference = copy.deepcopy(quant_model_train)
    quantize.prepare(quant_model_train, inplace=True, A=args.A, W=args.W)
    quantize.prepare(quant_bn_fused_model_inference,
                     inplace=True,
                     A=args.A,
                     W=args.W,
                     quant_inference=True)
    # Weights are loaded AFTER prepare(), so the checkpoint keys must match
    # the quant-wrapped module names -- presumably it was saved from a
    # prepared model (verify against the training script).
    quant_model_train.load_state_dict(checkpoint['state_dict'])
    quant_bn_fused_model_inference.load_state_dict(checkpoint['state_dict'])

    # ********************** quant_model_train ************************
    # Persist the whole module, its state_dict, and text dumps for inspection.
    torch.save(quant_model_train, 'models_save/quant_model_train.pth')
    torch.save(quant_model_train.state_dict(),
               'models_save/quant_model_train_para.pth')
    # Wrap the module / state_dict as 0-d object arrays so np.savetxt can
    # write a printable representation (call continues past this excerpt).
    model_array = np.array(quant_model_train)
    model_para_array = np.array(quant_model_train.state_dict())
    np.savetxt('models_save/quant_model_train.txt', [model_array],
Example #3
0
            model = nin.Net()
        else:
            model = nin_gc.Net()
        best_acc = 0
        # Fresh (untrained) model: Xavier-uniform init for conv weights,
        # small Gaussian for linear weights, zeros for all biases.
        for m in model.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    init.zeros_(m.bias)
    print('***ori_model***\n', model)
    # Insert activation/weight quantization wrappers in place.
    quantize.prepare(model,
                     inplace=True,
                     a_bits=args.a_bits,
                     w_bits=args.w_bits)
    print('\n***quant_model***\n', model)

    if not args.cpu:
        # Train on every visible GPU via DataParallel.
        model.cuda()
        model = torch.nn.DataParallel(model,
                                      device_ids=range(
                                          torch.cuda.device_count()))

    base_lr = float(args.lr)
    # Build one optimizer param group per parameter; every group currently
    # uses the same learning rate and weight decay.
    param_dict = dict(model.named_parameters())
    params = []
    for key, value in param_dict.items():
        params += [{'params': [value], 'lr': base_lr, 'weight_decay': args.wd}]
Example #4
0
        # Pruned checkpoints carry a 'cfg' describing the slimmed architecture.
        if args.model_type == 0:
            checkpoint = torch.load('../models_save/nin.pth')
            quant_model_train = nin.Net(cfg=checkpoint['cfg'])
        else:
            checkpoint = torch.load('../models_save/nin_gc.pth')
            quant_model_train = nin_gc.Net(cfg=checkpoint['cfg'])
    else:
        # Unpruned path: build the default (full-width) architecture.
        if args.model_type == 0:
            checkpoint = torch.load('../models_save/nin.pth')
            quant_model_train = nin.Net()
        else:
            checkpoint = torch.load('../models_save/nin_gc.pth')
            quant_model_train = nin_gc.Net()
    # Two copies: one prepared for training, one prepared for inference
    # (quant_inference=True); both load the same checkpoint weights below.
    quant_model_inference = copy.deepcopy(quant_model_train)
    quantize.prepare(quant_model_train,
                     inplace=True,
                     a_bits=args.a_bits,
                     w_bits=args.w_bits)
    quantize.prepare(quant_model_inference,
                     inplace=True,
                     a_bits=args.a_bits,
                     w_bits=args.w_bits,
                     quant_inference=True)
    # Loaded AFTER prepare(), so checkpoint keys must match the quant-wrapped
    # module names -- presumably saved from a prepared model (verify).
    quant_model_train.load_state_dict(checkpoint['state_dict'])
    quant_model_inference.load_state_dict(checkpoint['state_dict'])

    # ********************** quant_model_train ************************
    # Persist the whole module, its state_dict, and (below this excerpt)
    # printable dumps built from these object arrays.
    torch.save(quant_model_train, 'models_save/quant_model_train.pth')
    torch.save(quant_model_train.state_dict(),
               'models_save/quant_model_train_para.pth')
    model_array = np.array(quant_model_train)
    model_para_array = np.array(quant_model_train.state_dict())
Example #5
0
     # --- Prune + Refine branch: rebuild the pruned network from its
     # checkpoint, restore weights, then insert quantization ops in place.
     print('******Prune Refine model******')
     #checkpoint = torch.load('../prune/models_save/nin_refine.pth')
     checkpoint = torch.load(args.prune_refine)
     # 'cfg' is the pruned architecture description (presumably per-layer
     # channel counts -- confirm against nin.Net).
     cfg = checkpoint['cfg']
     if args.model_type == 0:
         model = nin.Net(cfg=checkpoint['cfg'])
     else:
         model = nin_gc.Net(cfg=checkpoint['cfg'])
     model.load_state_dict(checkpoint['state_dict'])
     # Reset the best-accuracy tracker for the new run.
     best_acc = 0
     print('***ori_model***\n', model)
     # Quantization options (bit widths, quantizer type/level, BN fusion)
     # come from the CLI args.
     quantize.prepare(model,
                      inplace=True,
                      a_bits=args.a_bits,
                      w_bits=args.w_bits,
                      q_type=args.q_type,
                      q_level=args.q_level,
                      device=device,
                      weight_observer=args.weight_observer,
                      bn_fuse=args.bn_fuse)
     print('\n***quant_model***\n', model)
 elif args.refine:
     # --- Float Refine branch: unpruned architecture; this checkpoint is a
     # bare state_dict (no 'cfg'/'state_dict' wrapper dict).
     print('******Float Refine model******')
     #checkpoint = torch.load('models_save/nin.pth')
     state_dict = torch.load(args.refine)
     if args.model_type == 0:
         model = nin.Net()
     else:
         model = nin_gc.Net()
     model.load_state_dict(state_dict)
     best_acc = 0
Example #6
0
            quant_bn_fused_model_train = nin.Net(cfg=checkpoint['cfg'])
        else:
            checkpoint = torch.load('../models_save/nin_gc_bn_fused.pth')
            quant_bn_fused_model_train = nin_gc.Net(cfg=checkpoint['cfg'])
    else:
        # Unpruned path: default architecture, BN-fused checkpoints.
        if args.model_type == 0:
            checkpoint = torch.load('../models_save/nin_bn_fused.pth')
            quant_bn_fused_model_train = nin.Net()
        else:
            checkpoint = torch.load('../models_save/nin_gc_bn_fused.pth')
            quant_bn_fused_model_train = nin_gc.Net()
    # Two copies: one prepared for training, one for inference
    # (quant_inference=True); bn_fuse=1 is forced on both to match the
    # BN-fused checkpoints being loaded.
    quant_bn_fused_model_inference = copy.deepcopy(quant_bn_fused_model_train)
    quantize.prepare(quant_bn_fused_model_train,
                     inplace=True,
                     a_bits=args.a_bits,
                     w_bits=args.w_bits,
                     q_type=args.q_type,
                     q_level=args.q_level,
                     device=device,
                     bn_fuse=1)
    quantize.prepare(quant_bn_fused_model_inference,
                     inplace=True,
                     a_bits=args.a_bits,
                     w_bits=args.w_bits,
                     q_type=args.q_type,
                     q_level=args.q_level,
                     device=device,
                     bn_fuse=1,
                     quant_inference=True)
    # Loaded AFTER prepare(), so checkpoint keys must match the quant-wrapped
    # module names -- presumably saved from a prepared model (verify).
    quant_bn_fused_model_train.load_state_dict(checkpoint['state_dict'])
    quant_bn_fused_model_inference.load_state_dict(checkpoint['state_dict'])
Example #7
0
    # CIFAR-10 class names
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    # model
    if args.prune_refine:
        # Pruned checkpoint: rebuild the slimmed architecture from its 'cfg',
        # restore weights, then insert quantization wrappers in place.
        print('******Prune Refine model******')
        #checkpoint = torch.load('../prune/models_save/nin_refine.pth')
        checkpoint = torch.load(args.prune_refine)
        if args.model_type == 0:
            model = nin.Net(cfg=checkpoint['cfg'])
        else:
            model = nin_gc.Net(cfg=checkpoint['cfg'])
        model.load_state_dict(checkpoint['state_dict'])
        # Reset the best-accuracy tracker for the new run.
        best_acc = 0
        print('***ori_model***\n', model)
        quantize.prepare(model, inplace=True, A=args.A, W=args.W)
        print('\n***quant_model***\n', model)
    elif args.refine:
        # Float checkpoint: unpruned architecture; this file is a bare
        # state_dict (no 'cfg'/'state_dict' wrapper dict).
        print('******Float Refine model******')
        #checkpoint = torch.load('models_save/nin.pth')
        state_dict = torch.load(args.refine)
        if args.model_type == 0:
            model = nin.Net()
        else:
            model = nin_gc.Net()
        model.load_state_dict(state_dict)
        best_acc = 0
        print('***ori_model***\n', model)
        quantize.prepare(model, inplace=True, A=args.A, W=args.W)
        print('\n***quant_model***\n', model)
    elif args.resume: