Example 1
0
# Append a one-line experiment summary (prune ratio, quantization bits,
# attack algorithm and epsilon) to the experiment log under args.loaddir.
# NOTE(review): the handle is never closed in this snippet — presumably
# `f` is reused or closed later; confirm, or use `with open(...)` if not.
f = open(os.path.join(args.loaddir, args.exp_logger), "a+")
f.write('prune_ratio: %f, quantize_bits: %d, attack: %s-%d\n' % (args.prune_ratio, args.quantize_bits, args.attack_algo, args.attack_eps))

try:
    import math
    # ready to go
    if isinstance(quant_model, nn.DataParallel):
        quant_model.module.replace_bias(sparse_model.module, weight_name)
    else:
        quant_model.replace_bias(sparse_model, weight_name)
    acc, acc_adv = model_test(quant_model, 0, test_loader, 
            atk_algo=attack_algo, atk_eps=args.attack_eps, 
            iscuda=args.cuda, adv_iter=args.attack_iter, criterion=F.cross_entropy)
    
    layers = layers_nnz(quant_model, param_name=weight_name)[1]
    misc.print_dict(layers, name="NNZ PER LAYER")
    # print(all_num)
    sparse_factor = sum(layers.values())
    # print(sparse_factor)
    if quantize_algo is not None:
        if args.quantize_algo == "kmeans_nnz_fixed_0_center":
            weight_bits = [math.ceil(math.log2(item-1)) if item > 1 else 0 for item in list(pt.layers_unique(quant_model, weight_name)[1].values())]
            weight_dict_size = [ (item - 1) * 32 for item in list(pt.layers_unique(quant_model, weight_name)[1].values())] 
        elif args.quantize_algo == "kmeans":
            weight_bits = [math.ceil(math.log2(item)) for item in list(pt.layers_unique(quant_model, weight_name)[1].values())]
            weight_dict_size = [item * 32 for item in list(pt.layers_unique(quant_model, weight_name)[1].values())]
        elif args.quantize_algo == "kmeans_fixed_nnz":
            weight_bits = [math.ceil(math.log2(item-1)) if item > 1 else 0 for item in list(pt.layers_unique(quant_model, weight_name)[1].values())]
            weight_dict_size = [ (item - 1) * 32 for item in list(pt.layers_unique(quant_model, weight_name)[1].values())] 
        else:
            raise 'quantize algo error!'
Example 2
0
# Best-so-far trackers: training accuracy starts at zero, distance at
# positive infinity so any first observation improves on it.
best_train_acc, best_dist = 0.0, np.inf

try:
    # ready to go
    # Baseline evaluation: clean + adversarial accuracy of `model`.
    # NOTE(review): adv_iter is hard-coded to 16 here while the similar
    # fragment above passes args.attack_iter — confirm this is intentional.
    model_test(model,
               0,
               test_loader,
               atk_algo=attack_algo,
               atk_eps=args.attack_eps,
               iscuda=args.cuda,
               adv_iter=16,
               criterion=F.cross_entropy)

    # Per-layer non-zero weight counts of the (pruned) model.
    layers = layers_nnz(model, param_name=weight_name)[1]
    misc.print_dict(layers, name="NNZ PER LAYER")

    # Total weight count of the dense base model; the sparse budget is
    # that total scaled by the requested prune ratio.
    layers_n = pt.layers_n(model_base, param_name=["weight"])[1]
    all_num = sum(layers_n.values())
    # print(all_num)
    sparse_factor = int(all_num * args.prune_ratio)
    # print(sparse_factor)
    # Model size in bits = surviving weights * bits per weight.
    model_size = sparse_factor * args.quantize_bits
    print("\t MODEL SIZE {}".format(model_size))
    # Uniform per-layer bit width only for the "model_size_quant" algo;
    # None otherwise (presumably the callee picks its own widths — verify).
    weight_bits = [args.quantize_bits for _ in layers
                   ] if args.quantize_algo == "model_size_quant" else None
    print("\t weight bits {}".format(weight_bits))
    # Helpers handed to downstream code: per-layer nnz counts and the
    # matching parameter list for a given model.
    layernnz = lambda m: list(
        pt.layers_nnz(m, param_name=weight_name)[1].values())
    param_list = lambda m: pt.param_list(m, param_name=weight_name)