# NOTE(review): fragment of a larger training/eval loop -- `epoch`, `best_dist`,
# `descripter`, `args`, `attack_algo`, `weight_name`, and both models come from
# the enclosing scope. The first line's original indentation appears to have
# been lost during extraction (it should match the second
# `if quantize_lambda is not None:` below) -- TODO confirm against the source.
if quantize_lambda is not None:
                # Copy biases from `model` (sparse) into `modelz` (quantized),
                # unwrapping nn.DataParallel so both sides address the
                # underlying module directly.
                if isinstance(modelz, nn.DataParallel):
                    modelz.module.replace_bias(model.module, weight_name)
                else:
                    modelz.replace_bias(model, weight_name)
                # Clean and adversarial accuracy of the quantized model;
                # attack configuration is taken from `args`.
                acc, acc_adv = model_test(modelz,
                                          epoch,
                                          test_loader,
                                          atk_algo=attack_algo,
                                          atk_eps=args.attack_eps,
                                          iscuda=args.cuda,
                                          adv_iter=args.defend_iter,
                                          criterion=F.cross_entropy)

                # Per-layer count of unique values in `weight_name` params
                # (index [1] picks the per-layer dict, mirroring layers_nnz).
                layers = layers_unique(modelz,
                                       normalized=False,
                                       param_name=weight_name)[1]
            # NOTE(review): same condition tested again immediately -- likely
            # two adjacent blocks of the original function; verify no code was
            # dropped between them during extraction.
            if quantize_lambda is not None:
                # Distance between the sparse model and its quantized copy:
                # total, per-layer, and per-layer relative distances.
                dist, dist_layer, relative_layer = util_trts.model_distance(
                    model, modelz, weight_name=weight_name)
                print("\t model dist: {}, layer-wise dist :{}, relative: {}".
                      format(dist, dist_layer, relative_layer))
                # Track and checkpoint the sparse/quantized pair that are
                # closest to each other so far.
                if dist < best_dist:
                    best_dist = dist
                    misc.model_saver(model, args.savedir, args.save_model_name,
                                     "sparse_closest_" + descripter)
                    misc.model_saver(modelz, args.savedir,
                                     args.save_model_name,
                                     "quant_closest_" + descripter)
                    print("\t\t closest model saved")
示例#2 (Example #2)
0
 # Sync biases from the sparse model into the quantized model, unwrapping
 # nn.DataParallel so both sides address the underlying module.
 if isinstance(quant_model, nn.DataParallel):
     quant_model.module.replace_bias(sparse_model.module, weight_name)
 else:
     quant_model.replace_bias(sparse_model, weight_name)
 # Clean / adversarial accuracy of the quantized model ("epoch" fixed at 0);
 # attack settings come from `args` -- presumably an iterative attack, confirm.
 acc, acc_adv = model_test(quant_model, 0, test_loader, 
         atk_algo=attack_algo, atk_eps=args.attack_eps, 
         iscuda=args.cuda, adv_iter=args.attack_iter, criterion=F.cross_entropy)
 
 # Per-layer count of non-zero entries in the `weight_name` parameters
 # (index [1] selects the per-layer dict).
 layers = layers_nnz(quant_model, param_name=weight_name)[1]
 misc.print_dict(layers, name="NNZ PER LAYER")
 # print(all_num)
 # Total number of non-zero weights across all layers.
 sparse_factor = sum(layers.values())
 # print(sparse_factor)
 # Derive, per layer, the index width in bits (`weight_bits`) and the codebook
 # storage in bits (`weight_dict_size`) implied by k-means weight quantization.
 if quantize_algo is not None:
     # Per-layer unique-value counts, computed ONCE. pt.layers_unique walks
     # the whole model, so the original's four repeated calls were redundant.
     _uniq_counts = list(pt.layers_unique(quant_model, weight_name)[1].values())
     # NOTE(review): the outer guard tests `quantize_algo` but the branches
     # test `args.quantize_algo` -- presumably the same value; confirm.
     if args.quantize_algo in ("kmeans_nnz_fixed_0_center", "kmeans_fixed_nnz"):
         # These two modes were byte-identical duplicate branches in the
         # original; merged. Zero is a fixed center with no codebook entry,
         # hence (item - 1) indexable values; a degenerate single-value
         # codebook needs 0 index bits.
         weight_bits = [math.ceil(math.log2(item - 1)) if item > 1 else 0
                        for item in _uniq_counts]
         # Each codebook entry is stored as one 32-bit float.
         weight_dict_size = [(item - 1) * 32 for item in _uniq_counts]
     elif args.quantize_algo == "kmeans":
         weight_bits = [math.ceil(math.log2(item)) for item in _uniq_counts]
         weight_dict_size = [item * 32 for item in _uniq_counts]
     else:
         # BUG FIX: the original `raise 'quantize algo error!'` is illegal in
         # Python 3 (raising a str raises TypeError and masks the real error);
         # raise a proper exception instead.
         raise ValueError('quantize algo error!')
 else:
     # Unquantized weights: full 32-bit floats, no codebook overhead.
     weight_bits = [32] * len(layers)
 # Accumulate the compressed model size in bits.
 # NOTE(review): the loop body is truncated at the end of this view -- the
 # remainder (which presumably folds nnz_cur/dict_size into model_size) is
 # not visible here.
 model_size = 0
 for i in range(len(layers)):
     # Codebook overhead exists only when quantization is enabled.
     dict_size = weight_dict_size[i] if quantize_algo is not None else 0
     # NOTE(review): list(layers.values()) is rebuilt on every iteration;
     # hoisting it above the loop would be cheaper. Left unchanged here.
     nnz_cur = list(layers.values())[i]
示例#3 (Example #3)
0
                           # NOTE(review): continuation of a model_test(...)
                           # call whose opening line is outside this view.
                           atk_algo=attack_algo,
                           atk_eps=args.attack_eps,
                           iscuda=args.cuda,
                           adv_iter=args.attack_iter,
                           criterion=F.cross_entropy)
 # Per-layer count of non-zero entries in the `weight_name` parameters.
 layers = layers_nnz(model, param_name=weight_name)[1]
 # misc.print_dict(layers, name="NNZ PER LAYER")
 # print(all_num)
 # Total number of non-zero weights across all layers.
 sparse_factor = sum(layers.values())
 # print(sparse_factor)
 # Per-layer index width (bits) and codebook size (bits) implied by the
 # number of unique weight values after k-means quantization.
 if quantize_algo is not None:
     if args.quantize_algo == "kmeans_nnz_fixed_0_center":
         # Zero is a fixed center with no codebook entry, hence item - 1
         # indexable values; a single-value codebook needs 0 bits.
         weight_bits = [
             math.ceil(math.log2(item - 1)) if item > 1 else 0
             for item in list(
                 pt.layers_unique(model, weight_name)[1].values())
         ]
         # Each codebook entry is one 32-bit float.
         weight_dict_size = [(item - 1) * 32 for item in list(
             pt.layers_unique(model, weight_name)[1].values())]
     elif args.quantize_algo == "kmeans":
         weight_bits = [
             math.ceil(math.log2(item)) for item in list(
                 pt.layers_unique(model, weight_name)[1].values())
         ]
         weight_dict_size = [
             item * 32 for item in list(
                 pt.layers_unique(model, weight_name)[1].values())
         ]
     # NOTE(review): this branch is truncated at the end of this view; it
     # appears identical to the "kmeans_nnz_fixed_0_center" branch above.
     elif args.quantize_algo == "kmeans_fixed_nnz":
         weight_bits = [
             math.ceil(math.log2(item - 1)) if item > 1 else 0