Example #1
def prune_model_mixed(model):

    model.cpu()
    slim = pruning.Autoslim(model, inputs=torch.randn(1, 3, 32, 32), compression_ratio=0.6)
    # layer_compression_rate={1:0.125,7:0.125,13:0.125,56:0.625}
    # layer_index_record=[]
    # for key,value in slim.index_of_layer().items():
    #     layer_index_record.append(key)
    #     print(key,value)

    # # 0.25 0.5 0.625 0.75 0.875
    # for num in layer_index_record:
    #     if num not in layer_compression_rate:
    #         if num<18:
    #             layer_compression_rate[num]=0.5
    #         elif num<33:
    #             layer_compression_rate[num]=0.625
    #         elif num<48:
    #             layer_compression_rate[num]=0.75
    #         else:
    #             layer_compression_rate[num]=0.875

    # Fully automated compression + pruning of skip-connection layers = graded pruning
    slim.fpgm_pruning()
    #print('layer_compression_rate:\n',layer_compression_rate)
    return model
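A minimal usage sketch for the prune_model_mixed function above (Example #1), assuming the resnet18 model and the pruning.get_model_complexity_info helper from the complete script at the end of this page also accept the 32x32 resolution used by these CIFAR-style examples:

import torch
import torch_pruning as pruning
from torchvision.models import resnet18

# Build an unpruned baseline and record its complexity.
model = resnet18()
flops_before, params_before = pruning.get_model_complexity_info(
    model, (3, 32, 32), as_strings=True, print_per_layer_stat=False)

# FPGM-based pruning from Example #1; the model is modified in place.
model = prune_model_mixed(model)

flops_after, params_after = pruning.get_model_complexity_info(
    model, (3, 32, 32), as_strings=True, print_per_layer_stat=False)
print('flops: %s -> %s' % (flops_before, flops_after))
print('params: %s -> %s' % (params_before, params_after))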
def prune_model_mixed(model):

    model.cpu()
    slim = pruning.Autoslim(model,
                            inputs=torch.randn(1, 3, 32, 32),
                            compression_ratio=0.5)
    config = {
        'layer_compression_ratio': None,
        'norm_rate': 1.0,
        'prune_shortcut': 1,
        'global_pruning': False,
        'pruning_func': 'l1'
    }
    slim.base_prunging(config)
    return model
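A hedged variation on the config-driven call above. Whether the 'layer_compression_ratio' key accepts a {layer index: ratio} dict is an assumption inferred from the key name and from the explicit dict passed to l1_norm_pruning in Example #4; the base_prunging spelling simply follows the examples on this page.

import torch
import torch_pruning as pruning

def prune_model_per_layer(model):
    model.cpu()
    slim = pruning.Autoslim(model,
                            inputs=torch.randn(1, 3, 32, 32),
                            compression_ratio=0.5)
    config = {
        # Assumption: per-layer ratios as a {layer_index: ratio} dict,
        # analogous to the dict used with l1_norm_pruning in Example #4.
        'layer_compression_ratio': {5: 0.2, 11: 0.5},
        'norm_rate': 1.0,
        'prune_shortcut': 1,
        'global_pruning': False,
        'pruning_func': 'l1'
    }
    slim.base_prunging(config)
    return model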
def prune_model_with_shortcut(model):
    model.cpu()
    print(model)
    slim = pruning.Autoslim(model, inputs=torch.randn(1, 3, 32, 32), compression_ratio=0.795)
    slim.l1_norm_pruning()
    print('---------------------')
    print(model)

    return model    
Example #4
def prune_model_without_shortcut(model):
    model.cpu()
    slim = pruning.Autoslim(model, inputs=torch.randn(1, 3, 32, 32), compression_ratio=0)

    # print(model)
    layer_compression_rate = {5: 0.2, 11: 0.5, 18: 0.5, 26: 0.5,
                              33: 0.75, 41: 0.75, 48: 0.875, 56: 0.875}
    slim.l1_norm_pruning(layer_compression_ratio=layer_compression_rate)
    #print(model)
    return model
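The layer indices hard-coded in the dict above are model-specific. A short sketch for listing the prunable layers before writing such a dict, using the index_of_layer() helper that also appears in the mixed-pruning example further below and the resnet18 model from the script at the end of the page:

import torch
import torch_pruning as pruning
from torchvision.models import resnet18

model = resnet18()
slim = pruning.Autoslim(model, inputs=torch.randn(1, 3, 32, 32), compression_ratio=0)

# Print every prunable layer index together with the layer it maps to,
# so the keys of layer_compression_rate can be chosen deliberately.
for index, layer in slim.index_of_layer().items():
    print(index, layer)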
Example #5
def prune_model_with_shortcut(model):
    model.cpu()
    
    slim = pruning.Autoslim(model, inputs=torch.randn(1, 3, 32, 32), compression_ratio=0.5)
    slim.l1_norm_pruning()
    # DG = tp.DependencyGraph().build_dependency( model, torch.randn(1, 3, 32, 32) )
    # def prune_conv(conv, pruned_prob):
    #     weight = conv.weight.detach().cpu().numpy()
    #     out_channels = weight.shape[0]
    #     L1_norm = np.sum( np.abs(weight), axis=(1,2,3))
    #     num_pruned = int(out_channels * pruned_prob)
    #     prune_index = np.argsort(L1_norm)[:num_pruned].tolist() # remove filters with small L1-Norm
    #     plan = DG.get_pruning_plan(conv, tp.prune_conv, prune_index)
    #     plan.exec()
    
    # block_prune_probs = [0.1, 0.1, 0.2, 0.2, 0.2, 0.2, 0.3, 0.3]
    # blk_id = 0
    # for m in model.modules():
    #     if isinstance( m, resnet.BasicBlock ):
    #         prune_conv( m.conv1, block_prune_probs[blk_id] )
    #         prune_conv( m.conv2, block_prune_probs[blk_id] )
    #         blk_id+=1
    return model    
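If the commented-out DependencyGraph variant above were re-enabled, it would additionally need imports for the tp, np and resnet names it refers to (get_pruning_plan and prune_conv belong to the older torch_pruning API that block targets):

import numpy as np
import torch_pruning as tp
from torchvision.models import resnet  # provides resnet.BasicBlock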
def prune_model_mixed(model):

    model.cpu()
    slim = pruning.Autoslim(model, inputs=torch.randn(1, 3, 32, 32), compression_ratio=0)
    layer_compression_rate={}
    layer_index_record=[]
    for key,value in slim.index_of_layer().items():
        layer_index_record.append(key)
        #print(key,value)
    for num in layer_index_record:
        if num<18:
            layer_compression_rate[num]=0.5
        elif num<33:
            layer_compression_rate[num]=0.625
        elif num<48:
            layer_compression_rate[num]=0.75
        else:
            layer_compression_rate[num]=0.875

    
    # Apply the per-layer ratios built above (as in Example #4, the global
    # compression_ratio is set to 0 and the per-layer dict drives the pruning).
    slim.l1_norm_pruning(layer_compression_ratio=layer_compression_rate)
    print('---------------------------')
    return model
import torch_pruning as pruning
from torchvision.models import resnet18
import torch

# Build the model
model = resnet18()
flops_raw, params_raw = pruning.get_model_complexity_info(
    model, (3, 224, 224), as_strings=True, print_per_layer_stat=False)  
print('-[INFO] before pruning flops:  ' + flops_raw)
print('-[INFO] before pruning params:  ' + params_raw)
# Choose the pruning method
mod = 'fpgm'

# Build the pruning engine
slim = pruning.Autoslim(model, inputs=torch.randn(
    1, 3, 224, 224), compression_ratio=0.5)

if mod == 'fpgm':
    config = {
        'layer_compression_ratio': None,
        'norm_rate': 1.0, 'prune_shortcut': 1,
        'dist_type': 'l1', 'pruning_func': 'fpgm'
    }
elif mod == 'l1':
    config = {
        'layer_compression_ratio': None,
        'norm_rate': 1.0, 'prune_shortcut': 1,
        'global_pruning': False, 'pruning_func': 'l1'
    }
slim.base_prunging(config)
flops_new, params_new = pruning.get_model_complexity_info(
    model, (3, 224, 224), as_strings=True, print_per_layer_stat=False)
print('-[INFO] after pruning flops:  ' + flops_new)
print('-[INFO] after pruning params:  ' + params_new)