Example #1
# Assumes numpy as np, torch and torch.nn as nn are imported, and that
# ConvNet, load_dataset, train and test are defined elsewhere in the project.
def retrain(state_dict, part=1, num_epochs=5):

    # Hyper Parameters
    param = {
        'batch_size': 4,
        'test_batch_size': 50,
        'num_epochs': num_epochs,
        'learning_rate': 0.001,
        'weight_decay': 5e-4,
    }

    # Count conv (4-D weight) and fully connected (2-D weight) layers
    num_cnn_layer = sum(int(v.dim() == 4) for v in state_dict.values())

    num_fc_layer = sum(int(v.dim() == 2) for v in state_dict.values())

    state_key = list(state_dict.keys())

    # Rebuild the layer-size configuration from the weight shapes: the input
    # channel count first, then the output size of every conv/fc layer
    cfg = []
    first = True
    for v in state_dict.values():
        if v.dim() == 4 or v.dim() == 2:
            if first:
                first = False
                cfg.append(v.size(1))
            cfg.append(v.size(0))

    assert num_cnn_layer + num_fc_layer == len(cfg) - 1

    net = ConvNet(cfg, num_cnn_layer, part)

    masks = []

    # Copy the weights in and rebuild the 0/1 masks: a conv filter is masked
    # out when its kernel's summed magnitude is numerically zero, and an fc
    # weight when its own magnitude is
    for i, p in enumerate(net.parameters()):
        p.data = state_dict[state_key[i]]

        if p.dim() == 4:
            p_np = p.data.cpu().numpy()
            masks.append(np.ones(p_np.shape).astype('float32'))
            value_this_layer = np.abs(p_np).sum(axis=(2, 3))
            for j in range(len(value_this_layer)):
                for k in range(len(value_this_layer[0])):
                    if abs(value_this_layer[j][k]) < 1e-4:
                        masks[-1][j][k] = 0.

        elif p.dim() == 2:
            p_np = p.data.cpu().numpy()
            masks.append(np.ones(p_np.shape).astype('float32'))
            value_this_layer = np.abs(p_np)
            for j in range(len(value_this_layer)):
                for k in range(len(value_this_layer[0])):
                    if abs(value_this_layer[j][k]) < 1e-4:
                        masks[-1][j][k] = 0.

    net.set_masks(masks)

    ## Retraining
    loader_train, loader_test = load_dataset()

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.RMSprop(net.parameters(),
                                    lr=param['learning_rate'],
                                    weight_decay=param['weight_decay'])

    train(net, criterion, optimizer, param, loader_train)

    # Write the retrained weights back into the state dict and return it
    for i, p in enumerate(net.parameters()):
        state_dict[state_key[i]] = p.data

    return state_dict
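
# A vectorized NumPy equivalent of the mask-building loops above (a sketch
# using the same 1e-4 threshold; not part of the original code):
def build_mask(p_np):
    mask = np.ones(p_np.shape, dtype='float32')
    if p_np.ndim == 4:
        # conv weight: zero every kernel whose summed magnitude is ~0;
        # the 2-D boolean index broadcasts over the kernel dimensions
        mask[np.abs(p_np).sum(axis=(2, 3)) < 1e-4] = 0.
    else:
        # fc weight: zero individual near-zero weights
        mask[np.abs(p_np) < 1e-4] = 0.
    return mask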
Example #2
def baseline_fitness(state_dict, num_epochs=600):

    # Hyper Parameters
    param = {
        'batch_size': 4,
        'test_batch_size': 50,
        'num_epochs': num_epochs,
        'learning_rate': 0.001,
        'weight_decay': 5e-4,
    }

    # Count conv (4-D weight) and fully connected (2-D weight) layers
    num_cnn_layer = sum(int(v.dim() == 4) for v in state_dict.values())

    num_fc_layer = sum(int(v.dim() == 2) for v in state_dict.values())

    state_key = list(state_dict.keys())
        
    # Rebuild the layer-size configuration from the weight shapes
    cfg = []
    first = True
    for v in state_dict.values():
        if v.dim() == 4 or v.dim() == 2:
            if first:
                first = False
                cfg.append(v.size(1))
            cfg.append(v.size(0))

    assert num_cnn_layer + num_fc_layer == len(cfg) - 1

    net = ConvNet(cfg, num_cnn_layer)
         
    # Load the weights; the baseline applies no pruning masks
    for i, p in enumerate(net.parameters()):
        p.data = state_dict[state_key[i]]
    
    
    ## Training
    loader_train, loader_test = load_dataset()

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.RMSprop(net.parameters(), lr=param['learning_rate'],
                                    weight_decay=param['weight_decay'])

    # Train in 10-epoch chunks, recording test accuracy after each chunk
    test_acc_list = []
    for t in range(num_epochs):
        param['num_epochs'] = 10
        train(net, criterion, optimizer, param, loader_train)
        test_acc_list.append(test(net, loader_test))
        
    # Plot the accuracy curve and append it to the result log
    plt.plot(test_acc_list)

    with open('baseline_result.csv', 'a', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for acc in test_acc_list:
            writer.writerow([acc])
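
# baseline_fitness() expects test() to return a scalar accuracy it can
# append and plot; a minimal sketch of such a helper (an assumption, the
# real implementation lives elsewhere in the project):
def test(net, loader_test):
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in loader_test:
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    net.train()
    acc = 100. * correct / total
    print('Test accuracy: {:.2f}%'.format(acc))
    return acc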
Example #3
# Load the pretrained model (param, loader_train and loader_test are assumed
# to be defined earlier in the script)
net = MLP()
net.load_state_dict(torch.load('models/mlp_pretrained.pkl'))
if torch.cuda.is_available():
    print('CUDA enabled.')
    net.cuda()
print("--- Pretrained network loaded ---")
test(net, loader_test)

# Prune the weights
masks = weight_prune(net, param['pruning_perc'])
net.set_masks(masks)
print("--- {}% parameters pruned ---".format(param['pruning_perc']))
test(net, loader_test)

# Retraining
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop(net.parameters(),
                                lr=param['learning_rate'],
                                weight_decay=param['weight_decay'])

train(net, criterion, optimizer, param, loader_train)

# Check accuracy and nonzero weights in each layer
print("--- After retraining ---")
test(net, loader_test)
prune_rate(net)

# Save the pruned weights
torch.save(net.state_dict(), 'models/mlp_pruned.pkl')
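
# weight_prune() comes from the surrounding project; a sketch of a global
# magnitude-based variant (an assumption, not necessarily the project's code):
def weight_prune(model, pruning_perc):
    # pool all multi-dim weights and find the global magnitude threshold
    all_weights = np.concatenate(
        [p.data.cpu().numpy().flatten()
         for p in model.parameters() if p.dim() > 1])
    threshold = np.percentile(np.abs(all_weights), pruning_perc)
    # one float32 mask per weight tensor: 1 keeps a weight, 0 prunes it
    return [(np.abs(p.data.cpu().numpy()) > threshold).astype('float32')
            for p in model.parameters() if p.dim() > 1]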
Example #4
# ratio_list=[10.0]
# Assumes net, criterion, optimizer, param, ratio_list and the data loaders
# are defined earlier in the script.
k = 5
n = 40
saving_road1 = 'models/vgg_real_pruned80_bestall(1).pkl'
saving_road2 = 'models/vgg_real_pruned80_bestparam(1).pkl'

net = gen_mask(net, criterion, optimizer, param, loader_train, loader_test,
               ratio_list, k)

print("new net's architecture:")
mod = net.features._modules.items()
for i in mod:
    print(i)

optimizer = torch.optim.SGD(net.parameters(),
                            param['learning_rate'],
                            momentum=param['momentum'],
                            weight_decay=param['weight_decay'])

train(net, criterion, optimizer, param, loader_train, loader_test, n,
      saving_road1, saving_road2)


# Save both the full model and its parameters
torch.save(net, 'models/vgg_real_pruned80_lastall(1).pkl')
torch.save(net.state_dict(), 'models/vgg_real_pruned80_lastparam(1).pkl')
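
# All of these examples call set_masks(); one common implementation (a
# sketch, not necessarily this project's) multiplies each layer's weight
# by a fixed 0/1 mask in forward():
class MaskedLinear(nn.Linear):
    def set_mask(self, mask):
        # a buffer follows .to()/.cuda() but is excluded from training
        self.register_buffer('mask', torch.as_tensor(mask))

    def forward(self, x):
        weight = self.weight * self.mask if hasattr(self, 'mask') else self.weight
        return nn.functional.linear(x, weight, self.bias)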
Example #5
# The start of this snippet was truncated; a typical test-loader setup
# (reconstructed under that assumption, with a hypothetical test_dataset)
# would end like this:
loader_test = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=param['test_batch_size'],
                                          shuffle=True)

model = LeNet()
model.load_state_dict(
    torch.load('models/lenet_pretrained.pkl', map_location='cpu'))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
print("--- Accuracy of Pretrained Model ---")
test(model, loader_test)

# Pruning
masks = lenet_prune()
model.set_masks(masks)
print("--- Accuracy After Pruning ---")
test(model, loader_test)

# Retraining
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop(model.parameters(),
                                lr=param['learning_rate'],
                                weight_decay=param['weight_decay'])
train(model, criterion, optimizer, param, loader_train)

print("--- Accuracy After Retraining ---")
test(model, loader_test)
prune_rate(model)

# Save the pruned weights
torch.save(model.state_dict(), 'models/lenet_pruned.pkl')
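
# prune_rate() reports how sparse the network is; a minimal sketch of the
# assumed behaviour (the project's version may print per-layer stats too):
def prune_rate(model):
    zeros, total = 0, 0
    for p in model.parameters():
        if p.dim() > 1:
            zeros += (p.data == 0).sum().item()
            total += p.numel()
    pct = 100. * zeros / max(total, 1)
    print('{:.2f}% of weights pruned'.format(pct))
    return pct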
Example #6
# Sweep over SNRs, measure the pruned autoencoder's error rate, and plot it
if hp.plot:
    for i, snr in enumerate(snrs):
        print(i)
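        # AWGN channel: with code rate k/n and unit-power symbols, the noise
        # standard deviation for Eb/N0 = snr is 1/sqrt(2*(k/n)*snr), i.e. 1/scal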
        scal = np.sqrt(snr * 2 * hp.k / hp.n)

        labels, ip = generate_input(amt=10**hp.e_prec)
        enc = encoder(ip)
        enc = enc + torch.randn_like(enc, device=device) / scal
        op = decoder(enc)

        errs[i] = error_rate(op, labels)

    # the 1/10**e_prec floor keeps zero error rates visible on the log axis
    plt.semilogy(xx, errs + 1 / 10**hp.e_prec, label='Pruned weights')

# Retraining
train()

# Check accuracy and nonzero weights in each layer
print("--- After retraining ---")
acc = test()
prune_rate(net)

if hp.plot:
    for i, snr in enumerate(snrs):
        print(i)
        scal = np.sqrt(snr * 2 * hp.k / hp.n)

        labels, ip = generate_input(amt=10**hp.e_prec)
        enc = encoder(ip)
        enc = enc + torch.randn_like(enc, device=device) / scal
        op = decoder(enc)