Example #1
def test():
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).sum().item()

        test_loss /= len(test_loader.dataset)
        accuracy = 100. * correct / len(test_loader.dataset)
        print(f'Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({accuracy:.2f}%)')
    return accuracy

if args.pretrained:
    model.load_state_dict(torch.load('saves/elt_0.0_0.pth'))
    accuracy = test()
# Initial training
print("--- Initial training ---")
train(args.epochs, decay=args.decay, threshold=0.0)
accuracy = test()
torch.save(model.state_dict(), 'saves/elt_'+str(args.decay)+'_'+str(args.reg_type)+'.pth')

util.log(args.log, f"initial_accuracy {accuracy}")
#util.print_nonzeros(model)
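
The train() call above relies on a helper that is not part of the snippet. A minimal sketch of what it might look like, assuming a train_loader and optimizer exist in scope, that decay scales an L1 penalty, and that threshold zeroes small weights after each step; these names and behaviors are assumptions, not the original code:

def train(epochs, decay=0.0, threshold=0.0):
    model.train()
    for epoch in range(epochs):
        for data, target in train_loader:
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            if decay > 0:
                # assumed L1 penalty; the original likely switches on args.reg_type
                loss = loss + decay * sum(p.abs().sum() for p in model.parameters())
            loss.backward()
            optimizer.step()
            if threshold > 0:
                # assumed hard-thresholding: zero out small weights in place
                with torch.no_grad():
                    for p in model.parameters():
                        p[p.abs() < threshold] = 0.0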


Example #2
def test():
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).sum().item()

        test_loss /= len(test_loader.dataset)
        accuracy = 100. * correct / len(test_loader.dataset)
        print(f'Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({accuracy:.2f}%)')
    return accuracy


model.load_state_dict(torch.load(args.model+'.pth'))
# Initial training
print("--- Pruning ---")
for name, p in model.named_parameters():
    if 'mask' in name:
        continue
    tensor = p.data.cpu().numpy()
    new_mask = np.where(abs(tensor) < args.sensitivity, 0, tensor)
    p.data = torch.from_numpy(new_mask).to(device)

accuracy = test()
util.print_nonzeros(model)

print("--- Finetuning ---")
train(args.epochs)
accuracy = test()
torch.save(model.state_dict(), args.model+'_T_'+str(args.sensitivity)+'.pth')
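
util.print_nonzeros is external to these snippets. A stand-in with the same intent, reporting per-tensor and total sparsity (the original's exact output format is unknown):

def print_nonzeros(model):
    total, nonzero = 0, 0
    for name, p in model.named_parameters():
        nz = int((p != 0).sum().item())
        total += p.numel()
        nonzero += nz
        print(f'{name:30} | nonzeros = {nz:8d} / {p.numel():8d} ({100.0 * nz / p.numel():6.2f}%)')
    print(f'alive: {nonzero}, pruned: {total - nonzero}, total: {total} '
          f'({100.0 * nonzero / total:.2f}% remaining)')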

Example #3

def test():
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).sum().item()

        test_loss /= len(test_loader.dataset)
        accuracy = 100. * correct / len(test_loader.dataset)
        print(f'Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({accuracy:.2f}%)')
    return accuracy

# Initial training
print("--- Initial training ---")
train(args.epochs)
accuracy = test()
util.log(args.log, f"initial_accuracy {accuracy}")
torch.save(model, f"saves/initial_model.ptmodel")
print("--- Before pruning ---")
util.print_nonzeros(model)

# uty: keep a copy of the model instance for later reference

uty_model_orig = type(model)(mask=True)
uty_model_orig.load_state_dict(model.state_dict())  # copy weights and stuff

print("!!!test")
print(model)
print(uty_model_orig)
print("\n")

# Pruning
model.prune_by_std(args.sensitivity)
accuracy = test()
util.log(args.log, f"accuracy_after_pruning {accuracy}")
print("--- After pruning ---")
util.print_nonzeros(model)

# uty test
# Retrain
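
prune_by_std is a method of the masked model class and is not shown here. In similar deep-compression codebases it derives a per-layer threshold from the standard deviation of that layer's weights; a sketch under that assumption (the module-level prune() method is hypothetical):

import numpy as np

def prune_by_std(self, s):
    # One threshold per layer: s * std(weights), so layers with a wider
    # weight distribution keep proportionally larger weights.
    for name, module in self.named_modules():
        if hasattr(module, 'prune'):  # hypothetical masked-layer API
            threshold = np.std(module.weight.data.cpu().numpy()) * s
            print(f'Pruning {name} with threshold {threshold:.4f}')
            module.prune(threshold)  # assumed to zero small weights and update the mask
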
Example #4
def test():
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(
                output, target, reduction='sum').item()  # sum up batch loss
            pred = output.data.max(
                1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).sum().item()

        test_loss /= len(test_loader.dataset)
        accuracy = 100. * correct / len(test_loader.dataset)
        print(
            f'Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({accuracy:.2f}%)'
        )
    return accuracy


if args.pretrained:
    model.load_state_dict(torch.load('saves/elt_0.0_0.pth'))
    accuracy = test()
# Initial training
print("--- Initial training ---")
train(args.epochs, decay=args.decay, threshold=0.0)
accuracy = test()
util.log(args.log, f"initial_accuracy {accuracy}")
torch.save(model.state_dict(),
           'saves/str_' + str(args.decay) + '_' + str(args.reg_type) + '.pth')
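
util.log appears in every example but is never defined. A plausible minimal implementation that appends each message to the file named by args.log (an assumption about its behavior):

def log(filename, msg):
    # echo to stdout and append one line per call so runs can be compared later
    print(msg)
    with open(filename, 'a') as f:
        f.write(msg + '\n')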