Example #1
criterion = nn.CrossEntropyLoss()

optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=0.9,
                      weight_decay=5e-4)

# check watermark (trigger set) accuracy before training
if args.wmtrain:
    print("WM acc:")
    test(net, criterion, logfile, wmloader, device)

# start training
for epoch in range(start_epoch, start_epoch + args.max_epochs):
    # adjust learning rate
    adjust_learning_rate(args.lr, optimizer, epoch, args.lradj, args.ratio)

    train(epoch, net, criterion, optimizer, logfile, trainloader, device,
          wmloader)

    print("Test acc:")
    acc = test(net, criterion, logfile, testloader, device)

    if args.wmtrain:
        print("WM acc:")
        test(net, criterion, logfile, wmloader, device)

    print('Saving..')
    state = {
        'net': net.module if device == 'cuda' else net,
        'acc': acc,
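
Both examples call adjust_learning_rate without showing its body. The sketch below is a minimal step-decay version, assuming lradj is the decay interval in epochs and ratio the multiplicative factor; these interpretations and the default value are assumptions, not the repository's actual implementation.

# Hedged sketch of adjust_learning_rate (assumed step decay, not the original code):
# multiply the base learning rate by `ratio` every `lradj` epochs.
def adjust_learning_rate(init_lr, optimizer, epoch, lradj, ratio=0.1):
    lr = init_lr * (ratio ** (epoch // lradj))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
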
Example #2
        net = torch.nn.DataParallel(net,
                                    device_ids=range(
                                        torch.cuda.device_count()))
        parent = torch.nn.DataParallel(parent,
                                       device_ids=range(
                                           torch.cuda.device_count()))
        cudnn.benchmark = True

    test_criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=5e-4)

    # start training
    for epoch in range(start_epoch, start_epoch + max_epochs):
        # adjust learning rate
        adjust_learning_rate(lr, optimizer, epoch, lradj)

        # train the surrogate `net` using queries against the parent model
        train_steal(epoch,
                    net,
                    parent,
                    optimizer,
                    logfile,
                    testloader if use_test else trainloader,
                    device,
                    grad_query=grad_query)

        print("Test acc:")
        acc = test(net, test_criterion, logfile, testloader, device)

        print('Saving..')
        state = {
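
The state dictionary is cut off in both examples. A typical PyTorch checkpointing pattern consistent with the fields shown is sketched below; the helper name, the 'epoch' field, and the checkpoint path are assumptions rather than details taken from the snippets.

import os
import torch

# Hedged sketch of the save step implied by `state = {...}` above;
# the directory and file name are assumptions.
def save_checkpoint(net, acc, epoch, device, ckpt_dir='checkpoint'):
    state = {
        # unwrap nn.DataParallel on GPU so the plain module is serialized
        'net': net.module if device == 'cuda' else net,
        'acc': acc,
        'epoch': epoch,
    }
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(state, os.path.join(ckpt_dir, 'ckpt.t7'))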