# Resume: restore model weights from a fixed pre-trained checkpoint.
checkpoint = torch.load('./Para13/checkpoint-100.pt')
model.load_state_dict(checkpoint['model_state'])

# Training loop: one train pass, a clean-test pass, and a poison-test pass per
# epoch; periodic checkpointing; per-epoch metrics gathered for tabulation.
for epoch in range(start_epoch, args.epochs + 1):
    time_ep = time.time()  # wall-clock start of this epoch

    # Scheduled learning rate for this epoch (the commented line kept the
    # constant-LR alternative).
    lr = learning_rate_schedule(args.lr, epoch, args.epochs)
    #  lr = args.lr
    utils.adjust_learning_rate(optimizer, lr)

    train_res = utils.train(loaders['train'], model, optimizer, criterion,
                            regularizer)

    test_res = utils.test(loaders['test'], model, criterion, regularizer)

    # Poison evaluation on pre-built (inputs, targets); result is read below
    # via keys 'accuracyS' and 'accuracyT' — presumably source-/target-class
    # accuracy; confirm against utils.test_poison.
    test_poison_res = utils.test_poison(inputs, targets, model, args.numS,
                                        criterion)

    if epoch % args.save_freq == 0:
        utils.save_checkpoint(args.dir,
                              epoch,
                              model_state=model.state_dict(),
                              optimizer_state=optimizer.state_dict())

    time_ep = time.time() - time_ep  # epoch duration in seconds
    values = [
        epoch, lr, train_res['loss'], train_res['accuracy'], test_res['nll'],
        test_res['accuracy'], test_poison_res['accuracyS'],
        test_poison_res['accuracyT'], time_ep
    ]

    # NOTE(review): this call is truncated in the fragment — the remaining
    # tabulate arguments are not visible here.
    table = tabulate.tabulate([values],
Пример #2
0
# Evaluate checkpoints 121-139 from ./split on clean and poisoned data and
# report the average accuracy of each.
acc_clean = []
acc_poison = []
for ckpt_idx in range(121, 140):
    print('Resume training model')
    state = torch.load('./split/checkpoint-%d.pt' % ckpt_idx)
    basic_net.load_state_dict(state['model_state'])

    start_epoch = 1
    for epoch in range(start_epoch, start_epoch + 1):  # single evaluation pass
        # Accuracy on the clean held-out set.
        clean_res = utils.test(loaders['test'], basic_net, criterion)
        acc_clean.append(clean_res['accuracy'])
        print('Val acc:', clean_res['accuracy'])

        # Accuracy on the poisoned examples ('accuracyS' key).
        poison_res = utils.test_poison(inputs, targets, basic_net,
                                       args.numS, criterion)
        acc_poison.append(poison_res['accuracyS'])
        print('Poison Val acc:', poison_res['accuracyS'])

print('Ave Val acc:', np.mean(acc_clean))
print('Ave Poison Val acc:', np.mean(acc_poison))
Пример #3
0
#net = AttackPGD(model, config)

# Sweep the curve parameter t over `ts`, evaluating the curve model at each
# point: train/test metrics, poison-example metrics, and the Euclidean step
# length between successive weight vectors along the curve.
t = torch.FloatTensor([0.0]).cuda()
for i, t_value in enumerate(ts):
    t.data.fill_(t_value)  # in-place update so the same CUDA tensor is reused
    weights = model.weights(t)  # flattened weights at this t (numpy-compatible; has .copy())
    if previous_weights is not None:
        # L2 distance from the previous point on the curve
        dl[i] = np.sqrt(np.sum(np.square(weights - previous_weights)))
    previous_weights = weights.copy()

    # Recompute BatchNorm running statistics for this point on the curve
    # before evaluating.
    utils.update_bn(loaders['train'], model, t=t)
    tr_res = utils.test(loaders['train'], model, criterion, regularizer, t=t)
    te_res = utils.test(loaders['test'], model, criterion, regularizer, t=t)
    te_example_res = utils.test_poison(testset,
                                       model,
                                       criterion,
                                       regularizer,
                                       t=t)
    #  te_example_res = utils.test_examples(loaders['test'], net, criterion, regularizer)
    tr_loss[i] = tr_res['loss']
    tr_nll[i] = tr_res['nll']
    tr_acc[i] = tr_res['accuracy']
    tr_err[i] = 100.0 - tr_acc[i]  # error rate in percent
    te_loss[i] = te_res['loss']
    te_nll[i] = te_res['nll']
    te_acc[i] = te_res['accuracy']
    te_err[i] = 100.0 - te_acc[i]

    te_exa_loss[i] = te_example_res['loss']
    te_exa_acc[i] = te_example_res['accuracy']
    te_exa_err[i] = 100.0 - te_exa_acc[i]
Пример #4
0
# Resume from a fixed VGG checkpoint, then continue training.
print('Resume training model')
checkpoint = torch.load('./VGG_single_true_10_same1/checkpoint-100.pt' )
model.load_state_dict(checkpoint['model_state'])

# Per-epoch: train, clean test, poison test; periodic checkpointing and
# metric tabulation.
for epoch in range(start_epoch, args.epochs + 1):
    time_ep = time.time()  # wall-clock start of this epoch

    lr = learning_rate_schedule(args.lr, epoch, args.epochs)
  #  lr = args.lr
    utils.adjust_learning_rate(optimizer, lr)

    train_res = utils.train(loaders['train'], model, optimizer, criterion, regularizer)

    test_res = utils.test(loaders['test'], model, criterion, regularizer)
 #   test_poison_res = utils.test(loaders['test'], model, criterion, regularizer)
    # Poison evaluation; result read below via 'nll'/'accuracy' keys.
    test_poison_res = utils.test_poison(loaders['testset'], model, criterion, regularizer)

    if epoch % args.save_freq == 0:
        utils.save_checkpoint(
            args.dir,
            epoch,
            model_state=model.state_dict(),
            optimizer_state=optimizer.state_dict()
        )

    time_ep = time.time() - time_ep  # epoch duration in seconds
    values = [epoch, lr, train_res['loss'], train_res['accuracy'], test_res['nll'], test_res['accuracy'],
              test_poison_res['nll'], test_poison_res['accuracy'], time_ep]

    table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='9.4f')
    # NOTE(review): the fragment ends mid-branch — the body of this `if`
    # (presumably header re-printing every 40 epochs) is not visible here.
    if epoch % 40 == 1 or epoch == start_epoch:
Пример #5
0
# Average clean and poison test accuracy over checkpoints 121-139 of the
# VGG16 poisoned-split run.
criterion = nn.CrossEntropyLoss()

acc_clean = []
acc_poison = []
for k in range(121, 140):
    print('Resume training model')
    ckpt = torch.load(
        './VGG16_poi_single_target_5_2bad_testset_split/checkpoint-%d.pt' % k)
    basic_net.load_state_dict(ckpt['model_state'])

    start_epoch = 1
    for epoch in range(start_epoch, start_epoch + 1):  # single evaluation pass
        # Accuracy on the clean held-out set.
        clean_res = utils.test(loaders['test'], basic_net, criterion)
        acc_clean.append(clean_res['accuracy'])
        print('Val acc:', clean_res['accuracy'])

        # Accuracy on the poisoned test set.
        poison_res = utils.test_poison(testset, basic_net, criterion)
        acc_poison.append(poison_res['accuracy'])
        print('Poison Val acc:', poison_res['accuracy'])

print('Ave Val acc:', np.mean(acc_clean))
print('Ave Poison Val acc:', np.mean(acc_poison))
Пример #6
0
    # NOTE(review): tail of an evaluation routine — the enclosing `def` is not
    # visible in this fragment; test_loader/test_loss/correct/model/args come
    # from the missing header or outer scope.
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # Variable(..., volatile=True) is legacy (pre-0.4) PyTorch syntax for
        # no-grad inference.
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.cross_entropy(
            output, target, size_average=False).item()  # sum up batch loss
        pred = output.data.max(
            1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)  # mean loss over the whole set
    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
    return correct / float(len(test_loader.dataset))


def save_checkpoint(state, is_best, filepath):
    """Write *state* to ``checkpoint.pth.tar`` under *filepath*.

    When *is_best* is true, the checkpoint is additionally duplicated as
    ``model_best.pth.tar`` in the same directory.
    """
    ckpt_path = os.path.join(filepath, 'checkpoint.pth.tar')
    torch.save(state, ckpt_path)
    if is_best:
        best_path = os.path.join(filepath, 'model_best.pth.tar')
        shutil.copyfile(ckpt_path, best_path)


# One evaluation pass of the restored model before continuing training.
prec1 = test()

# Poison accuracy of the restored model on testset1.
res = utils.test_poison(testset1, model, criterion)

print(res['accuracy'])
has_bn = utils.check_bn(model)  # whether the model contains BatchNorm layers
test_res = {'loss': None, 'accuracy': None, 'nll': None}

# Training loop: train, clean test, poison test; periodic checkpointing and
# per-epoch metric collection for tabulation.
for epoch in range(start_epoch, args.epochs + 1):
    time_ep = time.time()  # wall-clock start of this epoch

    # Scheduled learning rate for this epoch (commented line kept the
    # constant-LR alternative).
    lr = learning_rate_schedule(args.lr, epoch, args.epochs)
    #  lr = args.lr
    utils.adjust_learning_rate(optimizer, lr)

    train_res = utils.train(loaders['train'], model, optimizer, criterion,
                            regularizer)

    test_res = utils.test(loaders['test'], model, criterion, regularizer)

    test_poison_res = utils.test_poison(testset, model, criterion, regularizer)

    if epoch % args.save_freq == 0:
        utils.save_checkpoint(args.dir,
                              epoch,
                              model_state=model.state_dict(),
                              optimizer_state=optimizer.state_dict())

    time_ep = time.time() - time_ep  # epoch duration in seconds
    values = [
        epoch, lr, train_res['loss'], train_res['accuracy'], test_res['nll'],
        test_res['accuracy'], test_poison_res['nll'],
        test_poison_res['accuracy'], time_ep
    ]

    # NOTE(review): this call is truncated in the fragment — the remaining
    # tabulate arguments are not visible here.
    table = tabulate.tabulate([values],
Пример #8
0
targets = D['targets']  # labels paired with `inputs`; D is loaded elsewhere

# Sweep the curve parameter t over `ts`: evaluate test and poison metrics at
# each point; train-set metrics are skipped (zero-filled) in this variant.
t = torch.FloatTensor([0.0]).cuda()
for i, t_value in enumerate(ts):
    t.data.fill_(t_value)  # in-place update so the same CUDA tensor is reused
    weights = model.weights(t)  # flattened weights at this t (numpy-compatible; has .copy())
    if previous_weights is not None:
        # L2 distance between successive weight vectors along the curve
        dl[i] = np.sqrt(np.sum(np.square(weights - previous_weights)))
    previous_weights = weights.copy()

    # Recompute BatchNorm running statistics for this point before evaluating.
    utils.update_bn(loaders['train'], model, t=t)
    #    tr_res = utils.test(loaders['train'], model, criterion, regularizer, t=t)
    te_res = utils.test(loaders['test'], model, criterion, regularizer, t=t)
    te_example_res = utils.test_poison(inputs,
                                       targets,
                                       model,
                                       args.numS,
                                       criterion,
                                       t=t)
    #  te_example_res = utils.test_examples(loaders['test'], net, criterion, regularizer)
    #   tr_loss[i] = tr_res['loss']
    #    tr_nll[i] = tr_res['nll']
    #    tr_acc[i] = tr_res['accuracy']
    # Train metrics intentionally zeroed — the train evaluation is disabled
    # above, so tr_err becomes a constant 100.0.
    tr_loss[i] = 0
    tr_nll[i] = 0
    tr_acc[i] = 0
    tr_err[i] = 100.0 - tr_acc[i]
    te_loss[i] = te_res['loss']
    te_nll[i] = te_res['nll']
    te_acc[i] = te_res['accuracy']
    te_err[i] = 100.0 - te_acc[i]