Example #1
def test(epoch):
    """Adversarially evaluate the model on the test set, reporting adversarial
    accuracy, nominal accuracy, and mean attack radius; checkpoint every 10 epochs."""
    net.eval()
    test_loss = 0
    correct = 0
    nominal_correct = 0
    total = 0
    total_epsilon = 0
    for batch_idx, (inputs, targets) in enumerate(testloader):
        inputs, targets = inputs.to(device), targets.to(device)

        inputs_pgd, _, epsilons = attack(torch.clamp(unnormalize(inputs),
                                                     min=0),
                                         targets,
                                         net,
                                         p=args.p,
                                         normalize=normalize,
                                         epsilon_factor=args.epsilon_factor,
                                         epsilon=args.init_epsilon,
                                         maxiters=args.maxiters,
                                         epsilon_iters=args.epsilon_iters,
                                         regularization=args.reg,
                                         alpha=args.alpha,
                                         norm=args.norm,
                                         ball=args.ball,
                                         multiply=True)
        with torch.no_grad():
            outputs = net(normalize(inputs_pgd))
            loss = criterion(outputs, targets)

            outputs_nominal = net(inputs)
            _, predicted_nominal = outputs_nominal.max(1)
            nominal_correct += predicted_nominal.eq(targets).sum().item()

            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            total_epsilon += epsilons.sum().item()

            progress_bar(
                batch_idx, len(testloader),
                'Loss: %.3f | Adv Acc: %.3f%% (%d/%d) | Acc: %.3f%% (%d/%d) | Eps: %.3f'
                % (test_loss / (batch_idx + 1), 100. * correct / total,
                   correct, total, 100. * nominal_correct / total,
                   nominal_correct, total, total_epsilon / total))

    if epoch % 10 == 0:
        # Save checkpoint.
        acc = 100. * correct / total
        eps = total_epsilon / total
        print('Saving..')
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'eps': eps,
            'epoch': epoch,
        }
        if not os.path.isdir(args.outdir):
            os.mkdir(args.outdir)
        torch.save(state, checkpoint_file.format(epoch))
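For reference, a minimal sketch of restoring one of the checkpoints written above; `resume_epoch` is a hypothetical variable, while `checkpoint_file`, `net`, and `device` are the same globals the function uses:

import torch

# Hypothetical resume snippet: load a checkpoint produced by test() above.
state = torch.load(checkpoint_file.format(resume_epoch), map_location=device)
net.load_state_dict(state['net'])
start_epoch = state['epoch'] + 1
best_acc, best_eps = state['acc'], state['eps']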
Example #2
def test_attack():
    """Attack every test example, track the radii of successful attacks, and
    write the per-example radius (inf where the attack failed) to `save_name`."""
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    all_epsilons = []
    succeed_epsilons = []
    L1_delta = []
    W_delta = []

    for batch_idx, (inputs, targets) in enumerate(testloader):
        inputs, targets = inputs.to(device), targets.to(device)
        if args.binarize:
            inputs = (inputs >= 0.5).float()

        inputs_pgd, _, epsilons = attack(
            torch.clamp(inputs, min=0),
            targets,
            net,
            regularization=regularization,
            p=args.p,
            alpha=args.alpha,
            norm=args.norm,
            ball=args.ball,
            epsilon_iters=args.epsilon_iters,
            epsilon_factor=args.epsilon_factor,
            epsilon=args.init_epsilon,
            maxiters=args.maxiters,
            kernel_size=args.kernel,
            use_tqdm=True,
            clamping=not args.no_clamping,
            constrained_sinkhorn=not args.unconstrained)

        outputs_pgd = net(inputs_pgd)
        loss = criterion(outputs_pgd, targets)

        test_loss += loss.item()
        _, predicted = outputs_pgd.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        # Mark examples the attack failed to flip with an infinite radius and
        # keep the finite radii (successful attacks) in a separate list.
        epsilons[predicted == targets] = float('inf')
        which_correct = epsilons == float('inf')
        succeed_epsilons.append(epsilons[~which_correct])
        all_epsilons.append(epsilons)

        progress_bar(
            batch_idx, len(testloader),
            'Loss: %.3f | Acc: %.3f%% (%d/%d) | Avg epsilon: %.3f' %
            (test_loss / (batch_idx + 1), 100. * correct / total, correct,
             total, torch.cat(succeed_epsilons).float().mean().item()))
        acc = 100. * correct / total

    all_epsilons = torch.cat(all_epsilons)
    with open(save_name, 'w') as f:
        f.write('index\tradius\n')
        for i in range(len(all_epsilons)):
            f.write(f'{i+1}\t{all_epsilons[i].item()}\n')
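The radius file written above can be post-processed into a robust-accuracy curve. A minimal sketch, assuming the same tab-separated format (rows with inf are examples the attack never flipped); `robust_accuracy` is a hypothetical helper:

def robust_accuracy(path, r):
    # Fraction of examples whose recorded attack radius exceeds r.
    with open(path) as f:
        next(f)  # skip the 'index\tradius' header
        radii = [float(line.strip().split('\t')[1]) for line in f]
    return sum(rad > r for rad in radii) / len(radii)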
Example #3
def train(epoch):
    """Run one epoch of adversarial training: attack each batch with the model
    in eval mode, then update the model on the resulting adversarial examples."""
    print('\nEpoch: %d' % epoch)
    train_loss = 0
    correct = 0
    nominal_correct = 0
    total_epsilon = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        net.eval()
        inputs_pgd, _, epsilons = attack(torch.clamp(unnormalize(inputs),
                                                     min=0),
                                         targets,
                                         net,
                                         p=args.p,
                                         normalize=normalize,
                                         epsilon_factor=args.epsilon_factor,
                                         epsilon=args.init_epsilon,
                                         maxiters=args.maxiters,
                                         epsilon_iters=args.epsilon_iters,
                                         regularization=args.reg,
                                         alpha=args.alpha,
                                         norm=args.norm,
                                         ball=args.ball,
                                         sinkhorn_maxiters=10,
                                         training=True,
                                         kernel_size=5,
                                         l1_delta=args.L1D,
                                         multiply=True)
        net.train()
        optimizer.zero_grad()
        outputs = net(normalize(inputs_pgd.detach()))
        _, predicted = outputs.max(1)
        correct += predicted.eq(targets).sum().item()
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        with torch.no_grad():
            net.eval()
            outputs_nominal = net(inputs)
            _, predicted_nominal = outputs_nominal.max(1)
            nominal_correct += predicted_nominal.eq(targets).sum().item()

            train_loss += loss.item()
            total += targets.size(0)
            total_epsilon += epsilons.sum().item()

        progress_bar(
            batch_idx, len(trainloader),
            'Loss: %.3f | Adv Acc: %.3f%% (%d/%d) | Acc: %.3f%% (%d/%d) | Eps: %.3f'
            % (train_loss / (batch_idx + 1), 100. * correct / total, correct,
               total, 100. * nominal_correct / total, nominal_correct, total,
               total_epsilon / total))
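Examples #1 and #3 are typically combined in a small outer loop; a minimal sketch, assuming hypothetical `start_epoch` and `args.epochs` values and the globals used above:

for epoch in range(start_epoch, args.epochs):
    train(epoch)  # adversarial training pass (Example #3)
    test(epoch)   # adversarial evaluation and periodic checkpointing (Example #1)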
Example #4
def test_attack():
    """Attack the test set with a fixed budget and save (accuracy, per-example
    radii) to `save_name`; a radius of -1 marks examples the attack did not flip."""
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    all_epsilons = []

    for batch_idx, (inputs, targets) in enumerate(testloader):
        inputs, targets = inputs.to(device), targets.to(device)
        if binarize:
            inputs = (inputs >= 0.5).float()

        inputs_pgd, _, epsilons = attack(torch.clamp(inputs, min=0),
                                         targets,
                                         net,
                                         regularization=regularization,
                                         p=args.p,
                                         alpha=args.alpha,
                                         norm=args.norm,
                                         ball=args.ball,
                                         epsilon=0.7,
                                         maxiters=200,
                                         kernel_size=7)

        outputs_pgd = net(inputs_pgd)
        loss = criterion(outputs_pgd, targets)

        test_loss += loss.item()
        _, predicted = outputs_pgd.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        # A radius of -1 marks examples the attack failed to flip.
        epsilons[predicted == targets] = -1
        all_epsilons.append(epsilons)

        # Report the mean radius over successful attacks only (-1 sentinels excluded).
        cat_eps = torch.cat(all_epsilons).float()
        progress_bar(
            batch_idx, len(testloader),
            'Loss: %.3f | Acc: %.3f%% (%d/%d) | Avg epsilon: %.3f' %
            (test_loss / (batch_idx + 1), 100. * correct / total, correct,
             total, cat_eps[cat_eps >= 0].mean().item()))

        # Overwrite the running results after every batch.
        acc = 100. * correct / total
        torch.save((acc, torch.cat(all_epsilons)), save_name)
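The tuple saved above can be reloaded for analysis; a minimal sketch, assuming the same `save_name` (a radius of -1 marks an example the attack did not flip):

import torch

acc, radii = torch.load(save_name)
succeeded = radii >= 0  # boolean mask of successful attacks
print('adv acc: %.2f%% | attack success: %.3f | mean radius over successes: %.3f'
      % (acc, succeeded.float().mean().item(), radii[succeeded].float().mean().item()))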
def train(epoch):
    """One epoch of adversarial training with a fixed, small attack budget,
    reporting adversarial/nominal accuracy and the attack success rate."""
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    nominal_correct = 0
    total_epsilon = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        inputs_pgd, _, epsilons = attack(inputs, targets, net,
                                         epsilon_factor=1.4,
                                         epsilon=0.1,
                                         maxiters=50,
                                         epsilon_iters=5,
                                         p=args.p,
                                         regularization=args.reg,
                                         alpha=args.alpha,
                                         norm=args.norm,
                                         ball=args.ball)
        optimizer.zero_grad()
        outputs = net(inputs_pgd.detach())
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        with torch.no_grad(): 
            outputs_nominal = net(inputs)
            _, predicted_nominal = outputs_nominal.max(1)
            nominal_correct += predicted_nominal.eq(targets).sum().item()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            # epsilons is inf where the attack failed; count successfully attacked examples.
            total_epsilon += (epsilons < float("inf")).sum().item()

        progress_bar(
            batch_idx, len(trainloader),
            'Loss: %.3f | Adv Acc: %.3f%% (%d/%d) | Acc: %.3f%% (%d/%d) | Attack success: %.3f'
            % (train_loss / (batch_idx + 1), 100. * correct / total, correct,
               total, 100. * nominal_correct / total, nominal_correct, total,
               total_epsilon / total))
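A minimal sketch of how the two functions in Example #4 are typically combined, assuming a hypothetical `args.epochs`:

for epoch in range(args.epochs):
    train(epoch)   # adversarial training with the fixed small attack budget
test_attack()      # final evaluation that saves (accuracy, radii) to save_name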