Esempio n. 1
0
def attack(targeted_model, args, random_start=False):
    """Build and return an adversary for `targeted_model` per `args.attack`.

    Args:
        targeted_model: the model the adversary will perturb.
        args: namespace with an `attack` field ('FGSM' or 'BIM');
            for 'FGSM' it must also carry `epsilon`.
        random_start: forwarded to the PGD attack (used by 'BIM' only).

    Returns:
        The constructed attack object.

    Raises:
        ValueError: if `args.attack` names an unknown method.

    NOTE(review): the original signature `(targeted_model,
    random_start=False, args)` was a SyntaxError (non-default parameter
    after a default one), and the constructed adversary was discarded;
    `args` now precedes `random_start` and the adversary is returned.
    """
    if args.attack == 'FGSM':
        from adversarialbox.attacks import FGSMAttack
        return FGSMAttack(targeted_model, args.epsilon)
    if args.attack == 'BIM':
        from adversarialbox.attacks import LinfPGDAttack
        return LinfPGDAttack(targeted_model, random_start)
    raise ValueError('unknown attack: %s' % args.attack)
Esempio n. 2
0
                              transform=transforms.ToTensor())  # NOTE(review): fragment starts mid-call; the `test_dataset = ...(` opener was lost in extraction
# Evaluation loader; shuffle=True on a test split is unusual but only
# changes iteration order, not results.
loader_test = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=param['test_batch_size'],
                                          shuffle=True)

# Setup the model
net = LeNet5()

# Move the network to GPU when one is available.
if torch.cuda.is_available():
    print('CUDA ensabled.')  # NOTE(review): typo ("ensabled") kept — runtime string
    net.cuda()
net.train()  # training mode (affects dropout / batch-norm layers)

# Adversarial training setup
#adversary = FGSMAttack(epsilon=0.3)
adversary = LinfPGDAttack()  # PGD adversary with its default parameters

# Train the model
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop(net.parameters(),
                                lr=param['learning_rate'],
                                weight_decay=param['weight_decay'])

for epoch in range(param['num_epochs']):

    print('Starting epoch %d / %d' % (epoch + 1, param['num_epochs']))

    for t, (x, y) in enumerate(loader_train):

        # Wrap the batch (to_var presumably moves to GPU / wraps in
        # Variable — confirm against its definition); labels cast to
        # long as required by CrossEntropyLoss.
        x_var, y_var = to_var(x), to_var(y.long())
        loss = criterion(net(x_var), y_var)
        # NOTE(review): the next line is tab-indented (TabError next to
        # the space-indented lines above) and references `weight_p`/`p`,
        # neither defined in this fragment — looks like a stray line
        # merged in from another example; confirm against the original.
		weight_p += [p]
Esempio n. 3
0
# Collect the ids of the parameters that belong to the two heads
# (trans_conv and group2) so the remaining "base" parameters can be
# given their own optimizer param group.
trans_params = list(map(id, n.trans_conv.parameters()))
class_params = list(map(id, n.group2.parameters()))

# BUGFIX: the original kept `base_params` as a lazy `filter` object,
# which was exhausted while building `optimizer` below, so `opt`
# silently received an *empty* base-parameter group. Materialize it
# as a list (and use one set-membership test instead of two passes).
_head_ids = set(trans_params) | set(class_params)
base_params = [p for p in n.parameters() if id(p) not in _head_ids]

param = {
    'delay': 0,
}

# Choose the adversary for adversarial training; abort on unknown method.
if args.m == 'fgsm':
    adversary = FGSMAttack(epsilon=8.0/255.0)
elif args.m == 'pgd':
    adversary = LinfPGDAttack(epsilon=8.0/255.0, a=2.0/255.0, k=40)
else:
    print('wrong method')
    exit(0)

loss1 = nn.MSELoss()
loss1.cuda()
loss2 = nn.CrossEntropyLoss()
loss2.cuda()

# Full optimizer: base params at the default lr, both heads at
# `learning_rate` explicitly.
optimizer = torch.optim.Adam([{'params': base_params},
                              {'params': n.trans_conv.parameters(), 'lr': learning_rate},
                              {'params': n.group2.parameters(), 'lr': learning_rate}],
                             lr=learning_rate, weight_decay=wd)

# Secondary optimizer covering only the base params and the trans_conv head.
opt = torch.optim.Adam([{'params': base_params},
                        {'params': n.trans_conv.parameters(), 'lr': learning_rate}],
                       lr=learning_rate, weight_decay=wd)
Esempio n. 4
0
def main():
    """Verify MNIST networks with the DeepZ relaxation.

    Parses the --net choice, loads the matching pretrained network,
    then for each test image runs a PGD attack (empirical check) and
    the DeepZ analyzer (certified check), printing both verdicts and
    a running score.
    """
    parser = argparse.ArgumentParser(description='Neural network verification using DeepZ relaxation')
    parser.add_argument('--net',
                        type=str,
                        choices=['fc1', 'fc2', 'fc3', 'fc4', 'fc5', 'conv1', 'conv2', 'conv3', 'conv4', 'conv5'],
                        required=True,
                        help='Neural network to verify.')
    args = parser.parse_args()

    # Lazy builders so only the requested architecture is instantiated.
    builders = {
        'fc1': lambda: FullyConnected(DEVICE, INPUT_SIZE, [100, 10]),
        'fc2': lambda: FullyConnected(DEVICE, INPUT_SIZE, [50, 50, 10]),
        'fc3': lambda: FullyConnected(DEVICE, INPUT_SIZE, [100, 100, 10]),
        'fc4': lambda: FullyConnected(DEVICE, INPUT_SIZE, [100, 100, 100, 10]),
        'fc5': lambda: FullyConnected(DEVICE, INPUT_SIZE, [400, 200, 100, 100, 10]),
        'conv1': lambda: Conv(DEVICE, INPUT_SIZE, [(32, 4, 2, 1)], [100, 10], 10),
        'conv2': lambda: Conv(DEVICE, INPUT_SIZE, [(32, 4, 2, 1), (64, 4, 2, 1)], [100, 10], 10),
        'conv3': lambda: Conv(DEVICE, INPUT_SIZE, [(32, 3, 1, 1), (32, 4, 2, 1), (64, 4, 2, 1)], [150, 10], 10),
        'conv4': lambda: Conv(DEVICE, INPUT_SIZE, [(32, 4, 2, 1), (64, 4, 2, 1)], [100, 100, 10], 10),
        'conv5': lambda: Conv(DEVICE, INPUT_SIZE, [(16, 3, 1, 1), (32, 4, 2, 1), (64, 4, 2, 1)], [100, 100, 10], 10),
    }
    net = builders[args.net]().to(DEVICE)

    net.load_state_dict(torch.load('../mnist_nets/%s.pt' % args.net, map_location=torch.device(DEVICE)))
    test_loader = torch.utils.data.DataLoader(
        torchvision.datasets.MNIST('../data/', train=False, download=True,
                                   transform=torchvision.transforms.Compose([
                                       torchvision.transforms.ToTensor()
                                   ])),
        batch_size=1, shuffle=True)

    eps = 0.02
    fnet = LinfPGDAttack(net, epsilon=eps, k=40)
    point = 0
    for batch_idx, (x, y) in enumerate(test_loader):
        # Empirical check: does the network survive a PGD perturbation?
        perturbed = torch.from_numpy(fnet.perturb(x.numpy(), y.numpy()))
        out = 'verified' if net(perturbed).max(dim=1)[1].item() == y.item() else 'not verified'
        print(out)

        # Certified check via the DeepZ analyzer on the same input.
        pred = 'verified' if analyze(net, perturbed, eps, y.item()) else 'not verified'
        print(pred)
        print('-----------')

        if out == pred:
            point += 1
        # Unsound case: analyzer says verified but the attack succeeded.
        if out == 'not verified' and pred == 'verified':
            point -= 2
    print('marks ', point)
Esempio n. 5
0
def main():
    """Attack saved checkpoints with PGD and log adversarial accuracy.

    Loads CIFAR-10/100, rebuilds the architecture named by `args.arch`,
    then for every checkpoint in `args.checkpoint` whose filename
    contains '0.pth.tar' runs `attack_over_test` on both the train and
    test splits, appending the losses/accuracies to a logger and
    plotting them at the end.
    """
    global best_acc

    # Data
    print('==> Preparing data..')
    # Standard CIFAR augmentation for the train split; note there is no
    # normalization transform — inputs stay in [0, 1].
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])

    # data_file = './data'
    # data_file = 'C:/Users/dongxian/data/CIFAR'
    # NOTE(review): hard-coded machine-specific dataset path.
    data_file = '/data/wudongxian/dataset/CIFAR/'
    if args.dataset == 'cifar10':
        data_name = 'CIFAR10'
        num_classes = 10
    else:
        # Any value other than 'cifar10' falls through to CIFAR-100.
        data_name = 'CIFAR100'
        num_classes = 100
    # Resolve the dataset class by name: torchvision.datasets.CIFAR10/100.
    trainset = getattr(torchvision.datasets,
                       data_name)(root=data_file,
                                  train=True,
                                  download=True,
                                  transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.train_batch,
                                              shuffle=True,
                                              num_workers=args.workers)

    testset = getattr(torchvision.datasets,
                      data_name)(root=data_file,
                                 train=False,
                                 download=True,
                                 transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.test_batch,
                                             shuffle=False,
                                             num_workers=args.workers)

    title = 'adv-train-' + args.arch
    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)
    logger = Logger(os.path.join(args.checkpoint, 'log_attack_results.txt'),
                    title=title)
    logger.set_names(
        ['Adv Train Loss', 'Adv Valid Loss', 'Adv Train Acc', 'Adv Valid Acc'])

    # create Model
    print('==> Building model..')
    # Look up the architecture constructor by name in the models package.
    net = models.__dict__[args.arch](num_classes=num_classes)
    if torch.cuda.is_available():
        net = net.to(device)
        net = torch.nn.DataParallel(net)
        print('Using', torch.cuda.device_count(), 'GPUs.')
        cudnn.benchmark = True
        print('Using CUDA..')
    criterion = nn.CrossEntropyLoss()
    # PGD adversary: epsilon=0.031 (~8/255), 20 iterations; `a` is
    # presumably the per-step size — confirm against LinfPGDAttack.
    adversary = LinfPGDAttack(epsilon=0.031, k=20, a=0.003)

    # Select checkpoints whose filename contains '0.pth.tar'.
    # NOTE(review): this substring test matches any epoch ending in 0
    # (10, 20, 100, ...) — confirm that is the intended filter.
    saved_models = [v for v in os.listdir(args.checkpoint) if '0.pth.tar' in v]
    saved_models = sorted(saved_models)
    save_text_list('{}/models_names.txt'.format(args.checkpoint), saved_models)
    for saved_model in saved_models:
        # resume Model
        state_dict = torch.load('{}/{}'.format(args.checkpoint, saved_model))
        net.load_state_dict(state_dict['state_dict'])

        # Adversarial loss/accuracy on both splits for this checkpoint.
        adv_train_loss, adv_train_acc = attack_over_test(
            trainloader, net, criterion, adversary)
        adv_test_loss, adv_test_acc = attack_over_test(testloader, net,
                                                       criterion, adversary)

        logger.append(
            [adv_train_loss, adv_test_loss, adv_train_acc, adv_test_acc])

    logger.close()
    logger.plot()
    # savefig is presumably re-exported by the logger/utils module —
    # confirm; writes the plotted curves next to the checkpoints.
    savefig(os.path.join(args.checkpoint, 'log.eps'))