Example #1
def attack(targeted_model, args, random_start=False):
    if args.attack == 'FGSM':
        from adversarialbox.attacks import FGSMAttack
        adversary = FGSMAttack(targeted_model, args.epsilon)
    elif args.attack == 'BIM':
        from adversarialbox.attacks import LinfPGDAttack
        adversary = LinfPGDAttack(targeted_model, random_start)
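
# For reference: Example #1 only selects an adversary object; the gradient step itself
# lives inside adversarialbox. A minimal one-step FGSM sketch in plain PyTorch
# (the function name and the [0, 1] clamping range are assumptions, not the adversarialbox API):
import torch
import torch.nn.functional as F

def fgsm_perturb(model, x, y, epsilon):
    """One-step FGSM: shift each input by epsilon in the sign of the loss gradient."""
    x_adv = x.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(x_adv), y)
    loss.backward()
    x_adv = x_adv + epsilon * x_adv.grad.sign()
    return x_adv.clamp(0.0, 1.0).detach()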
Example #2
def main():

    # use the GPU if available, otherwise fall back to the CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = Net().to(device)

    optimizer = torch.optim.Adam(model.parameters())
    criterion = torch.nn.CrossEntropyLoss()

    train_loader = Data.DataLoader(dataset=train_data,
                                   batch_size=batch_size,
                                   shuffle=True)
    test_loader = Data.DataLoader(dataset=test_data, batch_size=batch_size)

    adversary = FGSMAttack(epsilon=0.2)

    for epoch in range(epochs):
        for t, (x, y) in enumerate(train_loader):

            x_var, y_var = to_var(x), to_var(y.long())
            loss = criterion(model(x_var), y_var)

            # adversarial training
            if epoch + 1 > delay:
                # use predicted label to prevent label leaking
                y_pred = pred_batch(x, model)
                x_adv = adv_train(x, y_pred, model, criterion, adversary)
                x_adv_var = to_var(x_adv)
                loss_adv = criterion(model(x_adv_var), y_var)
                loss = (loss + loss_adv) / 2

            if (t + 1) % 10 == 0:
                print('t = %d, loss = %.8f' % (t + 1, loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # after each epoch, evaluate accuracy on the test set: switch to eval mode and stop tracking gradients
        model.eval()
        with torch.no_grad():
            correct = 0
            total = 0
            sum_val_loss = 0
            for data in test_loader:
                images, labels = data
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)

                val_loss = criterion(outputs, labels)
                sum_val_loss += val_loss.item()
                # take the class with the highest score
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum()
            print('epoch=%d accuracy=%.02f%% val_loss=%.02f' %
                  (epoch + 1, (100 * correct / total), sum_val_loss))
        model.train()  # back to training mode for the next epoch

    torch.save(model.state_dict(), './cifar-adv-pytorch/net.pth')
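
# Example #2 leans on helpers (to_var, pred_batch, adv_train) imported from
# adversarialbox.utils that are not shown above. A rough sketch of what they do,
# written against the names used in the snippet (assumptions, not the library's
# exact implementation):
import torch

def to_var(x, requires_grad=False):
    # move to GPU when available and optionally track gradients
    if torch.cuda.is_available():
        x = x.cuda()
    return x.requires_grad_(requires_grad)

def pred_batch(x, model):
    # hard predictions for a batch; used in place of ground-truth labels
    # to avoid label leaking during adversarial training
    with torch.no_grad():
        return model(to_var(x)).argmax(dim=1)

def adv_train(x, y, model, criterion, adversary):
    # let the adversary perturb the batch against the current model state
    adversary.model = model
    return adversary.perturb(x, y)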
Example #3
# Data loaders
test_dataset = datasets.MNIST(root='../data/',
                              train=False,
                              download=True,
                              transform=transforms.ToTensor())
loader_test = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=param['test_batch_size'],
                                          shuffle=False)

# Setup model to be attacked
net = LeNet5()
net.load_state_dict(torch.load('models/adv_trained_lenet5.pkl'))

if torch.cuda.is_available():
    print('CUDA enabled.')
    net.cuda()

for p in net.parameters():
    p.requires_grad = False
net.eval()

test(net, loader_test)

# Adversarial attack
adversary = FGSMAttack(net, param['epsilon'])
# adversary = LinfPGDAttack(net, random_start=False)

t0 = time()
attack_over_test_data(net, adversary, param, loader_test)
print('{:.2f}s elapsed.'.format(time() - t0))
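
# attack_over_test_data (from adversarialbox.utils) perturbs every test batch and
# reports accuracy on the perturbed inputs. A simplified sketch, reusing the to_var
# helper sketched above; the exact signature and the perturb() call are assumptions:
def attack_over_test_data(model, adversary, param, loader_test):
    correct, total = 0, 0
    for x, y in loader_test:
        # craft adversarial examples for this batch, then classify them
        x_adv = adversary.perturb(x, y)
        with torch.no_grad():
            pred = model(to_var(x_adv)).argmax(dim=1)
        correct += (pred.cpu() == y).sum().item()
        total += y.size(0)
    acc = correct / total
    print('Adversarial accuracy: {:.2%}'.format(acc))
    return acc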
Example #4
param = {
    'model': 'SimpleNet',
    'patience': args.Epoch,
    'batch_size': args.batchsize,
    'nepochs': args.Epoch,
    'nworkers': 1,
    'seed': 1,
    'data': 'mnist',
    'epsilon': args.epsilon,
}

advtraining = args.advtraining
print('======================================')

if advtraining == 'FGSM':
    adversary = FGSMAttack(epsilon=param['epsilon'])
    T = 0.0
    print('use FGSM adv training')
elif advtraining == 'IFGSM':
    #adversary = LinfPGDAttack(epsilon=param['epsilon'], k=15,order='inf')
    adversary = PGDAttack(epsilon=param['epsilon'],
                          k=15,
                          order='inf',
                          storeadv=args.storeadv)
    T = 0.0
    print('use LinfPGD adv training')

elif advtraining == 'PGD':
    #adversary = LinfPGDAttack(epsilon=param['epsilon'], k=1, order='2')
    adversary = PGDAttack(epsilon=param['epsilon'],
                          k=1,
                          order='2',
                          storeadv=args.storeadv)
Example #5
                p.requires_grad = False
            net.eval()
    else:
        for p in net.parameters():
            p.requires_grad = False
        net.eval()

    for epsilon in epsilon_set:
        train_dataset, val_dataset = utils.get_dataset(params)
        
        loader_test = torch.utils.data.DataLoader(val_dataset, 
            batch_size=256, shuffle=False)    
        #test(net, loader_test)

        if method == 'BayesWRM' or method == 'Bayes':
            adversary = FGSMAttack(model_list, epsilon, is_train=False)
            advacc = attack_over_test_data(model_list, adversary, param, loader_test)
        else:
            adversary = FGSMAttack(net, epsilon, is_train=False, advtraining=method)
            advacc = attack_over_test_data(net, adversary, param, loader_test)

        print('method', method, 'adv accuracy', advacc)
        advacc_set.append(advacc)

    df = pd.DataFrame(
        {'epsilon': list(epsilon_set),
         'advacc': advacc_set})

    df.to_csv(os.path.join('trafficsignwhitebox', method+'_FGSMwhitebox.csv'))

Example #6
test_dataset = datasets.MNIST(root='../data/', train=False, download=True,
    transform=transforms.ToTensor())
loader_test = torch.utils.data.DataLoader(test_dataset, 
    batch_size=param['test_batch_size'], shuffle=True)


# Setup the model
net = LeNet5()

if torch.cuda.is_available():
    print('CUDA enabled.')
    net.cuda()
net.train()

# Adversarial training setup
adversary = FGSMAttack(epsilon=0.3)
#adversary = LinfPGDAttack()

# Train the model
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop(net.parameters(), lr=param['learning_rate'])

for epoch in range(param['num_epochs']):

    print('Starting epoch %d / %d' % (epoch + 1, param['num_epochs']))

    for t, (x, y) in enumerate(loader_train):

        x_var, y_var = to_var(x), to_var(y.long())
        loss = criterion(net(x_var), y_var)
Example #7
advtraining = args.advtraining
print('======================================')
param = {
        'model'     :   'resnet18',
        'patience'  :   5,
        'batch_size':   64,
        'nepochs'   :   10,
        'nworkers'  :   4,
        'seed'      :   1,
        'data'      :   'fashion',
        'epsilon'   :   args.epsilon,
        }


if advtraining == 'FGSM':
    adversary = FGSMAttack(epsilon=param['epsilon'], storeadv=args.storeadv)
    T = 0.0
    print('use FGSM adv training')
elif advtraining == 'IFGSM':
    adversary = PGDAttack(epsilon=param['epsilon'], k=15,order='inf', storeadv=args.storeadv)
    T = 0.0
    print('use LinfPGD adv training')

elif advtraining == 'PGD':
    #adversary = LinfPGDAttack(epsilon=param['epsilon'], k=1, order='2')
    adversary = PGDAttack(epsilon=param['epsilon'], k=1, order='2', storeadv=args.storeadv)
    T = 0.0
    print('use PGD advtraining')


elif advtraining == 'ERM':
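
# Examples #4 and #7 configure PGDAttack with an epsilon, an iteration count k and a
# norm order. The core of an L-inf PGD attack is iterated FGSM plus projection back
# into the epsilon-ball around the clean input; a minimal sketch (the step size a,
# the random start and the [0, 1] clamping range are assumptions):
import torch
import torch.nn.functional as F

def linf_pgd_perturb(model, x, y, epsilon, a=0.01, k=15, random_start=True):
    x_nat = x.clone().detach()
    x_adv = x_nat.clone()
    if random_start:
        # start from a random point inside the epsilon-ball
        x_adv = x_adv + torch.empty_like(x_adv).uniform_(-epsilon, epsilon)
    for _ in range(k):
        x_adv.requires_grad_(True)
        loss = F.cross_entropy(model(x_adv), y)
        grad = torch.autograd.grad(loss, x_adv)[0]
        with torch.no_grad():
            x_adv = x_adv + a * grad.sign()
            # project back into the L-inf ball and the valid input range
            x_adv = torch.min(torch.max(x_adv, x_nat - epsilon), x_nat + epsilon)
            x_adv = x_adv.clamp(0.0, 1.0)
    return x_adv.detach()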
Example #8
bias_p, weight_p = [], []  # collect bias and weight parameters into separate groups
for name, p in n.named_parameters():
    if 'bias' in name:
        bias_p += [p]
    else:
        weight_p += [p]

trans_params = list(map(id, n.trans_conv.parameters()))
class_params = list(map(id, n.group2.parameters()))

base_params = filter(lambda p: id(p) not in trans_params, n.parameters())
base_params = filter(lambda p: id(p) not in class_params, base_params)
param = {
    'delay': 10,
}
if args.m == 'fgsm':
    adversary = FGSMAttack()
elif args.m == 'pgd':
    adversary = LinfPGDAttack()
else:
    print('wrong method')
    exit(0)
loss1 = nn.MSELoss()
loss1.cuda()
loss2 = nn.CrossEntropyLoss()
loss2.cuda()
optimizer = torch.optim.Adamax([{
    'params': base_params
}, {
    'params': n.trans_conv.parameters(),
    'lr': learning_rate
}, {
Example #9
		bias_p += [p]
	else:
		weight_p += [p]

trans_params = list(map(id, n.trans_conv.parameters()))
class_params = list(map(id, n.group2.parameters()))

base_params = filter(lambda p: id(p) not in trans_params,
                     n.parameters())
base_params = filter(lambda p: id(p) not in class_params,
                     base_params)
param = {
	'delay': 0,
}
if args.m=='fgsm':
	adversary = FGSMAttack(epsilon=8.0/255.0)
elif args.m=='pgd':
	adversary = LinfPGDAttack(epsilon=8.0/255.0, a=2.0/255.0, k=40)
else:
	print('wrong method')
	exit(0)
loss1 = nn.MSELoss()
loss1.cuda()
loss2 = nn.CrossEntropyLoss()
loss2.cuda()
optimizer = torch.optim.Adam([{'params': base_params},
                              {'params':n.trans_conv.parameters(),'lr':learning_rate},
                              {'params':n.group2.parameters(),'lr':learning_rate}],
                      lr=learning_rate,weight_decay=wd)

opt = torch.optim.Adam([{'params': base_params},
Example #10
            p.requires_grad = False
        net.eval()

    train_dataset, val_dataset = utils.get_dataset(params,
                                                   fixedindex=fixedindex)

    loader_test = torch.utils.data.DataLoader(val_dataset,
                                              batch_size=1,
                                              shuffle=False)
    #test(net, loader_test)

    if method == 'BayesWRM' or method == 'Bayes':
        adversary = FGSMAttack(model_list,
                               epsilon,
                               is_train=False,
                               advtraining=method,
                               inputdist=inputdist,
                               noiseratio=noiseratio,
                               storeadv=True)
        #adversary = CWAttack(model_list, steps=k)
        advacc = attack_over_test_data(model_list, adversary, param,
                                       loader_test)
    else:
        adversary = FGSMAttack(net,
                               epsilon,
                               is_train=False,
                               advtraining=method,
                               storeadv=True)
        #adversary = CWAttack(net, steps=k)
        advacc = attack_over_test_data(net, adversary, param, loader_test)
    print('method', method, 'adv accuracy', advacc)
Example #11
            for p in net.parameters():
                p.requires_grad = False
            net.eval()
    else:
        for p in net.parameters():
            p.requires_grad = False
        net.eval()

    for epsilon in epsilon_set:
        # Data loaders
        valset = FashionMNIST('data', train=False, transform=val_transforms)
        loader_test = torch.utils.data.DataLoader(
            valset, batch_size=param['test_batch_size'], shuffle=False)
        #test(net, loader_test)

        if method == 'BayesWRM' or method == 'Bayes':
            adversary = FGSMAttack(model_list, epsilon, is_train=False)

            advacc = attack_over_test_data(model_list, adversary, param,
                                           loader_test)
        else:
            adversary = FGSMAttack(net, epsilon, is_train=False)
            advacc = attack_over_test_data(net, adversary, param, loader_test)

        print('method', method, 'adv accuracy', advacc)
        advacc_set.append(advacc)

    df = pd.DataFrame({'epsilon': list(epsilon_set), 'advacc': advacc_set})

    df.to_csv(os.path.join('fashionmnistwhitebox', method + '_whitebox.csv'))