Example 1
import torch
import torch.utils.data as Data


def main():

    # use the GPU if available, otherwise fall back to the CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = Net().to(device)

    optimizer = torch.optim.Adam(model.parameters())
    criterion = torch.nn.CrossEntropyLoss()

    train_loader = Data.DataLoader(dataset=train_data,
                                   batch_size=batch_size,
                                   shuffle=True)
    test_loader = Data.DataLoader(dataset=test_data, batch_size=batch_size)

    adversary = FGSMAttack(epsilon=0.2)

    for epoch in range(epochs):
        for t, (x, y) in enumerate(train_loader):

            x_var, y_var = to_var(x), to_var(y.long())
            loss = criterion(model(x_var), y_var)

            # adversarial training
            if epoch + 1 > delay:
                # use predicted label to prevent label leaking
                y_pred = pred_batch(x, model)
                x_adv = adv_train(x, y_pred, model, criterion, adversary)
                x_adv_var = to_var(x_adv)
                loss_adv = criterion(model(x_adv_var), y_var)
                loss = (loss + loss_adv) / 2

            if (t + 1) % 10 == 0:
                print('t = %d, loss = %.8f' % (t + 1, loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # after each epoch, measure accuracy on the test set:
        # switch to eval mode and disable gradient tracking
        model.eval()
        with torch.no_grad():
            correct = 0
            total = 0
            sum_val_loss = 0
            for data in test_loader:
                images, labels = data
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)

                val_loss = criterion(outputs, labels)
                sum_val_loss += val_loss.item()
                # take the class with the highest score
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
            print('epoch=%d accuracy=%.02f%% val_loss=%.02f' %
                  (epoch + 1, 100 * correct / total, sum_val_loss))
        model.train()

    torch.save(model.state_dict(), './cifar-adv-pytorch/net.pth')
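Example 1 calls four helpers (to_var, pred_batch, adv_train, FGSMAttack) that are defined elsewhere in the source repository. Their actual implementations are not shown here; the sketch below is one plausible reading, inferred only from how they are called above, with every name, signature, and the [0, 1] input range treated as an assumption.

import torch

class FGSMAttack:
    """Fast Gradient Sign Method: x_adv = x + epsilon * sign(dL/dx)."""
    def __init__(self, epsilon=0.2):
        self.epsilon = epsilon

    def perturb(self, x, y, model, criterion):
        x_adv = x.clone().detach().requires_grad_(True)
        criterion(model(x_adv), y).backward()
        # one signed-gradient ascent step; the clamp assumes inputs in [0, 1]
        return (x_adv + self.epsilon * x_adv.grad.sign()).clamp(0, 1).detach()

def to_var(t):
    # assumed helper: move a tensor to the GPU when one is available
    return t.cuda() if torch.cuda.is_available() else t

def pred_batch(x, model):
    # assumed helper: the model's own predictions, used as labels
    # so that the attack never sees the true labels (no label leaking)
    with torch.no_grad():
        return model(to_var(x)).argmax(dim=1)

def adv_train(x, y, model, criterion, adversary):
    # assumed helper: build adversarial examples for one batch
    return adversary.perturb(to_var(x), to_var(y), model, criterion)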
Example 2
# (snippet begins mid-statement; presumably an optimizer construction such as)
optimizer = torch.optim.Adam(net.parameters(),
                             lr=param['learning_rate'],
                             weight_decay=param['weight_decay'])

for epoch in range(param['num_epochs']):

    print('Starting epoch %d / %d' % (epoch + 1, param['num_epochs']))

    for t, (x, y) in enumerate(loader_train):

        x_var, y_var = to_var(x), to_var(y.long())
        loss = criterion(net(x_var), y_var)

        # adversarial training
        if epoch + 1 > param['delay']:
            # use predicted label to prevent label leaking
            y_pred = pred_batch(x, net)
            x_adv = adv_train(x, y_pred, net, criterion, adversary)
            x_adv_var = to_var(x_adv)
            loss_adv = criterion(net(x_adv_var), y_var)
            loss = (loss + loss_adv) / 2

        if (t + 1) % 100 == 0:
            print('t = %d, loss = %.8f' % (t + 1, loss.item()))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

test(net, loader_test)

torch.save(net.state_dict(), 'models/adv_trained_lenet5.pkl')
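To reuse the weights saved above, the state dict can be loaded back into a freshly constructed network of the same architecture. A minimal sketch; LeNet5 here stands in for whatever class net was built from:

net = LeNet5()  # assumed: the same architecture that produced the checkpoint
net.load_state_dict(torch.load('models/adv_trained_lenet5.pkl'))
net.eval()  # switch to inference mode before evaluating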
Example 3
        if loss.item() > 3:
            loss.backward(retain_graph=True)
            torch.nn.utils.clip_grad_norm_(n.parameters(), 10.0)
            opt.step()
            epoch_loss += loss.item()
            _, predicted = torch.max(c_pre.data, 1)
            total += y_train.size(0)
            correct += predicted.eq(y_train).sum().item()
            torch.cuda.empty_cache()
        else:
            loss_cl = loss2(c_pre, y_train)

            loss_sum = loss + loss_cl
            if epoch + 1 > param['delay']:
                # use predicted label to prevent label leaking
                y_pred = pred_batch(torch.cat((x_train, x_train, x_train), 1), n)
                x_adv = adv_train(torch.cat((x_train, x_train, x_train), 1),
                                  y_pred, n, loss2, adversary)
                n.zero_grad()
                optimizer.zero_grad()
                x_adv_var = to_var(x_adv)
                y_pre, c_pre = n(x_adv_var)
                loss_adv = loss2(c_pre, y_train) + \
                    loss1(y_pre, torch.cat((x_train, x_train, x_train), 1))
                loss_sum = (loss_sum + loss_adv) / 2
            loss_sum.backward(retain_graph=True)
            torch.nn.utils.clip_grad_norm_(n.parameters(), 10.0)
            optimizer.step()
            epoch_loss += loss_sum.item()
        n.train()

        x_train, y_train = x_train.cuda(), y_train.cuda()

        y_pre, c_pre = n(x_train)

        n.zero_grad()
        optimizer.zero_grad()
        _, predicted = torch.max(c_pre.data, 1)
        total += y_train.size(0)
        correct += predicted.eq(y_train).sum().item()
        loss = loss2(c_pre, y_train) + loss1(y_pre, x_train)

        if epoch + 1 > param['delay']:
            # use predicted label to prevent label leaking
            y_pred = pred_batch(x_train, n)
            x_adv = adv_train(x_train, y_pred, n, loss2, adversary)
            x_adv_var = to_var(x_adv)
            y_pre, c_pre = n(x_adv_var)
            loss_adv = loss2(c_pre, y_train) + loss1(y_pre, x_adv_var)

            loss = (loss_adv + loss) / 2

        loss.backward(retain_graph=True)
        torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0)
        optimizer.step()
        epoch_loss += loss.item()

        torch.cuda.empty_cache()
        if epoch + 1 > param['delay']:
            y_pre2, c_pre2 = n(y_pre)
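Example 3's network n returns a pair (y_pre, c_pre): a reconstruction of the input trained against loss1, and class logits trained against loss2, with clean and adversarial losses averaged as in the other examples. A minimal self-contained sketch of that two-head pattern; the architecture and both loss choices below are assumptions, not the original definitions.

import torch
import torch.nn as nn

class TwoHeadNet(nn.Module):
    # hypothetical stand-in for n: returns (reconstruction, class logits)
    def __init__(self, num_classes=10):
        super().__init__()
        self.encoder = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU())
        self.decoder = nn.Conv2d(16, 3, 3, padding=1)       # reconstruction head
        self.classifier = nn.Sequential(nn.AdaptiveAvgPool2d(1),
                                        nn.Flatten(),
                                        nn.Linear(16, num_classes))

    def forward(self, x):
        h = self.encoder(x)
        return self.decoder(h), self.classifier(h)

loss1 = nn.MSELoss()           # reconstruction term (assumed)
loss2 = nn.CrossEntropyLoss()  # classification term (assumed)

x = torch.randn(8, 3, 32, 32)
y = torch.randint(0, 10, (8,))
y_pre, c_pre = TwoHeadNet()(x)
loss = loss2(c_pre, y) + loss1(y_pre, x)  # mirrors the combined loss above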