Example #1
    def __init__(self,
                 args,
                 model,
                 nb_iter,
                 loss_fn=nn.CrossEntropyLoss(reduction="sum")):
        super(PGDAttack, self).__init__(args, model, nb_iter, loss_fn)
        self.args = args
        self.model = model
        if args.attack_ball == 'Linf':
            self.adversary = LinfPGDAttack(self.model,
                                           loss_fn=loss_fn,
                                           eps=args.epsilon,
                                           nb_iter=nb_iter,
                                           eps_iter=0.01,
                                           rand_init=True,
                                           clip_min=args.clip_min,
                                           clip_max=args.clip_max,
                                           targeted=False)
        elif args.attack_ball == 'L2':
            self.adversary = L2PGDAttack(self.model,
                                         loss_fn=loss_fn,
                                         eps=args.epsilon,
                                         nb_iter=nb_iter,
                                         eps_iter=0.01,
                                         rand_init=True,
                                         clip_min=args.clip_min,
                                         clip_max=args.clip_max,
                                         targeted=False)
        else:
            raise NotImplementedError
Example #2
def whitebox_pgd(args, model):
    adversary = L2PGDAttack(model,
                            loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                            eps=0.3,
                            nb_iter=40,
                            eps_iter=0.01,
                            rand_init=True,
                            clip_min=-1.0,
                            clip_max=1.0,
                            targeted=False)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=True,
                                            transform=transform_train)
    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=1,
                                               shuffle=True,
                                               num_workers=8)
    train_itr = tqdm(enumerate(train_loader), total=len(train_loader.dataset))
    correct = 0
    for batch_idx, (data, target) in train_itr:
        x, target = data.to(args.device), target.to(args.device)
        adv_image = adversary.perturb(x, target)
        pred = model(adv_image)
        out = pred.max(
            1, keepdim=True)[1]  # get the index of the max log-probability
        correct += out.eq(target.view_as(out)).sum().item()
    acc = 100. * correct / len(train_loader.dataset)
    print("Accuracy under PGD attack: %f%%" % acc)
Example #3
    def create_adv_input(self, x, y, model):
        # Prepare copied model
        model = copy.deepcopy(model)

        # Prepare input and corresponding label
        data = torch.from_numpy(np.expand_dims(x, axis=0).astype(np.float32))
        target = torch.from_numpy(np.array([y]).astype(np.int64))
        data.requires_grad = True

        from advertorch.attacks import L2PGDAttack
        adversary = L2PGDAttack(model.forward,
                                eps=self.eps,
                                nb_iter=self.nb_iter,
                                eps_iter=self.eps_iter)
        perturbed_data = adversary.perturb(data, target)

        # The adversarial prediction has to differ from the true label for the attack to succeed
        output = model.forward(perturbed_data)
        final_pred = output.max(
            1, keepdim=True)[1]  # get the index of the max log-probability

        if final_pred.item() == target.item():
            return perturbed_data, 0
        else:
            return perturbed_data, 1
Example #4
    def init_pgdl2(self, model):
        return L2PGDAttack(model,
                           loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                           eps=self.args['test_epsilon'] / 255.0,
                           nb_iter=self.spectral_args["test_nb_iter"],
                           eps_iter=self.spectral_args["test_eps_iter"],
                           rand_init=True,
                           clip_min=self.spectral_args['clip_min'],
                           clip_max=self.spectral_args['clip_max'],
                           targeted=False)
Example #5
def validate(val_loader, model, criterion, args):
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, losses, top1, top5],
        prefix='Test: ')

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        adversary = L2PGDAttack(model,
                                loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                                eps=14.2737,
                                nb_iter=20,
                                eps_iter=1.784,
                                rand_init=True,
                                clip_min=-2.1179,
                                clip_max=2.6400,
                                targeted=False)

        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            if torch.cuda.is_available():
                target = target.cuda(args.gpu, non_blocking=True)
            with torch.enable_grad():
                adv_untargeted = adversary.perturb(images, target)
            # compute output
            output = model(adv_untargeted)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                progress.display(i)

        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))

    return top1.avg
Example #6
def robust_acc(model, device, loader, epsilon=0.3, nb_iter=40, step_size=0.01):
    '''
    Return the robust accuracy of a given model.
    '''
    correct = 0
    total = 0

    model.to(device).eval()
    adversary = L2PGDAttack(model, loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                            eps=epsilon, nb_iter=nb_iter, eps_iter=step_size,
                            rand_init=True, clip_min=0.0, clip_max=1.0, targeted=False)
    for cln_data, label in loader:
        cln_data, label = cln_data.to(device), label.to(device)
        adv_untargeted = adversary.perturb(cln_data, label)

        with torch.no_grad():
            output = model(adv_untargeted)

        total += label.shape[0]
        correct += (torch.argmax(output, dim=1) == label).sum().item()

    return correct / total
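A hedged usage sketch of robust_acc; the MNIST test loader, the L2 radius, and the placeholder classifier are illustrative assumptions, and the original file's imports (torch, nn, advertorch's L2PGDAttack) are assumed to be in scope:

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
testset = torchvision.datasets.MNIST(root='./data', train=False, download=True,
                                     transform=transforms.ToTensor())
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)
toy_model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))  # placeholder classifier
print('robust accuracy:', robust_acc(toy_model, device, testloader,
                                     epsilon=1.5, nb_iter=40, step_size=0.1))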
Example #7
def generate(datasetname, batch_size):
    save_dir_path = "{}/data_adv_defense/guided_denoiser".format(PY_ROOT)
    os.makedirs(save_dir_path, exist_ok=True)
    set_log_file(save_dir_path + "/generate_{}.log".format(datasetname))
    data_loader = DataLoaderMaker.get_img_label_data_loader(datasetname, batch_size, is_train=True)
    attackers = []
    for model_name in MODELS_TRAIN_STANDARD[datasetname] + MODELS_TEST_STANDARD[datasetname]:
        model = StandardModel(datasetname, model_name, no_grad=False)
        model = model.cuda().eval()
        linf_PGD_attack = LinfPGDAttack(model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=0.031372,
                                        nb_iter=30, eps_iter=0.01, rand_init=True, clip_min=0.0, clip_max=1.0,
                                        targeted=False)
        l2_PGD_attack = L2PGDAttack(model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=4.6,
                                    nb_iter=30, clip_min=0.0, clip_max=1.0, targeted=False)
        FGSM_attack = FGSM(model, loss_fn=nn.CrossEntropyLoss(reduction="sum"))
        momentum_attack = MomentumIterativeAttack(model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=0.031372,
                                                  nb_iter=30, eps_iter=0.01, clip_min=0.0, clip_max=1.0,
                                                  targeted=False)
        attackers.append(linf_PGD_attack)
        attackers.append(l2_PGD_attack)
        attackers.append(FGSM_attack)
        attackers.append(momentum_attack)
        log.info("Create model {} done!".format(model_name))

    generate_and_save_adv_examples(datasetname, data_loader, attackers, save_dir_path)
Example #8
                                           num_workers=4)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=100,
                                          shuffle=False,
                                          num_workers=4)

net = ResNet18()
net = net.to(device)
net = torch.nn.DataParallel(net)
cudnn.benchmark = True

adversary = L2PGDAttack(net,
                        loss_fn=nn.CrossEntropyLoss(),
                        eps=0.25,
                        nb_iter=100,
                        eps_iter=0.01,
                        rand_init=True,
                        clip_min=0.0,
                        clip_max=1.0,
                        targeted=False)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),
                      lr=learning_rate,
                      momentum=0.9,
                      weight_decay=0.0002)


def train(epoch):
    print('\n[ Train epoch: %d ]' % epoch)
    net.train()
    train_loss = 0
Example #9
# load model
model = MNISTM_Model()

# setup optimizer
optimizer = optim.Adam(model.parameters(), lr=lr)

loss_func = torch.nn.CrossEntropyLoss()

model = model.cuda()
loss_func = loss_func.cuda()

adversary_train = L2PGDAttack(model,
                              loss_fn=nn.CrossEntropyLoss().cuda(),
                              eps=80 / 255,
                              nb_iter=40,
                              eps_iter=0.01,
                              rand_init=True,
                              clip_min=0,
                              clip_max=1.0,
                              targeted=False)

adversary_test = L2PGDAttack(model,
                             loss_fn=nn.CrossEntropyLoss().cuda(),
                             eps=80 / 255,
                             nb_iter=100,
                             eps_iter=0.01,
                             rand_init=True,
                             clip_min=0,
                             clip_max=1.0,
                             targeted=False)
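A hedged sketch of how adversary_train could be used for adversarial training with the objects defined above; train_loader is an assumed MNIST-M data loader that is not part of this excerpt:

for img, label in train_loader:               # train_loader: assumed (image, label) batches
    img, label = img.cuda(), label.cuda()
    model.eval()                              # craft perturbations against a frozen model
    adv_img = adversary_train.perturb(img, label)
    model.train()
    optimizer.zero_grad()
    loss = loss_func(model(adv_img), label)
    loss.backward()
    optimizer.step()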
Example #10
def train(model, device, trainloader, testloader, loss_fn, optimizer,
          epochs=1, verbose=0, ckpt_folder=None,
          adv=False, epsilon=0.3, nb_iter=40, step_size=0.01,
          regularizer=False, mu=1, tau=1, beta=1):
    '''
    Train a model, returning the model in train mode on the device.

    Value of verbose:
    0 -- Only print training loss
    1 -- Print training loss and training acc
    2 -- Print training loss, training acc and test acc

    If ckpt_folder != None, then verbose is assumed to be 2
    '''

    print('Train %s on %s' % (model.__class__.__name__, device))

    model.to(device).train()

    for i in range(epochs):

        total_loss = 0

        for img, label in trainloader:
            # label is a tensor, one number for each image
            img, label = img.to(device), label.to(device)

            if adv == True:
                model.eval()
                adversary = L2PGDAttack(model, loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                                        eps=epsilon, nb_iter=nb_iter, eps_iter=step_size,
                                        rand_init=True, clip_min=0.0, clip_max=1.0,
                                        targeted=False)
                img = adversary.perturb(img, label)
                model.train()

            model.zero_grad()

            output = model(img)
            loss = loss_fn(output, label)

            if regularizer == True:
                n_samples = output.shape[0]
                index = torch.arange(n_samples, device=device)
                output = output[index, label].reshape((n_samples, 1)) - output
                output[index, label] = torch.tensor(1e10, device=device)
                alpha, _ = torch.min(output, dim=1)
                # regularizer is H_tau(\alpha) - H_0(\alpha)
                # if model.__class__.__name__ != 'CIFARCNN':
                #     loss += mu * torch.mean(torch.max(torch.tensor(0, dtype = torch.float, device = device), tau - alpha))
                # else:
                loss += mu * torch.mean(torch.clamp(-alpha, min=0, max=tau))

            if beta > 0:
                loss += orthogonal_constraint(model, device=device, beta=beta)

            loss.backward()

            optimizer.step()

            total_loss += loss.item()

        total_loss /= len(trainloader)

        # Start to evaluate model
        model.eval()

        if ckpt_folder != None:
            verbose = 2

        if verbose == 0:
            print('Epoch : %d, Loss : %.10f' % (i + 1, total_loss))
        elif verbose == 1:
            train_acc = acc(model, device, trainloader)
            print('Epoch : %d, Loss : %.10f, Training Acc : %f' %
                  (i + 1, total_loss, train_acc))
        elif verbose == 2:
            train_acc = acc(model, device, trainloader)
            test_acc = acc(model, device, testloader)
            print(
                'Epoch : %d, Loss : %.10f, Training Acc : %f, Test Acc : %f' %
                (i + 1, total_loss, train_acc, test_acc))
        else:
            assert (0)

        if ckpt_folder != None:
            checkpoint = {'epochs' : i + 1, \
                          'loss' : total_loss, \
                          'train_acc' : train_acc, \
                          'test_acc' : test_acc, \
                          'model_state_dict' : model.state_dict(), \
                          'optmizer_state_dict' : optimizer.state_dict(), \
                          }
            torch.save(
                checkpoint, ckpt_folder + '/' + model.__class__.__name__ +
                '_' + ('' if regularizer == False and beta < 1e-15 else
                       ('reg_' if regularizer == 1 else 'constraint_')) +
                str(i + 1).zfill(5) + '.tar')

        # Set model back to train mode for the next epoch
        model.train()

    return model
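A hedged sketch of calling this routine with adversarial training enabled; trainloader / testloader, the acc and orthogonal_constraint helpers, and the original imports are assumed from the surrounding file, and the placeholder classifier and PGD radius are illustrative only:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))     # placeholder classifier
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
net = train(net, device, trainloader, testloader, nn.CrossEntropyLoss(), optimizer,
            epochs=5, verbose=0, adv=True, epsilon=1.5, nb_iter=40, step_size=0.1,
            regularizer=False, beta=0)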
Example #11
def validate(val_loader, model, criterion, args):
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5],
                             prefix='Test: ')

    # switch to evaluate mode
    model.eval()
    de_transform = denormalize_transform()
    with torch.no_grad():
        adversary = L2PGDAttack(model,
                                loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                                eps=14.2737,
                                nb_iter=20,
                                eps_iter=1.784,
                                rand_init=True,
                                clip_min=-2.1179,
                                clip_max=2.6400,
                                targeted=False)

        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            # save_image(images[0], 'img1.png')
            # de_transform(images)
            save_image(images[2], 'img1.png')
            print(images.shape)
            grid_img = torchvision.utils.make_grid(images, nrow=2)
            save_image(grid_img, 'grid_img.png')

            # image_numpy=images.numpy()
            # temp=image_numpy.transpose(0,2,3,1)
            # img = Image.fromarray(temp[0].squeeze(),'RGB')
            # img.save('my.png')
            # img.show()

            # image = np.asarray(bytearray(image_numpy), dtype="uint8")
            # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
            # cv2.imshow('image',image)
            # cv2.waitKey(0)

            # print(temp.shape)
            # print(type(images))
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            if torch.cuda.is_available():
                target = target.cuda(args.gpu, non_blocking=True)
            with torch.enable_grad():
                adv_untargeted = adversary.perturb(images, target)
            adv_grid_img = torchvision.utils.make_grid(adv_untargeted, nrow=2)
            save_image(adv_grid_img, 'adv_grid_img.png')
            diff = adv_untargeted - images
            diff_norm = torch.norm(diff, p=2, dim=(1, 2, 3))
            print(diff_norm)
            return
            # compute output
            output = model(adv_untargeted)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                progress.display(i)
        plt.show()
        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
                                                                    top5=top5))

    return top1.avg
Example #12
    return (test_loss / batch_idx, acc)


train_loss, train_acc = test(trainloader)
test_loss, test_acc = test(testloader)
test_loss, test_acc = test(testloader, do_awgn=True)

adversary_linf = LinfPGDAttack(net,
                               loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                               eps=0.03,
                               nb_iter=20,
                               eps_iter=0.003,
                               rand_init=False,
                               clip_min=-2.0,
                               clip_max=2.0,
                               targeted=False)
adver_lss, adver_acc = test_adver(testloader, adversary_linf)
print('Linf acc', adver_acc)

adversary_l2 = L2PGDAttack(net,
                           loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                           eps=np.sqrt(3 * 32 * 32) * 0.03,
                           nb_iter=20,
                           eps_iter=np.sqrt(3 * 32 * 32) * 0.003,
                           rand_init=False,
                           clip_min=-2.0,
                           clip_max=2.0,
                           targeted=False)
adver_lss, adver_acc = test_adver(testloader, adversary_l2)
print('L2 acc', adver_acc)
Example #13
    print(args)

    device = "cuda"

    set_seed(args.seed)

    testset, normalize, unnormalize = str2dataset(args.dataset)
    testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=0)

    net = str2model(args.checkpoint, dataset=args.dataset, pretrained=True).eval().to(device)

    if args.norm is None:
        adversary = Clean()

    elif args.norm == "2":
        adversary = L2PGDAttack(predict=lambda x: net(normalize(x)), loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=args.eps,
                                nb_iter=args.nb_iter, eps_iter=args.lr, rand_init=True, clip_min=0.0, clip_max=1.0, targeted=False)
    elif args.norm == "inf":
        adversary = LinfPGDAttack(predict=lambda x: net(normalize(x)), loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=args.eps,
                                  nb_iter=args.nb_iter, eps_iter=args.lr, rand_init=True, clip_min=0.0, clip_max=1.0, targeted=False)

    acc = test(lambda x: net(normalize(x)),
               testloader,
               device=device,
               attacker=adversary,
               num_batch=args.num_batch,
               save_img_loc=args.save_img_loc)

    print(acc)
Example #14
def validate(adversary_settings, model, criterion, val_loader, device):
    """
    Runs a single epoch of validation using Madry's adversarial method
    """
    # The model should not accumulate gradients
    # However, adversary will require gradients
    # Don't wrap this function with torch.no_grad()
    model.eval()
    for param in model.parameters():
        param.requires_grad = False

    val_loss = 0.0
    val_correct = 0.0
    val_total = 0.0
    adv_val_loss = 0.0
    adv_val_correct = 0.0

    adv_epsilon, adv_iterations, adv_step = adversary_settings
    adversary = L2PGDAttack(
        model,
        loss_fn=torch.nn.CrossEntropyLoss(reduction="sum"),
        eps=adv_epsilon,
        nb_iter=adv_iterations,
        eps_iter=adv_step,
        rand_init=True,
        clip_min=-1.0,
        clip_max=1.0,
        targeted=False,
    )

    for examples, labels in val_loader:
        examples, labels = examples.to(device), labels.to(device)
        # need to explicitly represent 1 input channel
        examples = torch.unsqueeze(examples, 1)
        outputs = model(examples)

        # Normal
        loss = criterion(outputs, labels)
        val_loss += loss.data.item()

        _, predictions = torch.max(outputs, 1)
        val_correct += (predictions == labels).sum().item()
        val_total += labels.shape[0]

        # Generate adversarial training examples
        adv_examples = adversary.perturb(examples, labels)

        adv_outputs = model(adv_examples)

        adv_loss = criterion(adv_outputs, labels)
        adv_val_loss += adv_loss.data.item()

        _, predictions = torch.max(adv_outputs, 1)
        adv_val_correct += (predictions == labels).sum().item()

    for param in model.parameters():
        param.requires_grad = True

    return (val_loss / len(val_loader), 100 * val_correct / val_total,
            adv_val_loss / len(val_loader), 100 * adv_val_correct / val_total)
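A hedged sketch of the expected call; adversary_settings packs (eps, nb_iter, eps_iter), while model, val_loader and device are assumed from the surrounding training script (note the function adds the channel dimension itself):

criterion = torch.nn.CrossEntropyLoss()
adv_settings = (1.5, 40, 0.1)   # illustrative (eps, nb_iter, eps_iter)
val_loss, val_acc, adv_val_loss, adv_val_acc = validate(
    adv_settings, model, criterion, val_loader, device)
print('clean acc %.2f%%, adversarial acc %.2f%%' % (val_acc, adv_val_acc))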
Example #15
def train_model(
    train_adversary_settings,
    val_adversary_settings,
    model,
    optimizer,
    criterion,
    train_loader,
    val_loader,
    num_epochs,
    tensorboard_writer,
    device,
    save_settings,
):
    do_saving, save_directory, save_filename, starting_epoch, best_acc = save_settings

    # Initialize PGD adversary
    adv_epsilon, adv_iterations, adv_step = train_adversary_settings
    adversary = L2PGDAttack(
        model,
        loss_fn=torch.nn.CrossEntropyLoss(reduction="sum"),
        eps=adv_epsilon,
        nb_iter=adv_iterations,
        eps_iter=adv_step,
        rand_init=True,
        clip_min=0.0,
        clip_max=1.0,
        targeted=False,
    )

    for e in range(num_epochs):

        # Adversarial training loop
        train_loss, train_acc = train_loop_adversarial(adversary, model,
                                                       optimizer, criterion,
                                                       train_loader, device)

        tensorboard_writer.add_scalar("Loss/Train", train_loss,
                                      starting_epoch + e)
        tensorboard_writer.add_scalar("Acc/Train", train_acc,
                                      starting_epoch + e)

        val_loss, val_acc, adv_val_loss, adv_val_acc = validate(
            val_adversary_settings, model, criterion, val_loader, device)

        tensorboard_writer.add_scalar("Loss/Val", val_loss, starting_epoch + e)
        tensorboard_writer.add_scalar("Acc/Val", val_acc, starting_epoch + e)
        tensorboard_writer.add_scalar("Loss/Adv_Val", adv_val_loss,
                                      starting_epoch + e)
        tensorboard_writer.add_scalar("Acc/Adv_Val", adv_val_acc,
                                      starting_epoch + e)

        if do_saving:
            is_best = False
            if val_acc > best_acc:
                best_acc = val_acc
                is_best = True

            state = {
                "model_state": model.state_dict(),
                "accuracy": val_acc,
                "epoch": starting_epoch + e,
                "optimizer_state": optimizer.state_dict(),
            }
            save_checkpoint(state, save_directory, save_filename, is_best)
Example #16
    return [
        float(correct[:k].reshape(-1).float().sum(0,
                                                  keepdim=True).cpu().numpy())
        for k in topk
    ]


resnet = models.resnet50(pretrained=True)
resnet = resnet.cuda()
model.visual = model.visual.eval()

resnet_adversary = L2PGDAttack(resnet,
                               loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                               eps=9.9756,
                               nb_iter=20,
                               eps_iter=1.24695,
                               rand_init=True,
                               clip_min=-2.1179,
                               clip_max=2.6400,
                               targeted=False)

adversary = L2PGDAttack(model.visual,
                        loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                        eps=14.2737,
                        nb_iter=20,
                        eps_iter=1.784,
                        rand_init=True,
                        clip_min=-2.1179,
                        clip_max=2.6400,
                        targeted=False)
with torch.no_grad():
Example #17
    print('Adv acc:', accu)

    return test_adv_data, test_adv_labels


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run")
    parser.add_argument('--eps', default=80/255, type=float, help='eps')

    args = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id

    attacker = L2PGDAttack(model, loss_fn=nn.CrossEntropyLoss().cuda(), eps=args.eps,
                               nb_iter=100, eps_iter=0.01, rand_init=True, clip_min=0, clip_max=1.0,
                               targeted=False)

    adv_data_save_path = os.path.join('dataset','l2_mnistm_advT')
    os.makedirs(adv_data_save_path, exist_ok=True)

    ########### generating train
    train_adv_data, train_adv_labels = train()

    np.save(adv_data_save_path + '/train_eps' + str(args.eps), [train_adv_data, train_adv_labels])

    ########### generating test
    test_adv_data, test_adv_labels = test()

    np.save(adv_data_save_path + '/test_eps' + str(args.eps), [test_adv_data, test_adv_labels])
Example #18
    adversary = LinfPGDAttack(net,
                              loss_fn=cross_entropy_loss(),
                              eps=args.eps,
                              nb_iter=40,
                              eps_iter=0.01,
                              rand_init=True,
                              clip_min=0.0,
                              clip_max=1.0,
                              targeted=False)
else:

    adversary = L2PGDAttack(net,
                            loss_fn=csl,
                            eps=args.eps,
                            nb_iter=10,
                            eps_iter=12.75,
                            rand_init=True,
                            clip_min=0.0,
                            clip_max=255.,
                            targeted=False)
    #adversary = PGDAttack(net, loss_fn=csl, eps=args.eps, nb_iter=10, eps_iter=12.75, rand_init=True, clip_min=0.0,clip_max=255., targeted=False)
    #adversary = L2BasicIterativeAttack(net, loss_fn=csl, eps=args.eps, nb_iter=10, eps_iter=12.75, clip_min=0.0, clip_max=255., targeted=False)
    #adversary =MomentumIterativeAttack(net, loss_fn=csl, eps=args.eps, nb_iter=10, eps_iter=12.75, clip_min=0.0, clip_max=255., targeted=False)
    # adversary = JSMA(net, num_classes = 24, clip_min=0.0, clip_max=255.,theta=1.0, gamma=1.0,loss_fn=nn.CrossEntropyLoss())

net.eval()
# net2.eval()
# net3.eval()
# net4.eval()
# net5.eval()
correct = 0
Example #19
def validate(val_loader, model, criterion, args):
    print("validating")
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5],
                             prefix='Test: ')

    # switch to evaluate mode
    model.eval()
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    with torch.no_grad():
        # adversary = LinfPGDAttack(
        # model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=0.15,
        # nb_iter=40, eps_iter=0.01, rand_init=True, clip_min=0.0, clip_max=1.0,
        # targeted=False)
        adversary = L2PGDAttack(model,
                                loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                                eps=14.2737,
                                nb_iter=20,
                                eps_iter=1.784,
                                rand_init=True,
                                clip_min=-2.1179,
                                clip_max=2.6400,
                                targeted=False)
        end = time.time()
        print("enumerate dataloader")
        for i, (images, target) in enumerate(val_loader):
            # print(images)
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            if torch.cuda.is_available():
                target = target.cuda(args.gpu, non_blocking=True)
            with torch.enable_grad():
                adv_untargeted = adversary.perturb(images.cuda(),
                                                   target.cuda())
            # compute output
            if args.arch == 'simclr':
                output = model(adv_untargeted)
            elif args.arch == 'linf_4' or args.arch == 'linf_8' or args.arch == 'l2_3':
                output = model((adv_untargeted))
            elif args.arch == 'resnet50_l2_eps1':
                output = model(adv_untargeted)
            else:
                output = model((adv_untargeted))
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                progress.display(i)

        # TODO: this should also be done with the ProgressMeter
    print(args.arch)
    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
                                                                top5=top5))
    accuracy_array = []
    accuracy_array.append(top1.avg)
    accuracy_array.append(top5.avg)
    np.save(f'/content/gdrive/MyDrive/model_adv_loss/{args.arch}_accuracy.npy',
            accuracy_array)
    return top1.avg, top5.avg
Example #20
  adversary = LinfPGDAttack(
      model.visual, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=4.7579 / 1020,
      nb_iter=20, eps_iter=0.000233, rand_init=True, clip_min=-2.1179, clip_max=2.6400,
      targeted=False)
if args.attack == "inf4":
  adversary = LinfPGDAttack(
      model.visual, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=19.0316 / 255,
      nb_iter=20, eps_iter=47.579 / 5100, rand_init=True, clip_min=-2.1179, clip_max=2.6400,
      targeted=False)
if args.attack == '2':
  # adversary = L2PGDAttack(
  #     self.model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=14.2737,
  #     nb_iter=20, eps_iter=1.784, rand_init=True, clip_min=-2.1179, clip_max=2.6400,
  #     targeted=False)
  adversary = L2PGDAttack(
      model.visual, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=0.7137,
      nb_iter=20, eps_iter=0.09, rand_init=True, clip_min=-2.1179, clip_max=2.6400,
      targeted=False)
with torch.no_grad():
    top1, top5, n = 0., 0., 0.
    for i, (images, target) in enumerate(tqdm(loader)):
        images = images.cuda()
        target = target.cuda()
        with torch.enable_grad():
            adv_untargeted = adversary.perturb(images, target)
        # predict
        image_features = model.encode_image(adv_untargeted)
        image_features /= image_features.norm(dim=-1, keepdim=True)
        logits = 100. * image_features @ zeroshot_weights

        # measure accuracy
        acc1, acc5 = accuracy(logits, target, topk=(1, 5))