def run_ens_cw_attack(
        test_loader,
        model,
        model1,
        model2,
        model3,
        model4,  # further ensemble members (model5, model6, ...) could be added here
        num_classes,
        loss_str='l2'):
    start = time.time()
    total = 0
    correct = 0
    adv_examples = []
    print_freq = 10
    for i, (images, labels) in enumerate(test_loader):
        images = images.cuda()
        labels = labels.cuda()
        correctly_predicted_ids = get_correct_pred_batchs(
            images, labels, model)
        if len(correctly_predicted_ids) > 0:
            total += len(correctly_predicted_ids)
            images = torch.stack(
                [images[idx] for idx in correctly_predicted_ids])
            labels = torch.stack(
                [labels[idx] for idx in correctly_predicted_ids])
            # print(images.size())
            adv_data = adversary.cw(model, images.data.clone(), labels.cpu(),
                                    1.0, loss_str, num_classes)  # loss_str: 'l2' or 'linf'
            correct_ids = get_ensemble_pred_batches(adv_data, labels, model1,
                                                    model2, model3, model4)
            correct += len(correct_ids)

            # adv_outputs = model(adv_data)
            # _, adv_pred = torch.max(adv_outputs.data, 1)
            # # print(adv_pred)
            # for k in range(len(adv_pred)):
            #     if adv_pred[k].item() != labels[k].item():
            #         clean_ex = images[k].squeeze().detach().cpu().numpy()
            #         adv_examples.append((labels[k].item(), labels[k].item(), clean_ex))
            #         adv_ex = adv_data[k].squeeze().detach().cpu().numpy()
            #         adv_examples.append((labels[k].item(), adv_pred[k].item(), adv_ex))

            if i % print_freq == 0:
                print("Correct vs Total = {}/{} = {:.2f}".format(
                    correct, total, correct / total))
    print(
        'Ensemble accuracy on {} attacked test images ({} withstand the attack): {:.2f} %'
        .format(total, correct, 100 * correct / total))
    end = time.time()
    print("Execution time: ", end - start)
    return adv_examples


def run_ens_cw_attack1(
        test_loader,
        model,
        model1,
        model2,
        model3,
        model4,  # further ensemble members (model5, model6, ...) could be added here
        num_classes,
        loss_str='l2'):
    start = time.time()
    total = 0
    correct = 0
    adv_examples = []
    print_freq = 10
    for i, (images, labels) in enumerate(test_loader):
        images = images.cuda()
        labels = labels.cuda()
        # correctly_predicted_ids = get_correct_pred_batchs(images, labels, model)
        # if len(correctly_predicted_ids)>0:
        #     total += len(correctly_predicted_ids)
        #     images = [images[i] for i in correctly_predicted_ids]
        #     images = torch.stack(images)# .squeeze()
        #     labels = [labels[i] for i in correctly_predicted_ids]
        #     labels = torch.stack(labels)# .squeeze()
        # print(images.size())
        total += labels.size()[0]
        adv_data = adversary.cw(model, images.data.clone(), labels.cpu(), 1.0,
                                loss_str, num_classes)  # loss_str: 'l2' or 'linf'
        correct_ids = get_ensemble_pred_batches(adv_data, labels, model1,
                                                model2, model3, model4)
        correct += len(correct_ids)
        if i % print_freq == 0:
            print("Correct vs Total = {}/{} = {:.2f}".format(
                correct, total, correct / total))
    print(
        'Ensemble accuracy on {} attacked test images ({} withstand the attack): {:.2f} %'
        .format(total, correct, 100 * correct / total))
    end = time.time()
    print("Execution time: ", end - start)
    return adv_examples
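
A minimal usage sketch for the two routines above, assuming a CIFAR-10 test_loader, a source model used to craft the CW examples, and four ensemble members are already constructed elsewhere; every variable name below is hypothetical, and adversary, get_correct_pred_batchs and get_ensemble_pred_batches come from the surrounding module.

# Hypothetical driver code; the model variables and num_classes are assumptions.
for m in (source_model, member1, member2, member3, member4):
    m.cuda().eval()

# Attack only images the source model already classifies correctly:
run_ens_cw_attack(test_loader, source_model,
                  member1, member2, member3, member4,
                  num_classes=10, loss_str='l2')

# Attack every test image, regardless of the source model's clean prediction:
run_ens_cw_attack1(test_loader, source_model,
                   member1, member2, member3, member4,
                   num_classes=10, loss_str='l2')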
Example #3
def main():
    # set the path to pre-trained model and output
    pre_trained_net = './pre_trained/' + args.net_type + '_' + args.dataset + '.pth'
    args.outf = args.outf + args.net_type + '_' + args.dataset + '/'
    if not os.path.isdir(args.outf):
        os.mkdir(args.outf)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)
    # check the in-distribution dataset
    if args.dataset == 'cifar100':
        args.num_classes = 100
    if args.adv_type == 'FGSM':
        adv_noise = 0.05
    elif args.adv_type == 'BIM':
        adv_noise = 0.01
    elif args.adv_type == 'DeepFool':
        if args.net_type == 'resnet':
            if args.dataset == 'cifar10':
                adv_noise = 0.18
            elif args.dataset == 'cifar100':
                adv_noise = 0.03
            else:
                adv_noise = 0.1
        else:
            if args.dataset == 'cifar10':
                adv_noise = 0.6
            elif args.dataset == 'cifar100':
                adv_noise = 0.1
            else:
                adv_noise = 0.5

    # load networks
    if args.net_type == 'densenet':
        if args.dataset == 'svhn':
            model = models.DenseNet3(100, int(args.num_classes))
            model.load_state_dict(
                torch.load(pre_trained_net,
                           map_location="cuda:" + str(args.gpu)))
        else:
            model = torch.load(pre_trained_net,
                               map_location="cuda:" + str(args.gpu))
        in_transform = transforms.Compose([transforms.ToTensor(), \
                                           transforms.Normalize((125.3/255, 123.0/255, 113.9/255), \
                                                                (63.0/255, 62.1/255.0, 66.7/255.0)),])
        min_pixel = -1.98888885975
        max_pixel = 2.12560367584
        if args.dataset == 'cifar10':
            if args.adv_type == 'FGSM':
                random_noise_size = 0.21 / 4
            elif args.adv_type == 'BIM':
                random_noise_size = 0.21 / 4
            elif args.adv_type == 'DeepFool':
                random_noise_size = 0.13 * 2 / 10
            elif args.adv_type == 'CWL2':
                random_noise_size = 0.03 / 2
        elif args.dataset == 'cifar100':
            if args.adv_type == 'FGSM':
                random_noise_size = 0.21 / 8
            elif args.adv_type == 'BIM':
                random_noise_size = 0.21 / 8
            elif args.adv_type == 'DeepFool':
                random_noise_size = 0.13 * 2 / 8
            elif args.adv_type == 'CWL2':
                random_noise_size = 0.06 / 5
        else:
            if args.adv_type == 'FGSM':
                random_noise_size = 0.21 / 4
            elif args.adv_type == 'BIM':
                random_noise_size = 0.21 / 4
            elif args.adv_type == 'DeepFool':
                random_noise_size = 0.16 * 2 / 5
            elif args.adv_type == 'CWL2':
                random_noise_size = 0.07 / 2
    elif args.net_type == 'resnet':
        model = models.ResNet34(num_c=args.num_classes)
        model.load_state_dict(
            torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
        in_transform = transforms.Compose([transforms.ToTensor(), \
                                           transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])

        min_pixel = -2.42906570435
        max_pixel = 2.75373125076
        if args.dataset == 'cifar10':
            if args.adv_type == 'FGSM':
                random_noise_size = 0.25 / 4
            elif args.adv_type == 'BIM':
                random_noise_size = 0.13 / 2
            elif args.adv_type == 'DeepFool':
                random_noise_size = 0.25 / 4
            elif args.adv_type == 'CWL2':
                random_noise_size = 0.05 / 2
        elif args.dataset == 'cifar100':
            if args.adv_type == 'FGSM':
                random_noise_size = 0.25 / 8
            elif args.adv_type == 'BIM':
                random_noise_size = 0.13 / 4
            elif args.adv_type == 'DeepFool':
                random_noise_size = 0.13 / 4
            elif args.adv_type == 'CWL2':
                random_noise_size = 0.05 / 2
        else:
            if args.adv_type == 'FGSM':
                random_noise_size = 0.25 / 4
            elif args.adv_type == 'BIM':
                random_noise_size = 0.13 / 2
            elif args.adv_type == 'DeepFool':
                random_noise_size = 0.126
            elif args.adv_type == 'CWL2':
                random_noise_size = 0.05 / 1

    model.cuda()
    print('load model: ' + args.net_type)

    # load dataset
    print('load target data: ', args.dataset)
    _, test_loader = data_loader.getTargetDataSet(args.dataset,
                                                  args.batch_size,
                                                  in_transform, args.dataroot)

    print('Attack: ' + args.adv_type + ', Dist: ' + args.dataset + '\n')
    model.eval()
    adv_data_tot, clean_data_tot, noisy_data_tot = 0, 0, 0
    label_tot = 0

    correct, adv_correct, noise_correct = 0, 0, 0
    total, generated_noise = 0, 0

    criterion = nn.CrossEntropyLoss().cuda()

    selected_list = []
    selected_index = 0

    for data, target in test_loader:
        data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)

        # compute the accuracy
        pred = output.data.max(1)[1]
        equal_flag = pred.eq(target.data).cpu()
        correct += equal_flag.sum()

        noisy_data = torch.add(data.data, random_noise_size,
                               torch.randn(data.size()).cuda())
        noisy_data = torch.clamp(noisy_data, min_pixel, max_pixel)

        if total == 0:
            clean_data_tot = data.clone().data.cpu()
            label_tot = target.clone().data.cpu()
            noisy_data_tot = noisy_data.clone().cpu()
        else:
            clean_data_tot = torch.cat(
                (clean_data_tot, data.clone().data.cpu()), 0)
            label_tot = torch.cat((label_tot, target.clone().data.cpu()), 0)
            noisy_data_tot = torch.cat(
                (noisy_data_tot, noisy_data.clone().cpu()), 0)

        # generate adversarial
        model.zero_grad()
        inputs = Variable(data.data, requires_grad=True)
        output = model(inputs)
        loss = criterion(output, target)
        loss.backward()

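        # FGSM / BIM: step along the sign of the input gradient; the
        # per-channel division by the normalization std below expresses
        # adv_noise in raw (unnormalized) pixel units.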
        if args.adv_type == 'FGSM':
            gradient = torch.ge(inputs.grad.data, 0)
            gradient = (gradient.float() - 0.5) * 2
            if args.net_type == 'densenet':
                gradient.index_copy_(1, torch.LongTensor([0]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([0]).cuda()) / (63.0/255.0))
                gradient.index_copy_(1, torch.LongTensor([1]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([1]).cuda()) / (62.1/255.0))
                gradient.index_copy_(1, torch.LongTensor([2]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([2]).cuda()) / (66.7/255.0))
            else:
                gradient.index_copy_(1, torch.LongTensor([0]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([0]).cuda()) / (0.2023))
                gradient.index_copy_(1, torch.LongTensor([1]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([1]).cuda()) / (0.1994))
                gradient.index_copy_(1, torch.LongTensor([2]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([2]).cuda()) / (0.2010))

        elif args.adv_type == 'BIM':
            gradient = torch.sign(inputs.grad.data)
            for k in range(5):
                inputs = torch.add(inputs.data, adv_noise, gradient)
                inputs = torch.clamp(inputs, min_pixel, max_pixel)
                inputs = Variable(inputs, requires_grad=True)
                output = model(inputs)
                loss = criterion(output, target)
                loss.backward()
                gradient = torch.sign(inputs.grad.data)
                if args.net_type == 'densenet':
                    gradient.index_copy_(1, torch.LongTensor([0]).cuda(), \
                                         gradient.index_select(1, torch.LongTensor([0]).cuda()) / (63.0/255.0))
                    gradient.index_copy_(1, torch.LongTensor([1]).cuda(), \
                                         gradient.index_select(1, torch.LongTensor([1]).cuda()) / (62.1/255.0))
                    gradient.index_copy_(1, torch.LongTensor([2]).cuda(), \
                                         gradient.index_select(1, torch.LongTensor([2]).cuda()) / (66.7/255.0))
                else:
                    gradient.index_copy_(1, torch.LongTensor([0]).cuda(), \
                                         gradient.index_select(1, torch.LongTensor([0]).cuda()) / (0.2023))
                    gradient.index_copy_(1, torch.LongTensor([1]).cuda(), \
                                         gradient.index_select(1, torch.LongTensor([1]).cuda()) / (0.1994))
                    gradient.index_copy_(1, torch.LongTensor([2]).cuda(), \
                                         gradient.index_select(1, torch.LongTensor([2]).cuda()) / (0.2010))

        if args.adv_type == 'DeepFool':
            _, adv_data = adversary.deepfool(model, data.data.clone(), target.data.cpu(), \
                                             args.num_classes, step_size=adv_noise, train_mode=False)
            adv_data = adv_data.cuda()
        elif args.adv_type == 'CWL2':
            _, adv_data = adversary.cw(model,
                                       data.data.clone(),
                                       target.data.cpu(),
                                       1.0,
                                       'l2',
                                       crop_frac=1.0)
        else:
            adv_data = torch.add(inputs.data, adv_noise, gradient)

        adv_data = torch.clamp(adv_data, min_pixel, max_pixel)

        # measure the noise (per-sample L-infinity perturbation magnitude)
        temp_noise_max = torch.abs(
            (data.data - adv_data).view(adv_data.size(0), -1))
        temp_noise_max, _ = torch.max(temp_noise_max, dim=1)
        generated_noise += torch.sum(temp_noise_max)

        if total == 0:
            adv_data_tot = adv_data.clone().cpu()
        else:
            adv_data_tot = torch.cat((adv_data_tot, adv_data.clone().cpu()), 0)

        output = model(Variable(adv_data, volatile=True))
        # compute the accuracy
        pred = output.data.max(1)[1]
        equal_flag_adv = pred.eq(target.data).cpu()
        adv_correct += equal_flag_adv.sum()

        output = model(Variable(noisy_data, volatile=True))
        # compute the accuracy
        pred = output.data.max(1)[1]
        equal_flag_noise = pred.eq(target.data).cpu()
        noise_correct += equal_flag_noise.sum()

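        # keep only samples that are classified correctly on both the clean
        # and the noisy input but misclassified on the adversarial input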
        for i in range(data.size(0)):
            if equal_flag[i] == 1 and equal_flag_noise[
                    i] == 1 and equal_flag_adv[i] == 0:
                selected_list.append(selected_index)
            selected_index += 1

        total += data.size(0)

    selected_list = torch.LongTensor(selected_list)
    clean_data_tot = torch.index_select(clean_data_tot, 0, selected_list)
    adv_data_tot = torch.index_select(adv_data_tot, 0, selected_list)
    noisy_data_tot = torch.index_select(noisy_data_tot, 0, selected_list)
    label_tot = torch.index_select(label_tot, 0, selected_list)

    torch.save(
        clean_data_tot, '%s/clean_data_%s_%s_%s.pth' %
        (args.outf, args.net_type, args.dataset, args.adv_type))
    torch.save(
        adv_data_tot, '%s/adv_data_%s_%s_%s.pth' %
        (args.outf, args.net_type, args.dataset, args.adv_type))
    torch.save(
        noisy_data_tot, '%s/noisy_data_%s_%s_%s.pth' %
        (args.outf, args.net_type, args.dataset, args.adv_type))
    torch.save(
        label_tot, '%s/label_%s_%s_%s.pth' %
        (args.outf, args.net_type, args.dataset, args.adv_type))

    print('Adversarial Noise:({:.2f})\n'.format(generated_noise / total))
    print('Final Accuracy: {}/{} ({:.2f}%)\n'.format(correct, total,
                                                     100. * correct / total))
    print('Adversarial Accuracy: {}/{} ({:.2f}%)\n'.format(
        adv_correct, total, 100. * adv_correct / total))
    print('Noisy Accuracy: {}/{} ({:.2f}%)\n'.format(
        noise_correct, total, 100. * noise_correct / total))
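
The four tensors saved above can be reloaded later for detector training or analysis. A small sketch, assuming main() has already been run; the path components below are placeholders for the actual args.outf, args.net_type, args.dataset and args.adv_type values.

# Placeholder values mirroring the naming scheme used by main() above.
outf, net_type, dataset, adv_type = './adv_output/resnet_cifar10/', 'resnet', 'cifar10', 'FGSM'

clean_data = torch.load('%s/clean_data_%s_%s_%s.pth' % (outf, net_type, dataset, adv_type))
adv_data = torch.load('%s/adv_data_%s_%s_%s.pth' % (outf, net_type, dataset, adv_type))
noisy_data = torch.load('%s/noisy_data_%s_%s_%s.pth' % (outf, net_type, dataset, adv_type))
labels = torch.load('%s/label_%s_%s_%s.pth' % (outf, net_type, dataset, adv_type))
print(clean_data.size(), adv_data.size(), noisy_data.size(), labels.size())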
Example #4
def applyAttack(attack, model, data_loader, num_classes):
    if args.verbose:
        print(">> Applying attack ", attack, " on dataset ", args.dataset)

    ## SETUP ##
    model.eval()

    adv_noise = ADV_NOISE[attack]
    random_noise_size = RANDOM_NOISE_SIZE[attack][args.dataset]

    adv_data_tot, clean_data_tot, noisy_data_tot = 0, 0, 0
    label_tot = 0
    correct, adv_correct, noise_correct = 0, 0, 0
    total, generated_noise = 0, 0

    criterion = nn.CrossEntropyLoss().cuda()

    selected_list = []
    selected_index = 0

    ## ITERATE OVER DATA POINTS ##
    for data, target in tqdm.tqdm(data_loader, desc=attack):
        data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)

        # compute the accuracy
        pred = output.data.max(1)[1]
        equal_flag = pred.eq(target.data).cpu()
        correct += equal_flag.sum()

        noisy_data = torch.add(data.data, random_noise_size,
                               torch.randn(data.size()).cuda())
        noisy_data = torch.clamp(noisy_data, MIN_PIXEL, MAX_PIXEL)

        if total == 0:
            clean_data_tot = data.clone().data.cpu()
            label_tot = target.clone().data.cpu()
            noisy_data_tot = noisy_data.clone().cpu()
        else:
            clean_data_tot = torch.cat(
                (clean_data_tot, data.clone().data.cpu()), 0)
            label_tot = torch.cat((label_tot, target.clone().data.cpu()), 0)
            noisy_data_tot = torch.cat(
                (noisy_data_tot, noisy_data.clone().cpu()), 0)

        # generate adversarial
        model.zero_grad()
        inputs = Variable(data.data, requires_grad=True)
        output = model(inputs)
        loss = criterion(output, target)
        loss.backward()

        if attack == 'fgsm':
            gradient = adversary.fgsm(inputs)
            adv_data = torch.add(inputs.data, adv_noise, gradient)
        elif attack == 'bim':
            gradient = adversary.bim(inputs, target, model, criterion,
                                     adv_noise, MIN_PIXEL, MAX_PIXEL)
            adv_data = torch.add(inputs.data, adv_noise, gradient)
        elif attack == 'deepfool':
            _, adv_data = adversary.deepfool(model, data.data.clone(), target.data.cpu(), \
                                             num_classes, step_size=adv_noise[args.dataset], train_mode=False)
            adv_data = adv_data.cuda()
        elif attack == 'cwl2':
            _, adv_data = adversary.cw(model,
                                       data.data.clone(),
                                       target.data.cpu(),
                                       1.0,
                                       'l2',
                                       crop_frac=1.0)

        adv_data = torch.clamp(adv_data, MIN_PIXEL, MAX_PIXEL)

        # measure the noise
        temp_noise_max = torch.abs(
            (data.data - adv_data).view(adv_data.size(0), -1))
        temp_noise_max, _ = torch.max(temp_noise_max, dim=1)
        generated_noise += torch.sum(temp_noise_max)

        if total == 0:
            adv_data_tot = adv_data.clone().cpu()
        else:
            adv_data_tot = torch.cat((adv_data_tot, adv_data.clone().cpu()), 0)

        output = model(Variable(adv_data, volatile=True))

        # compute the accuracy
        pred = output.data.max(1)[1]
        equal_flag_adv = pred.eq(target.data).cpu()
        adv_correct += equal_flag_adv.sum()

        output = model(Variable(noisy_data, volatile=True))

        # compute the accuracy
        pred = output.data.max(1)[1]
        equal_flag_noise = pred.eq(target.data).cpu()
        noise_correct += equal_flag_noise.sum()

        for i in range(data.size(0)):
            if equal_flag[i] == 1 and equal_flag_noise[
                    i] == 1 and equal_flag_adv[i] == 0:
                selected_list.append(selected_index)
            selected_index += 1

        total += data.size(0)

    ## OUTPUT ##
    selected_list = torch.LongTensor(selected_list)
    clean_data_tot = torch.index_select(clean_data_tot, 0, selected_list)
    adv_data_tot = torch.index_select(adv_data_tot, 0, selected_list)
    noisy_data_tot = torch.index_select(noisy_data_tot, 0, selected_list)
    label_tot = torch.index_select(label_tot, 0, selected_list)

    save_path = '%s/%s_%s/' % (SAVE_PATH, args.model, args.dataset)
    os.makedirs(save_path, exist_ok=True)
    torch.save(
        clean_data_tot, '%s/clean_data_%s_%s_%s.pth' %
        (save_path, args.model, args.dataset, attack))
    torch.save(
        adv_data_tot, '%s/adv_data_%s_%s_%s.pth' %
        (save_path, args.model, args.dataset, attack))
    torch.save(
        noisy_data_tot, '%s/noisy_data_%s_%s_%s.pth' %
        (save_path, args.model, args.dataset, attack))
    torch.save(
        label_tot, '%s/label_%s_%s_%s.pth' %
        (save_path, args.model, args.dataset, attack))

    print('Adversarial Noise:({:.2f})\n'.format(generated_noise / total))
    print('Final Accuracy: {}/{} ({:.2f}%)\n'.format(correct, total,
                                                     100. * correct / total))
    print('Adversarial Accuracy: {}/{} ({:.2f}%)\n'.format(
        adv_correct, total, 100. * adv_correct / total))
    print('Noisy Accuracy: {}/{} ({:.2f}%)\n'.format(
        noise_correct, total, 100. * noise_correct / total))
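
applyAttack relies on module-level constants (ADV_NOISE, RANDOM_NOISE_SIZE, MIN_PIXEL, MAX_PIXEL, SAVE_PATH) defined elsewhere in the project. A hedged sketch of how they might look for the ResNet/CIFAR configuration, with the numeric values borrowed from main() in Example #3; the project's real values may differ.

# Illustrative only: the structure matches how applyAttack indexes these
# constants; the values are assumptions taken from the ResNet branches above.
MIN_PIXEL, MAX_PIXEL = -2.42906570435, 2.75373125076
SAVE_PATH = './adv_output'  # hypothetical output root

ADV_NOISE = {
    'fgsm': 0.05,
    'bim': 0.01,
    'deepfool': {'cifar10': 0.18, 'cifar100': 0.03, 'svhn': 0.1},
    'cwl2': None,  # CW does not use a fixed step size here
}

RANDOM_NOISE_SIZE = {
    'fgsm': {'cifar10': 0.25 / 4, 'cifar100': 0.25 / 8, 'svhn': 0.25 / 4},
    'bim': {'cifar10': 0.13 / 2, 'cifar100': 0.13 / 4, 'svhn': 0.13 / 2},
    'deepfool': {'cifar10': 0.25 / 4, 'cifar100': 0.13 / 4, 'svhn': 0.126},
    'cwl2': {'cifar10': 0.05 / 2, 'cifar100': 0.05 / 2, 'svhn': 0.05},
}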
Example #5
def generate_adversarial_images(args):
    # assertions
    assert args.adversary_to_generate is not None, \
        "adversary_to_generate can't be None"
    assert AdversaryType.has_value(args.adversary_to_generate), \
        "\"{}\" adversary_to_generate not defined".format(args.adversary_to_generate)

    defense_name = None if not args.defenses else args.defenses[0]
    # defense = get_defense(defense_name, args)
    data_indices = _get_data_indices(args)
    data_type = args.data_type if args.data_type == "train" else "valid"
    dataset = load_dataset(args, data_type, None, data_indices=data_indices)
    data_loader = get_data_loader(
        dataset,
        batchsize=args.batchsize,
        device=args.device,
        shuffle=False)

    model, _, _ = get_model(args, load_checkpoint=True, defense_name=defense_name)

    adv_params = constants.get_adv_params(args)
    print('| adv_params:', adv_params)
    status = None
    all_inputs = None
    all_outputs = None
    all_targets = None
    bar = progressbar.ProgressBar(len(data_loader))
    bar.start()
    for batch_num, (imgs, targets) in enumerate(data_loader):
        if args.adversary_to_generate == str(AdversaryType.DEEPFOOL):
            assert adv_params['learning_rate'] is not None
            s, r = adversary.deepfool(
                model, imgs, targets, args.data_params['NUM_CLASSES'],
                train_mode=(args.data_type == 'train'), max_iter=args.max_adv_iter,
                step_size=adv_params['learning_rate'], batch_size=args.batchsize,
                labels=dataset.get_classes())
        elif args.adversary_to_generate == str(AdversaryType.FGS):
            s, r = adversary.fgs(
                model, imgs, targets, train_mode=(args.data_type == 'train'),
                mode=args.fgs_mode)
        elif args.adversary_to_generate == str(AdversaryType.IFGS):
            assert adv_params['learning_rate'] is not None
            s, r = adversary.ifgs(
                model, imgs, targets,
                train_mode=(args.data_type == 'train'), max_iter=args.max_adv_iter,
                step_size=adv_params['learning_rate'], mode=args.fgs_mode)
        elif args.adversary_to_generate == str(AdversaryType.CWL2):
            assert args.adv_strength is not None and len(args.adv_strength) == 1
            if len(args.crop_frac) == 1:
                crop_frac = args.crop_frac[0]
            else:
                crop_frac = 1.0
            s, r = adversary.cw(
                model, imgs, targets, args.adv_strength[0], 'l2',
                tv_weight=args.tvm_weight,
                train_mode=(args.data_type == 'train'), max_iter=args.max_adv_iter,
                drop_rate=args.pixel_drop_rate, crop_frac=crop_frac,
                kappa=args.margin)
        elif args.adversary_to_generate == str(AdversaryType.CWLINF):
            assert args.adv_strength is not None and len(args.adv_strength) == 1
            s, r = adversary.cw(
                model, imgs, targets, args.adv_strength[0], 'linf',
                bound=args.adv_bound,
                tv_weight=args.tvm_weight,
                train_mode=(args.data_type == 'train'), max_iter=args.max_adv_iter,
                drop_rate=args.pixel_drop_rate, crop_frac=args.crop_frac,
                kappa=args.margin)

        if status is None:
            status = s.clone()
            all_inputs = imgs.clone()
            all_outputs = imgs + r
            all_targets = targets.clone()
        else:
            status = torch.cat((status, s), 0)
            all_inputs = torch.cat((all_inputs, imgs), 0)
            all_outputs = torch.cat((all_outputs, imgs + r), 0)
            all_targets = torch.cat((all_targets, targets), 0)
        bar.update(batch_num)

    print("| computing adversarial stats...")
    if args.compute_stats:
        rb, ssim, sc = adversary.compute_stats(all_inputs, all_outputs, status)
        print('| average robustness = ' + str(rb))
        print('| average SSIM = ' + str(ssim))
        print('| success rate = ' + str(sc))

    # Unnormalize before saving
    unnormalize = Unnormalize(args.data_params['MEAN_STD']['MEAN'],
                                args.data_params['MEAN_STD']['STD'])
    all_inputs = unnormalize(all_inputs)
    all_outputs = unnormalize(all_outputs)
    # save output
    output_file = get_adversarial_file_path(
        args, args.adversarial_root, defense_name, adv_params,
        data_indices['end_idx'], start_idx=data_indices['start_idx'],
        with_defense=False)
    print("| Saving adversarial data at " + output_file)
    if not os.path.isdir(args.adversarial_root):
        os.makedirs(args.adversarial_root)
    torch.save({'status': status, 'all_inputs': all_inputs,
                'all_outputs': all_outputs, 'all_targets': all_targets},
                output_file)
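
generate_adversarial_images is driven entirely by an args namespace built by the surrounding tool. A rough, partial sketch of the fields the function body reads directly; every value is an illustrative assumption, and additional fields consumed by helpers such as _get_data_indices, load_dataset, get_model and get_adversarial_file_path are omitted.

from argparse import Namespace

# Field names mirror the attributes read above; the values are placeholders.
args = Namespace(
    adversary_to_generate='CWL2',   # should match str(AdversaryType.<...>)
    defenses=None,                  # or a list whose first entry names a defense
    data_type='valid',              # 'train' or 'valid'
    batchsize=32,
    device='gpu',
    max_adv_iter=50,
    adv_strength=[1.0],             # single-element list for CW attacks
    crop_frac=[1.0],
    tvm_weight=0.0,
    pixel_drop_rate=0.0,
    margin=0.0,                     # CW kappa
    fgs_mode=None,
    adv_bound=None,
    compute_stats=True,
    adversarial_root='./adversarials',
    data_params={'NUM_CLASSES': 1000,
                 'MEAN_STD': {'MEAN': [0.485, 0.456, 0.406],
                              'STD': [0.229, 0.224, 0.225]}},
)
generate_adversarial_images(args)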
Example #6
def Attack(args, data, model, criterion, target, inputs, adv_noise, min_pixel,
           max_pixel):

    if args.adv_type == 'FGSM':
        gradient = torch.ge(inputs.grad.data, 0)
        gradient = (gradient.float() - 0.5) * 2
        if args.net_type == 'densenet':
            gradient.index_copy_(1, torch.LongTensor([0]).cuda(), \
                                 gradient.index_select(1, torch.LongTensor([0]).cuda()) / (63.0 / 255.0))
            gradient.index_copy_(1, torch.LongTensor([1]).cuda(), \
                                 gradient.index_select(1, torch.LongTensor([1]).cuda()) / (62.1 / 255.0))
            gradient.index_copy_(1, torch.LongTensor([2]).cuda(), \
                                 gradient.index_select(1, torch.LongTensor([2]).cuda()) / (66.7 / 255.0))
        else:
            gradient.index_copy_(1, torch.LongTensor([0]).cuda(), \
                                 gradient.index_select(1, torch.LongTensor([0]).cuda()) / (0.2023))
            gradient.index_copy_(1, torch.LongTensor([1]).cuda(), \
                                 gradient.index_select(1, torch.LongTensor([1]).cuda()) / (0.1994))
            gradient.index_copy_(1, torch.LongTensor([2]).cuda(), \
                                 gradient.index_select(1, torch.LongTensor([2]).cuda()) / (0.2010))

    elif args.adv_type == 'BIM':
        gradient = torch.sign(inputs.grad.data)
        for k in range(5):
            inputs = torch.add(inputs.data, adv_noise, gradient)
            inputs = torch.clamp(inputs, min_pixel, max_pixel)
            inputs = Variable(inputs, requires_grad=True)
            output = model(inputs)
            loss = criterion(output, target)
            loss.backward()
            gradient = torch.sign(inputs.grad.data)
            if args.net_type == 'densenet':
                gradient.index_copy_(1, torch.LongTensor([0]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([0]).cuda()) / (63.0 / 255.0))
                gradient.index_copy_(1, torch.LongTensor([1]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([1]).cuda()) / (62.1 / 255.0))
                gradient.index_copy_(1, torch.LongTensor([2]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([2]).cuda()) / (66.7 / 255.0))
            else:
                gradient.index_copy_(1, torch.LongTensor([0]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([0]).cuda()) / (0.2023))
                gradient.index_copy_(1, torch.LongTensor([1]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([1]).cuda()) / (0.1994))
                gradient.index_copy_(1, torch.LongTensor([2]).cuda(), \
                                     gradient.index_select(1, torch.LongTensor([2]).cuda()) / (0.2010))

    if args.adv_type == 'DeepFool':

        _, adv_data = adversary.deepfool(model, data.data.clone(), target.data.cpu(), \
                                         args.num_classes, step_size=adv_noise, train_mode=False)
        adv_data = adv_data.cuda()

    elif args.adv_type == 'CWL2':
        _, adv_data = adversary.cw(model,
                                   data.data.clone(),
                                   target.data.cpu(),
                                   1.0,
                                   'l2',
                                   crop_frac=1.0)
    else:
        adv_data = torch.add(inputs.data, adv_noise, gradient)

    adv_data = torch.clamp(adv_data, min_pixel, max_pixel)
    return adv_data
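
Attack() expects the caller to have already run a forward and backward pass so that inputs.grad is populated for the FGSM and BIM branches. A minimal calling sketch modeled on the loop in Example #3; the variable names and the old-style Variable API mirror that example and are assumptions about the surrounding script.

for data, target in test_loader:
    data, target = data.cuda(), target.cuda()
    data, target = Variable(data, volatile=True), Variable(target)

    # forward/backward pass so inputs.grad is available to FGSM / BIM
    model.zero_grad()
    inputs = Variable(data.data, requires_grad=True)
    loss = criterion(model(inputs), target)
    loss.backward()

    adv_data = Attack(args, data, model, criterion, target, inputs,
                      adv_noise, min_pixel, max_pixel)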