Example #1
def test_deepfool_auto_p0(bn_adversarial):
    adv = bn_adversarial
    attack = DeepFoolAttack()
    with pytest.raises(ValueError):
        attack(adv, p=0)
    assert adv.perturbed is None
    assert adv.distance.value == np.inf
Example #2
def test_deepfool_auto_mae(bn_adversarial_mae):
    adv = bn_adversarial_mae
    attack = DeepFoolAttack()
    with pytest.raises(NotImplementedError):
        attack(adv)
    assert adv.perturbed is None
    assert adv.distance.value == np.inf
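The bn_* fixtures in the two tests above come from foolbox's own test suite. As a rough sketch of what they stand for (the fixture and model names below are assumptions for illustration, not the original conftest), the old single-input API wraps one image/label pair in an Adversarial object that attacks then mutate in place:

import pytest
import foolbox
from foolbox.criteria import Misclassification
from foolbox.distances import MAE

@pytest.fixture
def bn_adversarial(bn_model, bn_image, bn_label):
    # bn_model/bn_image/bn_label are assumed fixtures providing a wrapped
    # model plus one correctly classified input; the attack is then called
    # as attack(adv) and stores its result on adv.perturbed / adv.distance
    return foolbox.Adversarial(bn_model, Misclassification(), bn_image, bn_label)

@pytest.fixture
def bn_adversarial_mae(bn_model, bn_image, bn_label):
    # same wrapper, but measuring perturbations with MAE, which
    # DeepFoolAttack does not support (hence the NotImplementedError above)
    return foolbox.Adversarial(bn_model, Misclassification(), bn_image, bn_label,
                               distance=MAE)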
Example #3
def test_attack_convergence(bn_adversarial):
    adv = bn_adversarial
    attack1 = DeepFoolAttack()
    attack1(adv)
    attack2 = BoundaryAttack()
    attack2(adv, iterations=5000, verbose=True)
    # should converge
    assert adv.perturbed is not None
    assert adv.distance.value < np.inf
Example #4
def run():
    run_button.config(state='disabled')
    test_loader = load_data()
    torch_model = select_model()
    if use_cuda.get():
        log_text = 'Move model to cuda.'
        send_information(Receive_window, log_text)
        torch_model = torch_model.cuda()

    torch_model.eval()
    fmodel = PyTorchModel(torch_model, bounds=[0, 1], num_classes=10)

    method = attack_comboxlist.get()
    log_text = 'Perform {} attack... \n'.format(method)
    send_information(Receive_window, log_text)
    if method == 'FGSM':
        from foolbox.attacks import FGSM
        attack = FGSM(model=fmodel, criterion=Misclassification())
    if method == 'iFGSM':
        from foolbox.attacks import IterativeGradientSignAttack
        attack = IterativeGradientSignAttack(model=fmodel,
                                             criterion=Misclassification())
    if method == 'DeepFool':
        from foolbox.attacks import DeepFoolAttack
        attack = DeepFoolAttack(model=fmodel, criterion=Misclassification())

    attack_root = 'attacks/' + comboxlist.get() + '/' + method + '_ms'
    hacked_path = attack_root + '/hacked'
    hacked_data_path = attack_root + '/hacked_data'
    original_path = attack_root + '/original'
    original_data_path = attack_root + '/original_data'
    if not os.path.exists(attack_root):
        os.makedirs(attack_root)
        os.mkdir(hacked_path)
        os.mkdir(hacked_data_path)
        os.mkdir(original_path)
        os.mkdir(original_data_path)

    count = 1
    for data, label in test_loader:
        data = data[0].numpy()
        label = label.item()
        adversarial = attack(data, label)

        if adversarial is not None:
            if np.linalg.norm(adversarial - data) == 0:
                continue
            adv_label = np.argmax(fmodel.predictions(adversarial))

            if np.linalg.norm(adversarial - data) > 0:
                dataset = comboxlist.get()
                if dataset == 'MNIST':
                    image_data = adversarial[0] * 255
                    ori_data = data[0] * 255

                if dataset == 'CIFAR10':
                    image_data = adversarial.transpose(1, 2, 0) * 255
                    ori_data = data.transpose(1, 2, 0) * 255

                if save_adv.get():
                    hackedname = hacked_data_path + '/' + str(
                        count) + '-' + str(label) + '-' + str(
                            adv_label) + ".npy"
                    np.save(hackedname, image_data)
                    image = Image.fromarray(image_data.astype(np.uint8))
                    image.save(
                        "{hackedpath}/{name}-{label}-{adv_label}.png".format(
                            hackedpath=hacked_path,
                            name=count,
                            label=label,
                            adv_label=adv_label))

                if save_ori.get():
                    oriname = original_data_path + '/' + str(
                        count) + '-' + str(label) + ".npy"
                    np.save(oriname, ori_data)
                    oriimage = Image.fromarray(ori_data.astype(np.uint8))
                    oriimage.save("{originalpath}/{name}-{label}.png".format(
                        originalpath=original_path, name=count, label=label))

                count = count + 1

            if count % max(1, int(att_num.get()) // 10) == 0:
                log_text = "Attack: {}/{}".format(count, att_num.get())
                send_information(Receive_window, log_text)
            if count > int(att_num.get()):
                break

    log_text = "Done! The adversarial images and correspoinding data are stored in attacks for next use in step4!"
    send_information(Receive_window, log_text)
    run_button.config(state='normal')
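Stripped of the Tkinter wiring (run_button, the comboboxes, send_information and so on), the core of run() is the same old foolbox wrapper API used above. A minimal sketch for one MNIST-shaped input, with a placeholder network standing in for select_model() (both the network and the random input are assumptions for illustration):

import numpy as np
import torch.nn as nn
import foolbox
from foolbox.attacks import DeepFoolAttack
from foolbox.criteria import Misclassification

# placeholder 10-class network instead of select_model()
torch_model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10)).eval()
fmodel = foolbox.models.PyTorchModel(torch_model, bounds=(0, 1), num_classes=10)

attack = DeepFoolAttack(model=fmodel, criterion=Misclassification())

data = np.random.rand(1, 28, 28).astype(np.float32)  # fake image in [0, 1]
label = int(np.argmax(fmodel.predictions(data)))      # attack the current prediction
adversarial = attack(data, label)                     # perturbed image or None

if adversarial is not None:
    adv_label = int(np.argmax(fmodel.predictions(adversarial)))
    print(np.linalg.norm(adversarial - data), adv_label)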
Example #5
def test_deepfool_auto_linf(bn_adversarial_linf):
    adv = bn_adversarial_linf
    attack = DeepFoolAttack()
    attack(adv)
    assert adv.perturbed is not None
    assert adv.distance.value < np.inf
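The same three checks also appear below written against the newer batch-style DeepFoolAttack API, where the model and criterion are passed to the attack's constructor and whole arrays of images and labels to the call.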
def test_deepfool_auto_p0(bn_model, bn_criterion, bn_images, bn_labels):
    attack = DeepFoolAttack(bn_model, bn_criterion)
    with pytest.raises(ValueError):
        attack(bn_images, bn_labels, unpack=False, p=0)
def test_deepfool_auto_mae(bn_model, bn_criterion, bn_images, bn_labels):
    attack = DeepFoolAttack(bn_model, bn_criterion, distance=MAE)
    with pytest.raises(NotImplementedError):
        attack(bn_images, bn_labels, unpack=False)
def test_deepfool_auto_linf(bn_model, bn_criterion, bn_images, bn_labels):
    attack = DeepFoolAttack(bn_model, bn_criterion, distance=Linf)
    advs = attack(bn_images, bn_labels, unpack=False)
    for adv in advs:
        assert adv.perturbed is not None
        assert adv.distance.value < np.inf
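For reference, a minimal sketch of calling that batch-style API directly, outside the test fixtures (the tiny placeholder network and random data below are assumptions, only meant to show the call shape):

import numpy as np
import torch.nn as nn
import foolbox
from foolbox.attacks import DeepFoolAttack
from foolbox.criteria import Misclassification
from foolbox.distances import Linf

# placeholder model and data standing in for the bn_* fixtures
net = nn.Sequential(nn.Flatten(), nn.Linear(8 * 8, 10)).eval()
fmodel = foolbox.models.PyTorchModel(net, bounds=(0, 1), num_classes=10)

images = np.random.rand(4, 1, 8, 8).astype(np.float32)
labels = np.random.randint(0, 10, size=4)

attack = DeepFoolAttack(fmodel, Misclassification(), distance=Linf)
advs = attack(images, labels, unpack=False)  # one Adversarial object per input
for adv in advs:
    print(adv.distance.value, adv.perturbed is not None)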