import numpy as np
from foolbox.attacks import BoundaryAttack
from foolbox.criteria import TargetClass


def run_attack(loader, model, image, target_class):
    # expect a float32 image within the model's [0, 255] bounds
    assert image.dtype == np.float32
    assert image.min() >= 0
    assert image.max() <= 255

    starting_point, calls, is_adv = loader.get_target_class_image(
        target_class, model)

    if not is_adv:
        print('could not find a starting point')
        return None

    criterion = TargetClass(target_class)
    # prediction of the model on the original image (argmax over logits)
    original_label = np.argmax(model.predictions(image))
    # split the remaining query budget (1000 calls total) across
    # iterations; each iteration needs up to max_directions = 10 calls
    iterations = (1000 - calls - 1) // 5 // 2

    attack = BoundaryAttack(model, criterion)
    # adv = Adversarial(model, criterion, image, original_label)
    return attack(image,
                  original_label,
                  iterations=iterations,
                  max_directions=10,
                  tune_batch_size=False,
                  starting_point=starting_point)
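
For context, here is a minimal, hypothetical sketch of how this helper could be driven with the Foolbox v1 API. The RandomStartLoader stub and its get_target_class_image method are assumptions inferred from the signature above (a real loader would presumably scan a labeled dataset rather than sampling noise), and the torchvision ResNet-18 merely stands in for whichever model the author used.

import numpy as np
import torchvision.models as models
from foolbox.models import PyTorchModel


class RandomStartLoader:
    # hypothetical stand-in for the snippet's `loader` argument
    def get_target_class_image(self, target_class, model):
        calls = 0
        for _ in range(100):
            # sample images until one is classified as target_class
            candidate = np.random.uniform(
                0, 255, (3, 224, 224)).astype(np.float32)
            calls += 1
            if np.argmax(model.predictions(candidate)) == target_class:
                return candidate, calls, True
        return None, calls, False


resnet = models.resnet18(pretrained=True).eval()
fmodel = PyTorchModel(resnet, bounds=(0, 255), num_classes=1000)
image = np.random.uniform(0, 255, (3, 224, 224)).astype(np.float32)
adversarial = run_attack(RandomStartLoader(), fmodel, image, target_class=42)
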
Example #2
import numpy as np
from foolbox import Adversarial
from foolbox.attacks import CarliniWagnerL2Attack
from foolbox.criteria import TargetClass
from foolbox.distances import MeanSquaredDistance


def run_attack(model, image, target_class):
    criterion = TargetClass(target_class)
    # model is a composite model:
    #   forward model  = the black-box model under attack
    #   backward model = a substitute (ResNet, VGG, AlexNet) used to
    #                    compute the gradients
    distance = MeanSquaredDistance
    # no model passed here: model and criterion are carried by the
    # Adversarial instance below
    attack = CarliniWagnerL2Attack()
    # attack = foolbox.attacks.annealer(model, criterion)
    # prediction of the black-box model on the original image
    original_label = np.argmax(model.predictions(image))
    adv = Adversarial(model, criterion, image, original_label, distance=distance)
    return attack(adv)
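
The comments above describe a composite setup; Foolbox v1 ships this as foolbox.models.CompositeModel, which takes predictions from one wrapped model and gradients from another. A sketch under that assumption, with torchvision pretrained networks standing in for the two halves (the actual models are not part of the snippet):

import numpy as np
import torchvision.models as models
from foolbox.models import CompositeModel, PyTorchModel

# forward model: the black-box model whose predictions are attacked
forward = PyTorchModel(models.resnet50(pretrained=True).eval(),
                       bounds=(0, 255), num_classes=1000)
# backward model: a substitute network that supplies the gradients
backward = PyTorchModel(models.vgg16(pretrained=True).eval(),
                        bounds=(0, 255), num_classes=1000)
composite = CompositeModel(forward_model=forward, backward_model=backward)

image = np.random.uniform(0, 255, (3, 224, 224)).astype(np.float32)
adversarial = run_attack(composite, image, target_class=42)
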
Example #3
import numpy as np
from foolbox.attacks import SaliencyMapAttack
from foolbox.criteria import TargetClass

def run_attack(model, image, target_class, pos_salience):
    # note: pos_salience is accepted but currently unused
    criterion = TargetClass(target_class)
    attack = SaliencyMapAttack(model, criterion)
    original_label = np.argmax(model.predictions(image))
    return attack(image, original_label)
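
All three helpers return the adversarial image as a NumPy array, or None when the attack fails (Foolbox v1 attacks return None on failure with the default unpack=True), so callers should check the result before using it. A short usage sketch, assuming fmodel and image are set up as in the earlier examples:

import numpy as np

adversarial = run_attack(fmodel, image, target_class=42, pos_salience=None)
if adversarial is None:
    print('attack failed')
else:
    # confirm the model now predicts the target class
    assert np.argmax(fmodel.predictions(adversarial)) == 42
    # report the L2 size of the perturbation
    print('L2 norm:', np.linalg.norm((adversarial - image).ravel()))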