def test_pointwise_untargeted_attack(
    request: Any,
    fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
    attack: fa.PointwiseAttack,
) -> None:
    """Untargeted PointwiseAttack: must fool the model at least as well as
    its init attack and shrink the L0/L2 perturbation for some samples."""
    (model, x, y), real, low_dim = fmodel_and_data_ext_for_attacks

    # Only exercised on real, low-dimensional-input models.
    if not (low_dim and real):
        pytest.skip()

    # Rescale inputs to [0, 1] and adjust the model bounds to match.
    lower, upper = model.bounds.lower, model.bounds.upper
    x = (x - lower) / (upper - lower)
    model = model.transform_bounds((0, 1))

    # Seed the attack with salt-and-pepper adversarials as starting points.
    seed_advs = fa.SaltAndPepperNoiseAttack(steps=50).run(model, x, y)
    advs = attack.run(model, x, y, starting_points=seed_advs)

    def perturbation_norm(order: int, candidates):
        # Per-sample Lp norm of the perturbation relative to the clean input.
        return ep.norms.lp(flatten(candidates - x), p=order, axis=-1)

    shrank_l0 = perturbation_norm(0, advs) < perturbation_norm(0, seed_advs)
    shrank_l2 = perturbation_norm(2, advs) < perturbation_norm(2, seed_advs)

    # The attack must fool the model and not be worse than its seed...
    assert fbn.accuracy(model, advs, y) < fbn.accuracy(model, x, y)
    assert fbn.accuracy(model, advs, y) <= fbn.accuracy(model, seed_advs, y)
    # ...and must shrink the perturbation for at least one sample.
    assert shrank_l2.any()
    assert shrank_l0.any()
def get_attack(attack, fmodel):
    """Map an attack name to a configured foolbox (3.x API) attack.

    The name is matched first by exact L0/L2 identifiers, then by substring
    for the Linf family; the order of the checks is significant (e.g. FGSM
    must not swallow IFGSM).

    Args:
        attack: attack identifier string (e.g. 'SAPA', 'PGD', 'CWL2').
        fmodel: unused here; kept for signature compatibility with the
            legacy (foolbox 1.x) variant of this function.

    Returns:
        (A, 0, 0, 0) — the attack instance plus three zero placeholders so
        callers that unpack four values (as with the legacy variant's
        (A, metric, args, kwargs)) keep working.

    Raises:
        ValueError: if the name matches no known attack.
    """
    # L0
    if attack == 'SAPA':
        A = fa.SaltAndPepperNoiseAttack()
    elif attack == 'PA':
        A = fa.L1BrendelBethgeAttack()
    # L2
    elif 'IGD' in attack:
        A = fa.L2BasicIterativeAttack()
    elif attack == 'AGNA':
        A = fa.L2AdditiveGaussianNoiseAttack()
    elif attack == 'BA':
        A = fa.BoundaryAttack()
    elif 'DeepFool' in attack:
        A = fa.L2DeepFoolAttack()
    elif attack == 'PAL2':
        A = fa.L2BrendelBethgeAttack()
    elif attack == "CWL2":
        A = fa.L2CarliniWagnerAttack()
    # L inf
    elif 'FGSM' in attack and 'IFGSM' not in attack:
        A = fa.FGSM()
    elif 'PGD' in attack:
        A = fa.LinfPGD()
    elif 'IGM' in attack:
        A = fa.LinfBrendelBethgeAttack()
    else:
        # Narrow exception type and include the offending name for debugging.
        raise ValueError(f'Not implemented: {attack!r}')
    return A, 0, 0, 0
def get_attack(attack, fmodel):
    """Map an attack name to an instantiated foolbox (legacy 1.x API) attack.

    Matches exact names for the L0 attacks, then substrings for the L2 and
    Linf families; check order is significant (e.g. the FGSM branch must
    explicitly exclude IFGSM). Returns (A, metric, args, kwargs): the attack
    bound to `fmodel`, its distance metric, and extra run arguments.
    """
    args = []
    kwargs = {}
    # L0
    if attack == 'SAPA':
        metric = foolbox.distances.L0
        A = fa.SaltAndPepperNoiseAttack(fmodel, distance = metric)
    elif attack == 'PA':
        metric = foolbox.distances.L0
        A = fa.PointwiseAttack(fmodel, distance = metric)
    # L2
    elif 'IGD' in attack:
        metric = foolbox.distances.MSE
        A = fa.L2BasicIterativeAttack(fmodel, distance = metric)
        # kwargs['epsilons'] = 1.5
    elif attack == 'AGNA':
        metric = foolbox.distances.MSE
        # Sweep of noise magnitudes for the additive Gaussian attack.
        kwargs['epsilons'] = np.linspace(0.5, 1, 50)
        A = fa.AdditiveGaussianNoiseAttack(fmodel, distance = metric)
    elif attack == 'BA':
        metric = foolbox.distances.MSE
        A = fa.BoundaryAttack(fmodel, distance = metric)
        # Very large interval — presumably chosen to suppress per-step
        # logging for typical step counts; TODO confirm.
        kwargs['log_every_n_steps'] = 500001
    elif 'DeepFool' in attack:
        metric = foolbox.distances.MSE
        A = fa.DeepFoolL2Attack(fmodel, distance = metric)
    elif attack == 'PAL2':
        # NOTE(review): PAL2 reuses PointwiseAttack but scored under MSE,
        # unlike 'PA' which scores it under L0.
        metric = foolbox.distances.MSE
        A = fa.PointwiseAttack(fmodel, distance = metric)
    elif attack == "CWL2":
        metric = foolbox.distances.MSE
        A = fa.CarliniWagnerL2Attack(fmodel, distance = metric)
    # L inf
    elif 'FGSM' in attack and not 'IFGSM' in attack:
        metric = foolbox.distances.Linf
        A = fa.FGSM(fmodel, distance = metric)
        # Number of epsilon candidates tried by the attack.
        kwargs['epsilons'] = 20
    elif 'PGD' in attack:
        metric = foolbox.distances.Linf
        A = fa.LinfinityBasicIterativeAttack(fmodel, distance = metric)
    elif 'IGM' in attack:
        metric = foolbox.distances.Linf
        A = fa.MomentumIterativeAttack(fmodel, distance = metric)
    else:
        raise Exception('Not implemented')
    return A, metric, args, kwargs
def test_pointwise_targeted_attack(
    request: Any,
    fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
    attack: fa.PointwiseAttack,
) -> None:
    """Targeted PointwiseAttack: must hit the target classes at least as well
    as its init attack and shrink the L0/L2 perturbation for some samples."""
    (model, x, y), real, low_dim = fmodel_and_data_ext_for_attacks

    # Only exercised on real, low-dimensional-input models.
    if not (low_dim and real):
        pytest.skip()

    # Rescale inputs to [0, 1] and adjust the model bounds to match.
    lower, upper = model.bounds.lower, model.bounds.upper
    x = (x - lower) / (upper - lower)
    model = model.transform_bounds((0, 1))

    # Seed the attack with salt-and-pepper adversarials as starting points.
    seed_advs = fa.SaltAndPepperNoiseAttack(steps=50).run(model, x, y)

    # Target each sample at the class the seed adversarial is classified as;
    # where that coincides with the true label, bump to the next class mod C.
    logits = model(seed_advs)
    num_classes = logits.shape[-1]
    targets = logits.argmax(-1)
    targets = ep.where(targets == y, (targets + 1) % num_classes, targets)

    criterion = fbn.TargetedMisclassification(targets)
    advs = attack.run(model, x, criterion, starting_points=seed_advs)

    def perturbation_norm(order: int, candidates):
        # Per-sample Lp norm of the perturbation relative to the clean input.
        return ep.norms.lp(flatten(candidates - x), p=order, axis=-1)

    shrank_l0 = perturbation_norm(0, advs) < perturbation_norm(0, seed_advs)
    shrank_l2 = perturbation_norm(2, advs) < perturbation_norm(2, seed_advs)

    # Must fool the model and not be worse than its seed on the true labels...
    assert fbn.accuracy(model, advs, y) < fbn.accuracy(model, x, y)
    assert fbn.accuracy(model, advs, y) <= fbn.accuracy(model, seed_advs, y)
    # ...must move predictions toward the targets at least as well as the seed...
    assert fbn.accuracy(model, advs, targets) > fbn.accuracy(
        model, x, targets)
    assert fbn.accuracy(model, advs, targets) >= fbn.accuracy(
        model, seed_advs, targets)
    # ...and must shrink the perturbation for at least one sample.
    assert shrank_l2.any()
    assert shrank_l0.any()
# NOTE(review): fragment of an AttackTestTarget list whose opening (and the
# first entry's opening call) lies outside this chunk. The second positional
# argument is presumably a per-attack epsilon budget — TODO confirm.
uses_grad=True,
requires_real_model=True,
),
# Gradient-based DeepFool variants.
AttackTestTarget(fa.L2DeepFoolAttack(steps=50, loss="logits"), uses_grad=True),
AttackTestTarget(fa.L2DeepFoolAttack(steps=50, loss="crossentropy"), uses_grad=True),
AttackTestTarget(fa.LinfDeepFoolAttack(steps=50), uses_grad=True),
# Decision-based boundary attacks, one with a custom init attack.
AttackTestTarget(fa.BoundaryAttack(steps=50)),
AttackTestTarget(
    fa.BoundaryAttack(
        steps=110,
        init_attack=fa.LinearSearchBlendedUniformNoiseAttack(steps=50),
        update_stats_every_k=1,
    )),
AttackTestTarget(fa.SaltAndPepperNoiseAttack(steps=50), None, uses_grad=True),
AttackTestTarget(fa.SaltAndPepperNoiseAttack(steps=50, channel_axis=1), None, uses_grad=True),
AttackTestTarget(fa.LinearSearchBlendedUniformNoiseAttack(steps=50), None),
# Additive-noise attacks with fixed epsilon values.
AttackTestTarget(fa.L2AdditiveGaussianNoiseAttack(), 2500.0),
AttackTestTarget(fa.L2ClippingAwareAdditiveGaussianNoiseAttack(), 500.0),
AttackTestTarget(fa.LinfAdditiveUniformNoiseAttack(), 10.0),
AttackTestTarget(
    fa.L2RepeatedAdditiveGaussianNoiseAttack(check_trivial=False), 1000.0),
AttackTestTarget(
    fa.L2ClippingAwareRepeatedAdditiveGaussianNoiseAttack(
        check_trivial=False),
    200.0,
# NOTE(review): fragment of a parameter list opened outside this chunk; each
# entry is (attack, epsilon_or_None, flag, flag). By analogy with the
# keyword-based variants elsewhere in the file the flags look like
# (uses_grad, requires_real_model) — TODO confirm against the consumer.
(fa.GaussianBlurAttack(steps=10, max_sigma=224.0), None, True, True),
(fa.L2DeepFoolAttack(steps=50, loss="logits"), None, True, False),
(fa.L2DeepFoolAttack(steps=50, loss="crossentropy"), None, True, False),
(fa.LinfDeepFoolAttack(steps=50), None, True, False),
(fa.BoundaryAttack(steps=50), None, False, False),
(
    fa.BoundaryAttack(
        steps=110,
        init_attack=fa.LinearSearchBlendedUniformNoiseAttack(steps=50),
        update_stats_every_k=1,
    ),
    None,
    False,
    False,
),
(fa.SaltAndPepperNoiseAttack(steps=50), None, True, False),
(fa.SaltAndPepperNoiseAttack(steps=50, channel_axis=1), None, True, False),
(fa.LinearSearchBlendedUniformNoiseAttack(steps=50), None, False, False),
# Additive-noise attacks with fixed epsilon values.
(fa.L2AdditiveGaussianNoiseAttack(), 2500.0, False, False),
(fa.LinfAdditiveUniformNoiseAttack(), 10.0, False, False),
(
    fa.L2RepeatedAdditiveGaussianNoiseAttack(check_trivial=False),
    1000.0,
    False,
    False,
),
(fa.L2RepeatedAdditiveGaussianNoiseAttack(), 1000.0, False, False),
(fa.L2RepeatedAdditiveUniformNoiseAttack(), 1000.0, False, False),
(fa.LinfRepeatedAdditiveUniformNoiseAttack(), 3.0, False, False),
]
# NOTE(review): fragment of an AttackTestTarget list; both its opening and
# its end lie outside this chunk. Stochastic attacks are flagged with
# stochastic_attack=True; the second positional argument is presumably an
# epsilon budget — TODO confirm.
),
# Gradient-based DeepFool variants.
AttackTestTarget(fa.L2DeepFoolAttack(steps=50, loss="logits"), uses_grad=True),
AttackTestTarget(
    fa.L2DeepFoolAttack(steps=50, loss="crossentropy"), uses_grad=True
),
AttackTestTarget(fa.LinfDeepFoolAttack(steps=50), uses_grad=True),
# Decision-based boundary attacks, one with a custom init attack.
AttackTestTarget(fa.BoundaryAttack(steps=50)),
AttackTestTarget(
    fa.BoundaryAttack(
        steps=110,
        init_attack=fa.LinearSearchBlendedUniformNoiseAttack(steps=50),
        update_stats_every_k=1,
    )
),
AttackTestTarget(
    fa.SaltAndPepperNoiseAttack(steps=50),
    None,
    uses_grad=True,
    stochastic_attack=True,
),
AttackTestTarget(
    fa.SaltAndPepperNoiseAttack(steps=50, channel_axis=1),
    None,
    uses_grad=True,
    stochastic_attack=True,
),
AttackTestTarget(
    fa.LinearSearchBlendedUniformNoiseAttack(steps=50), None, stochastic_attack=True
),
AttackTestTarget(
    fa.L2AdditiveGaussianNoiseAttack(), 3000.0, stochastic_attack=True