Code example #1
File: test.py  Project: steven202/robust_union
import foolbox.attacks as fa  # Foolbox 3 attack implementations


def get_attack(attack, fmodel):
    """Map an attack name to a Foolbox attack instance."""
    args = []    # placeholders kept from the original (unused here)
    kwargs = {}
    # L0 attacks
    if attack == 'SAPA':
        A = fa.SaltAndPepperNoiseAttack()
    elif attack == 'PA':
        A = fa.L1BrendelBethgeAttack()

    # L2 attacks
    elif 'IGD' in attack:
        A = fa.L2BasicIterativeAttack()
    elif attack == 'AGNA':
        A = fa.L2AdditiveGaussianNoiseAttack()
    elif attack == 'BA':
        A = fa.BoundaryAttack()
    elif 'DeepFool' in attack:
        A = fa.L2DeepFoolAttack()
    elif attack == 'PAL2':
        A = fa.L2BrendelBethgeAttack()
    elif attack == 'CWL2':
        A = fa.L2CarliniWagnerAttack()

    # L-infinity attacks
    elif 'FGSM' in attack and 'IFGSM' not in attack:
        A = fa.FGSM()
    elif 'PGD' in attack:
        A = fa.LinfPGD()
    elif 'IGM' in attack:
        A = fa.LinfBrendelBethgeAttack()
    else:
        raise Exception('Not implemented')
    return A, 0, 0, 0
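
For orientation, a minimal usage sketch of this dispatcher (not part of the original project; fmodel, images and labels are assumed to be a Foolbox-wrapped model and an eagerpy image/label batch prepared elsewhere):

# Hypothetical usage sketch; assumes fmodel, images and labels already exist.
A, _, _, _ = get_attack('PGD', fmodel)
raw_advs, clipped_advs, success = A(fmodel, images, labels, epsilons=[0.03])
print('attack success rate:', success.float32().mean().item())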
Code example #2
import eagerpy as ep
import foolbox as fb
import foolbox.attacks as fa

# baseline_model, ds_train, ds_test, batch_size and tensorboard_callback are
# defined earlier in the original script.
baseline_model.fit(ds_train,
                   epochs=1,
                   validation_data=ds_test,
                   steps_per_epoch=7500 // batch_size,
                   validation_steps=2500 // batch_size,
                   callbacks=[tensorboard_callback])

for images, labels in ds_train.take(1):  # only take the first batch of the dataset
    # ep.astensors converts several native tensors at once and returns a tuple,
    # so unpack the converted image and label tensors together.
    images_ex, labels_ex = ep.astensors(images, labels)

fmodel = fb.TensorFlowModel(baseline_model, bounds=(0, 1))

attacks = [
    fa.FGSM(),
    fa.LinfPGD(),
    fa.LinfBasicIterativeAttack(),
    fa.LinfAdditiveUniformNoiseAttack(),
    fa.LinfDeepFoolAttack(),
]

attacks_names = [
    "FGSM", "LinfPGD", "LinfBasicIterativeAttack",
    "LinfAdditiveUniformNoiseAttack", "LinfDeepFoolAttack"
]

epsilons = [
    0.0, 0.0005, 0.001, 0.0015, 0.002, 0.003, 0.005, 0.01, 0.02, 0.03, 0.1,
    0.3, 0.5, 1.0
]
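
The snippet ends before the attacks are actually run. A minimal sketch of the standard Foolbox 3 evaluation loop that this setup leads into (the loop itself is an assumption, not part of the original code):

# Sketch of the evaluation loop this setup is building toward (assumed, not
# part of the original snippet); follows the standard Foolbox 3 call pattern.
for name, attack in zip(attacks_names, attacks):
    raw_advs, clipped_advs, success = attack(fmodel, images_ex, labels_ex,
                                             epsilons=epsilons)
    # success has shape (len(epsilons), batch_size); robust accuracy per epsilon
    robust_accuracy = 1 - success.float32().mean(axis=-1)
    print(name)
    for eps, acc in zip(epsilons, robust_accuracy):
        print(f'  eps={eps:<6} robust accuracy: {acc.item():.3f}')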
Code example #3
import os

import foolbox.attacks as fa

# ROOT_DIR is the project root directory, defined earlier in the original script.
cp_latest_filenames = [
    os.path.join(ROOT_DIR, 'Torch_projects', 'mlp_768_mnist', 'checkpoints',
                 'mlp_768_mnist_latest_checkpoint_v0.pt'),
    os.path.join(ROOT_DIR, 'Torch_projects', 'lca_768_mlp_mnist', 'checkpoints',
                 'lca_768_mlp_mnist_latest_checkpoint_v0.pt')
]

# Absolute step size per iteration and number of iterations for LinfPGD.
attack_params = {'linfPGD': {'abs_stepsize': 0.01, 'steps': 5000}}

attacks = [
    #fa.FGSM(),
    fa.LinfPGD(**attack_params['linfPGD']),
    #fa.LinfBasicIterativeAttack(),
    #fa.LinfAdditiveUniformNoiseAttack(),
    #fa.LinfDeepFoolAttack(),
]
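
The snippet stops before these checkpoints are loaded and attacked. For orientation, a heavily hedged sketch of how they would typically be combined with Foolbox's PyTorch wrapper follows; build_model(), the checkpoint contents, and the images/labels batch are all assumptions, not taken from the original project.

# Hypothetical sketch (not part of the original snippet): build_model() stands
# in for the project's own network constructor, the checkpoints are assumed to
# hold state dicts, and images/labels are an assumed MNIST batch in [0, 1].
import torch
import foolbox as fb

model = build_model()
model.load_state_dict(torch.load(cp_latest_filenames[0], map_location='cpu'))
model.eval()

fmodel = fb.PyTorchModel(model, bounds=(0, 1))
raw_advs, clipped_advs, success = attacks[0](fmodel, images, labels,
                                             epsilons=[0.1, 0.3])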

epsilons = [  # allowed perturbation size
    0.0,
    0.05,
    0.1,
    0.15,
    0.2,
    0.25,
    0.3,
    0.35,
    #0.4,