Example #1
def pytorch_mnist(request: Any) -> ModelAndData:
    fmodel = fbn.zoo.ModelLoader.get().load("examples/zoo/mnist/",
                                            module_name="foolbox_model")
    x, y = fbn.samples(fmodel, dataset="mnist", batchsize=16)
    x = ep.astensor(x)
    y = ep.astensor(y)
    return fmodel, x, y
Example #2
def main() -> None:
    # instantiate a model (could also be a TensorFlow or JAX model)
    model = models.resnet18(pretrained=True).eval()
    preprocessing = dict(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225],
                         axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    # get data and test the model
    # wrapping the tensors with ep.astensors is optional, but it allows
    # us to work with EagerPy tensors in the following
    images, labels = ep.astensors(
        *samples(fmodel, dataset="imagenet", batchsize=16))
    clean_acc = accuracy(fmodel, images, labels) * 100
    print(f"clean accuracy:  {clean_acc:.1f} %")

    # the attack tries combinations of the specified rotations and translations of each image
    # and stops early once adversarial rotations and translations have been found for all images
    attack = fa.SpatialAttack(
        max_translation=6,  # 6px so x in [x-6, x+6] and y in [y-6, y+6]
        num_translations=6,  # number of translations in x, y.
        max_rotation=20,  # +- rotation in degrees
        num_rotations=5,  # number of rotations
        # max total iterations = num_rotations * num_translations**2
    )

    # report the success rate of the attack (percentage of samples that could
    # be adversarially perturbed) and the robust accuracy (the remaining accuracy
    # of the model when it is attacked)
    xp_, _, success = attack(fmodel, images, labels)
    suc = success.float32().mean().item() * 100
    print(f"attack success:  {suc:.1f} %"
          " (for the specified rotation and translation bounds)")
    print(f"robust accuracy: {100 - suc:.1f} %"
          " (for the specified rotation and translation bounds)")
Example #3
def test_genattack_numpy(request: Any) -> None:
    class Model:
        def __call__(self, inputs: Any) -> Any:
            return inputs.mean(axis=(2, 3))

    model = Model()
    with pytest.raises(ValueError):
        fbn.NumPyModel(model, bounds=(0, 1), data_format="foo")

    fmodel = fbn.NumPyModel(model, bounds=(0, 1))
    x, y = ep.astensors(
        *fbn.samples(
            fmodel, dataset="imagenet", batchsize=16, data_format="channels_first"
        )
    )

    with pytest.raises(ValueError, match="data_format"):
        fbn.attacks.GenAttack(reduced_dims=(2, 2)).run(
            fmodel, x, fbn.TargetedMisclassification(y), epsilon=0.3
        )

    with pytest.raises(ValueError, match="channel_axis"):
        fbn.attacks.GenAttack(channel_axis=2, reduced_dims=(2, 2)).run(
            fmodel, x, fbn.TargetedMisclassification(y), epsilon=0.3
        )
Example #4
def test_loading_model(request: Any, url: str) -> None:
    backend = request.config.option.backend
    if backend != "tensorflow":
        pytest.skip()

    # download model
    try:
        fmodel = fbn.zoo.get_model(url, name="MobileNetV2", overwrite=True)
    except fbn.zoo.GitCloneError:
        pytest.skip()

    # download again (test overwriting)
    try:
        fmodel = fbn.zoo.get_model(url, name="MobileNetV2", overwrite=True)
    except fbn.zoo.GitCloneError:
        pytest.skip()

    # create a dummy image
    # x = np.zeros(dim, dtype=np.float32)
    # x[:] = np.random.randn(*x.shape)
    x, y = fbn.samples(fmodel, dataset="imagenet", batchsize=16)

    # run the model
    # logits = model(x)
    # probabilities = ep.softmax(logits)
    # predicted_class = np.argmax(logits)
    assert fbn.accuracy(fmodel, x, y) > 0.9
Example #5
def test_samples(fmodel_and_data: ModelAndData, batchsize: int,
                 dataset: str) -> None:
    fmodel, _, _ = fmodel_and_data
    if hasattr(fmodel, "data_format"):
        data_format = fmodel.data_format  # type: ignore
        x, y = fbn.samples(fmodel, dataset=dataset, batchsize=batchsize)
        assert len(x) == len(y) == batchsize
        assert not ep.istensor(x)
        assert not ep.istensor(y)
        x, y = fbn.samples(fmodel,
                           batchsize=batchsize,
                           data_format=data_format)
        assert len(x) == len(y) == batchsize
        assert not ep.istensor(x)
        assert not ep.istensor(y)
        with pytest.raises(ValueError):
            data_format = {
                "channels_first": "channels_last",
                "channels_last": "channels_first",
            }[data_format]
            fbn.samples(fmodel, batchsize=batchsize, data_format=data_format)
    else:
        x, y = fbn.samples(fmodel,
                           batchsize=batchsize,
                           data_format="channels_first")
        assert len(x) == len(y) == batchsize
        assert not ep.istensor(x)
        assert not ep.istensor(y)
        with pytest.raises(ValueError):
            fbn.samples(fmodel, batchsize=batchsize)
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--steps',
                        type=int,
                        default=20000,
                        help='Iteration of BA')
    parser.add_argument('--targeted',
                        action='store_true',
                        help='Run a targeted attack')

    args = parser.parse_args()

    model = Net()
    model.load_state_dict(torch.load('mnist_cnn.pt'))
    model.eval()

    preprocessing = dict(mean=0.1307, std=0.3081)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    fmodel = fmodel.transform_bounds((0, 1))
    assert fmodel.bounds == (0, 1)

    images, labels = ep.astensors(
        *samples(fmodel, dataset="mnist", batchsize=10))

    print('Model accuracy on clean examples: {}'.format(
        accuracy(fmodel, images, labels)))

    if args.targeted:
        target_class = (labels + 7) % 10
        criterion = fb.criteria.TargetedMisclassification(target_class)
    else:
        criterion = fb.criteria.Misclassification(labels)

    attack = fa.BoundaryAttack(steps=args.steps, tensorboard=None)
    epsilons = np.linspace(0.01, 10, 20)
    raw, clipped, success = attack(fmodel, images, criterion, epsilons=epsilons)

    robust_accuracy = 1 - success.float32().mean(axis=-1)

    plt.plot(epsilons, robust_accuracy.numpy())
    plt.xlabel("Epsilons")
    plt.ylabel("Robust Accuracy")
    plt.savefig('mnist_BA_robust_acc.jpg')
    plt.show()

    mean_distance = []
    for i in range(len(clipped)):
        dist = np.mean(fb.distances.l2(clipped[i], images).numpy())
        mean_distance.append(dist)

    plt.plot(epsilons, mean_distance)
    plt.xlabel('Epsilons')
    plt.ylabel('Mean L2 distance')
    plt.savefig("mnist_BA_mean_L2distance.jpg")
    plt.show()
Example #7
def numpy_simple_model(request: Any) -> ModelAndData:
    class Model:
        def __call__(self, inputs: Any) -> Any:
            return inputs.mean(axis=(2, 3))

    model = Model()
    with pytest.raises(ValueError):
        fbn.NumPyModel(model, bounds=(0, 1), data_format="foo")

    fmodel = fbn.NumPyModel(model, bounds=(0, 1))
    with pytest.raises(ValueError, match="data_format"):
        x, _ = fbn.samples(fmodel, dataset="imagenet", batchsize=16)

    fmodel = fbn.NumPyModel(model, bounds=(0, 1), data_format="channels_first")
    with pytest.warns(UserWarning, match="returning NumPy arrays"):
        x, _ = fbn.samples(fmodel, dataset="imagenet", batchsize=16)

    x = ep.astensor(x)
    y = fmodel(x).argmax(axis=-1)
    return fmodel, x, y
Example #8
def test_samples_large_batch(fmodel_and_data: ModelAndData, batchsize: int,
                             dataset: str) -> None:
    fmodel, _, _ = fmodel_and_data
    data_format = getattr(fmodel, "data_format", "channels_first")
    with pytest.warns(UserWarning, match="only 20 samples"):
        x, y = fbn.samples(fmodel,
                           dataset=dataset,
                           batchsize=batchsize,
                           data_format=data_format)
    assert len(x) == len(y) == batchsize
    assert not ep.istensor(x)
    assert not ep.istensor(y)
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--steps',
                        type=int,
                        default=10000,
                        help='Iteration of BA')
    parser.add_argument('--targeted',
                        action='store_true',
                        help='Run a targeted attack')

    args = parser.parse_args()

    model = Net()
    model.load_state_dict(torch.load('mnist_cnn.pt'))
    model.eval()

    preprocessing = dict(mean=0.1307, std=0.3081)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    fmodel = fmodel.transform_bounds((0, 1))
    assert fmodel.bounds == (0, 1)

    images, labels = ep.astensors(
        *samples(fmodel, dataset="mnist", batchsize=10))

    print('Model accuracy on clean examples: {}'.format(
        accuracy(fmodel, images, labels)))
    epsilons = np.linspace(0.01, 10, 20)

    boundary_attack = fa.BoundaryAttack(steps=args.steps, tensorboard=None)
    _, _, ba_success = boundary_attack(fmodel,
                                       images,
                                       labels,
                                       epsilons=epsilons)

    ba_robust_accuracy = 1 - ba_success.float32().mean(axis=-1)

    random_attack = fa.L2RepeatedAdditiveGaussianNoiseAttack(
        repeats=args.steps)
    _, _, ra_success = random_attack(fmodel, images, labels, epsilons=epsilons)
    ra_robust_accuracy = 1 - ra_success.float32().mean(axis=-1)

    legends = ["Boundary Attack", "Random Attack"]
    plt.plot(epsilons, ba_robust_accuracy.numpy())
    plt.plot(epsilons, ra_robust_accuracy.numpy())
    plt.legend(legends, loc='upper right')
    plt.xlabel("Perturbation Norm (L2)")
    plt.ylabel("Robust Accuracy")
    plt.title("{} Queries".format(args.steps))
    plt.savefig('mnist_robust_acc.jpg')
    plt.show()
Example #10
def pytorch_resnet18(request: Any) -> ModelAndData:
    if request.config.option.skipslow:
        pytest.skip()

    import torchvision.models as models

    model = models.resnet18(pretrained=True).eval()
    preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    fmodel = fbn.PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    x, y = fbn.samples(fmodel, dataset="imagenet", batchsize=16)
    x = ep.astensor(x)
    y = ep.astensor(y)
    return fmodel, x, y
Example #11
def test_blur_numpy(request: Any) -> None:
    class Model:
        def __call__(self, inputs: Any) -> Any:
            return inputs.mean(axis=(2, 3))

    model = Model()
    with pytest.raises(ValueError):
        fbn.NumPyModel(model, bounds=(0, 1), data_format="foo")

    fmodel = fbn.NumPyModel(model, bounds=(0, 1))
    x, y = ep.astensors(*fbn.samples(
        fmodel, dataset="imagenet", batchsize=16,
        data_format="channels_first"))
    with pytest.raises(ValueError, match="data_format"):
        fbn.attacks.GaussianBlurAttack()(fmodel, x, y, epsilons=None)
Example #12
def tensorflow_mobilenetv2(request: Any) -> ModelAndData:
    if request.config.option.skipslow:
        pytest.skip()

    import tensorflow as tf

    model = tf.keras.applications.MobileNetV2(weights="imagenet")
    fmodel = fbn.TensorFlowModel(
        model, bounds=(0, 255), preprocessing=dict(mean=127.5, std=127.5)
    )

    x, y = fbn.samples(fmodel, dataset="imagenet", batchsize=16)
    x = ep.astensor(x)
    y = ep.astensor(y)
    return fmodel, x, y
Example #13
def jax_simple_model(request: Any) -> ModelAndData:
    import jax

    def model(x: Any) -> Any:
        return jax.numpy.mean(x, axis=(1, 2))

    bounds = (0, 1)
    fmodel = fbn.JAXModel(model, bounds=bounds)

    x, _ = fbn.samples(
        fmodel, dataset="cifar10", batchsize=16, data_format="channels_last"
    )
    x = ep.astensor(x)
    y = fmodel(x).argmax(axis=-1)
    return fmodel, x, y
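Since `fbn.JAXModel` wraps a plain function, the fixture's return values can be attacked like any other foolbox model. A minimal sketch under that assumption (the attack choice and the epsilon are illustrative, not part of the fixture):

# assumes fmodel, x, y as returned by jax_simple_model above
attack = fbn.attacks.L2AdditiveGaussianNoiseAttack()
raw, clipped, success = attack(fmodel, x, fbn.Misclassification(y), epsilons=[0.5])
print(success.float32().mean(axis=-1))  # success rate for the single epsilon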
Example #14
def tensorflow_simple_sequential(
    device: Optional[str] = None, preprocessing: fbn.types.Preprocessing = None
) -> ModelAndData:
    import tensorflow as tf

    with tf.device(device):
        model = tf.keras.Sequential()
        model.add(tf.keras.layers.GlobalAveragePooling2D())
    bounds = (0, 1)
    fmodel = fbn.TensorFlowModel(
        model, bounds=bounds, device=device, preprocessing=preprocessing
    )

    x, _ = fbn.samples(fmodel, dataset="cifar10", batchsize=16)
    x = ep.astensor(x)
    y = fmodel(x).argmax(axis=-1)
    return fmodel, x, y
Example #15
def tensorflow_simple_functional(request: Any) -> ModelAndData:
    import tensorflow as tf

    channels = 3
    h = w = 224
    data_format = tf.keras.backend.image_data_format()
    shape = (channels, h, w) if data_format == "channels_first" else (h, w, channels)
    x = x_ = tf.keras.Input(shape=shape)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    model = tf.keras.Model(inputs=x_, outputs=x)
    bounds = (0, 1)
    fmodel = fbn.TensorFlowModel(model, bounds=bounds)

    x, _ = fbn.samples(fmodel, dataset="imagenet", batchsize=16)
    x = ep.astensor(x)
    y = fmodel(x).argmax(axis=-1)
    return fmodel, x, y
Example #16
def tensorflow_resnet50(request: Any) -> ModelAndData:
    if request.config.option.skipslow:
        pytest.skip()

    import tensorflow as tf

    if not tf.test.is_gpu_available():
        pytest.skip("ResNet50 test too slow without GPU")

    model = tf.keras.applications.ResNet50(weights="imagenet")
    preprocessing = dict(flip_axis=-1, mean=[104.0, 116.0, 123.0])  # RGB to BGR
    fmodel = fbn.TensorFlowModel(model, bounds=(0, 255), preprocessing=preprocessing)

    x, y = fbn.samples(fmodel, dataset="imagenet", batchsize=16)
    x = ep.astensor(x)
    y = ep.astensor(y)
    return fmodel, x, y
Example #17
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--steps',
                        type=int,
                        default=1000,
                        help='Maximum number of steps to perform')
    parser.add_argument('--targeted',
                        action='store_true',
                        help='Run a targeted attack')

    args = parser.parse_args()

    model = Net()
    model.load_state_dict(torch.load('mnist_cnn.pt'))
    model.eval()

    preprocessing = dict(mean=0.1307, std=0.3081)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    fmodel = fmodel.transform_bounds((0, 1))
    assert fmodel.bounds == (0, 1)

    images, labels = ep.astensors(
        *samples(fmodel, dataset="mnist", batchsize=10))

    print('Model accuracy on clean examples: {}'.format(
        accuracy(fmodel, images, labels)))

    if args.targeted:
        target_class = (labels + 7) % 10
        criterion = fb.criteria.TargetedMisclassification(target_class)
    else:
        criterion = fb.criteria.Misclassification(labels)

    attack = fa.L2DeepFoolAttack(steps=args.steps)
    epsilons = None
    raw, clipped, success = attack(fmodel, images, criterion, epsilons=epsilons)

    robust_accuracy = 1 - success.float32().mean()
    print("Robust Accuracy", robust_accuracy.item())

    dist = np.mean(fb.distances.l2(clipped, images).numpy())
    print("Average perturbation norm", dist)
Example #18
def tensorflow_mobilenetv3(request: Any) -> ModelAndData:
    if request.config.option.skipslow:
        pytest.skip()

    import tensorflow as tf

    model = tf.keras.applications.MobileNetV3Small(
        weights="imagenet", minimalistic=True
    )
    fmodel = fbn.TensorFlowModel(
        model,
        bounds=(0, 255),
        preprocessing=None,
    )

    x, y = fbn.samples(fmodel, dataset="imagenet", batchsize=8)
    x = ep.astensor(x)
    y = ep.astensor(y)
    return fmodel, x, y
Example #19
def tensorflow_simple_subclassing(request: Any) -> ModelAndData:
    import tensorflow as tf

    class Model(tf.keras.Model):  # type: ignore
        def __init__(self) -> None:
            super().__init__()
            self.pool = tf.keras.layers.GlobalAveragePooling2D()

        def call(self, x: tf.Tensor) -> tf.Tensor:  # type: ignore
            x = self.pool(x)
            return x

    model = Model()
    bounds = (0, 1)
    fmodel = fbn.TensorFlowModel(model, bounds=bounds)

    x, _ = fbn.samples(fmodel, dataset="cifar10", batchsize=16)
    x = ep.astensor(x)
    y = fmodel(x).argmax(axis=-1)
    return fmodel, x, y
Example #20
def pytorch_simple_model(
    device: Any = None, preprocessing: fbn.types.Preprocessing = None
) -> ModelAndData:
    import torch

    class Model(torch.nn.Module):
        def forward(self, x: torch.Tensor) -> torch.Tensor:  # type: ignore
            x = torch.mean(x, 3)
            x = torch.mean(x, 2)
            return x

    model = Model().eval()
    bounds = (0, 1)
    fmodel = fbn.PyTorchModel(
        model, bounds=bounds, device=device, preprocessing=preprocessing
    )

    x, _ = fbn.samples(fmodel, dataset="imagenet", batchsize=16)
    x = ep.astensor(x)
    y = fmodel(x).argmax(axis=-1)
    return fmodel, x, y
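Note that this helper derives `y` from the model's own predictions (`fmodel(x).argmax(axis=-1)`), so the clean accuracy on the returned batch is 1.0 by construction, which keeps attack results in the tests easy to interpret. A minimal sketch of that property (a hypothetical direct call; in the test suite the function is used as a pytest fixture):

fmodel, x, y = pytorch_simple_model()
assert fbn.accuracy(fmodel, x, y) == 1.0  # labels come from the model itself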
Example #21
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])

    model.fit(x_train, y_train, epochs=5)

    model.evaluate(x_test, y_test, verbose=2)

    # instantiate the model
    fmodel = TensorFlowModel(model, bounds=(0, 1))

    # get data and test the model
    # wrapping the tensors with ep.astensors is optional, but it allows
    # us to work with EagerPy tensors in the following

    ##########################################################
    images, labels = samples(fmodel, dataset="mnist", batchsize=16)
    images1, labels1 = ep.astensors(
        *samples(fmodel, dataset="mnist", batchsize=16))
    print(accuracy(fmodel, images1, labels1))

    predict = fmodel(images).numpy()
    tf.nn.softmax(predict).numpy()
    correct_pred = tf.math.argmax(predict, 1)
    print(correct_pred)

    #print(images)
    images_arr = np.array(images)

    #print(images_arr)

    alist = []
Example #22
import foolbox.attacks as fa
import numpy as np

if __name__ == "__main__":
    # instantiate a model (could also be a TensorFlow or JAX model)
    model = models.resnet18(pretrained=True).eval()
    preprocessing = dict(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225],
                         axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    # get data and test the model
    # wrapping the tensors with ep.astensors is optional, but it allows
    # us to work with EagerPy tensors in the following
    images, labels = ep.astensors(
        *samples(fmodel, dataset="imagenet", batchsize=16))
    clean_acc = accuracy(fmodel, images, labels)
    print(f"clean accuracy:  {clean_acc * 100:.1f} %")
    print("")

    attacks = [
        fa.FGSM(),
        fa.LinfPGD(),
        fa.LinfBasicIterativeAttack(),
        fa.LinfAdditiveUniformNoiseAttack(),
        fa.LinfDeepFoolAttack(),
    ]

    epsilons = [
        0.0,
        0.0005,
Example #23
def main() -> None:
    # instantiate a model (could also be a TensorFlow or JAX model)
    #model = models.resnet18(pretrained=True).eval()
    #model=torch.load('/data1/zyh/copycat/Framework/cifar_model.pth')

    model = AlexNet()
    path = "./cifar_net.pth"
    #path = '/data1/zyh/copycat/Framework/cifar_model.pth'
    #model.load_state_dict(torch.load('/data1/zyh/copycat/Framework/cifar_model.pth'))
    #pretrained_dict = {k: v for k, v in model_pretrained.items() if k in model_dict}
    #model_dict.update(pretrained_dict)
    #model.load_state_dict(state_dict)
    model.load_state_dict(torch.load(path), strict=True)
    model.eval()

    print(type(model))
    #preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    preprocessing = dict(mean=[0.5]*3, std=[0.5]*3, axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)


    # get data and test the model
    # wrapping the tensors with ep.astensors is optional, but it allows
    # us to work with EagerPy tensors in the following
    #test_dataset = torchvision.datasets.CIFAR10(root='~/.torch/',
    #                                         train=True,
    #                                         #transform = transforms.Compose([transforms.Resize((256,256)),transforms.ToTensor()]),
    #                                         transform = transforms.Compose([transforms.ToTensor()]),
    #                                         download=True)
    #test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
    #                                       batch_size=128,  # number of samples read per batch
    #                                       shuffle=False)  # whether to shuffle the samples when reading
    #                                       # create the iterator
    #data_iter = iter(test_loader)

    #images, labels = next(data_iter)
    # once iteration starts, queues and worker threads begin loading the data
    #images, labels = data_iter.next()
    #images=images.to(device)
    #labels=labels.to(device)
    #im=images
    #images=im.resize(100,3,128,128)
    images, labels = ep.astensors(*samples(fmodel, dataset="cifar10", batchsize=16))
    #images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
    #print(images.shape)
    clean_acc = accuracy(fmodel, images, labels)
    
    print(f"clean accuracy:  {clean_acc * 100:.1f} %")

    # apply the attack
    attack = LinfPGD()
    '''epsilons = [
        0.0,
        0.0002,
        0.0005,
        0.0008,
        0.001,
        0.0015,
        0.002,
        0.003,
        0.01,
        0.1,
        0.3,
        0.5,
        1.0,
    ]'''
    epsilons = [
        0.0005,
        0.001,
        0.002,
        0.01,
        0.1,
    ]
    raw_advs, clipped_advs, success = attack(fmodel, images, labels, epsilons=epsilons)
    print(type(raw_advs))
    print("atest")
    # calculate and report the robust accuracy (the accuracy of the model when
    # it is attacked)
    robust_accuracy = 1 - success.float32().mean(axis=-1)
    print("robust accuracy for perturbations with")
    for eps, acc in zip(epsilons, robust_accuracy):
        print(f"  Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")

    # we can also manually check this
    # we will use the clipped advs instead of the raw advs, otherwise
    # we would need to check if the perturbation sizes are actually
    # within the specified epsilon bound
    print()
    print("we can also manually check this:")
    print()
    print("robust accuracy for perturbations with")
    for eps, advs_ in zip(epsilons, clipped_advs):
        acc2 = accuracy(fmodel, advs_, labels)
        print(f"  Linf norm ≤ {eps:<6}: {acc2 * 100:4.1f} %")
        print("    perturbation sizes:")
        perturbation_sizes = (advs_ - images).norms.linf(axis=(1, 2, 3)).numpy()
        print("    ", str(perturbation_sizes).replace("\n", "\n" + "    "))
        if acc2 == 0:
            break
    fig = plt.gcf()
    os.makedirs("./image/", exist_ok=True)
    for i in range(len(raw_advs)):
        img_v = raw_advs[i].raw
        torchvision.utils.save_image(img_v, "./image/" + str(i) + ".png")
Example #24
def main() -> None:
    # instantiate a model (could also be a TensorFlow or JAX model)
    model = models.resnet18(pretrained=True).eval()
    preprocessing = dict(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225],
                         axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    # get data and test the model
    # wrapping the tensors with ep.astensors is optional, but it allows
    # us to work with EagerPy tensors in the following
    images, labels = ep.astensors(
        *samples(fmodel, dataset="imagenet", batchsize=16))
    clean_acc = accuracy(fmodel, images, labels)
    print(f"clean accuracy:  {clean_acc * 100:.1f} %")

    # replace the gradient with the gradient from another model
    model2 = fmodel  # demo, we just use the same model

    # TODO: this is still a bit annoying because we need
    # to overwrite run to get the labels
    class Attack(LinfPGD):
        def value_and_grad(self, loss_fn, x):
            val1 = loss_fn(x)
            loss_fn2 = self.get_loss_fn(model2, self.labels)
            _, grad2 = ep.value_and_grad(loss_fn2, x)
            return val1, grad2

        def run(self, model, inputs, criterion, *, epsilon, **kwargs):
            criterion_ = get_criterion(criterion)
            self.labels = criterion_.labels
            return super().run(model,
                               inputs,
                               criterion_,
                               epsilon=epsilon,
                               **kwargs)

    # apply the attack
    attack = Attack()
    epsilons = [
        0.0,
        0.0002,
        0.0005,
        0.0008,
        0.001,
        0.0015,
        0.002,
        0.003,
        0.01,
        0.1,
        0.3,
        0.5,
        1.0,
    ]
    raw_advs, clipped_advs, success = attack(fmodel,
                                             images,
                                             labels,
                                             epsilons=epsilons)

    # calculate and report the robust accuracy (the accuracy of the model when
    # it is attacked)
    robust_accuracy = 1 - success.float32().mean(axis=-1)
    print("robust accuracy for perturbations with")
    for eps, acc in zip(epsilons, robust_accuracy):
        print(f"  Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")

    # we can also manually check this
    # we will use the clipped advs instead of the raw advs, otherwise
    # we would need to check if the perturbation sizes are actually
    # within the specified epsilon bound
    print()
    print("we can also manually check this:")
    print()
    print("robust accuracy for perturbations with")
    for eps, advs_ in zip(epsilons, clipped_advs):
        acc2 = accuracy(fmodel, advs_, labels)
        print(f"  Linf norm ≤ {eps:<6}: {acc2 * 100:4.1f} %")
        print("    perturbation sizes:")
        perturbation_sizes = (advs_ - images).norms.linf(axis=(1, 2,
                                                               3)).numpy()
        print("    ", str(perturbation_sizes).replace("\n", "\n" + "    "))
        if acc2 == 0:
            break
Example #25
def main() -> None:
    # instantiate a model (could also be a TensorFlow or JAX model)
    model = models.resnet18(pretrained=True).eval()
    preprocessing = dict(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225],
                         axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    # get data and test the model
    # wrapping the tensors with ep.astensors is optional, but it allows
    # us to work with EagerPy tensors in the following
    images, labels = ep.astensors(
        *samples(fmodel, dataset="imagenet", batchsize=16))
    clean_acc = accuracy(fmodel, images, labels)
    print(f"clean accuracy:  {clean_acc * 100:.1f} %")

    # apply the attack
    attack = LinfPGD()
    epsilons = [
        0.0,
        0.0002,
        0.0005,
        0.0008,
        0.001,
        0.0015,
        0.002,
        0.003,
        0.01,
        0.1,
        0.3,
        0.5,
        1.0,
    ]
    raw_advs, clipped_advs, success = attack(fmodel,
                                             images,
                                             labels,
                                             epsilons=epsilons)

    # calculate and report the robust accuracy (the accuracy of the model when
    # it is attacked)
    robust_accuracy = 1 - success.float32().mean(axis=-1)
    print("robust accuracy for perturbations with")
    for eps, acc in zip(epsilons, robust_accuracy):
        print(f"  Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")

    # we can also manually check this
    # we will use the clipped advs instead of the raw advs, otherwise
    # we would need to check if the perturbation sizes are actually
    # within the specified epsilon bound
    print()
    print("we can also manually check this:")
    print()
    print("robust accuracy for perturbations with")
    for eps, advs_ in zip(epsilons, clipped_advs):
        acc2 = accuracy(fmodel, advs_, labels)
        print(f"  Linf norm ≤ {eps:<6}: {acc2 * 100:4.1f} %")
        print("    perturbation sizes:")
        perturbation_sizes = (advs_ - images).norms.linf(axis=(1, 2,
                                                               3)).numpy()
        print("    ", str(perturbation_sizes).replace("\n", "\n" + "    "))
        if acc2 == 0:
            break
Example #26
import torchvision.models as models
import eagerpy as ep
from foolbox import PyTorchModel, accuracy, samples
from foolbox.attacks import LinfPGD


if __name__ == "__main__":
    # instantiate a model
    model = models.resnet18(pretrained=True).eval()
    preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    # get data and test the model
    # wrapping the tensors with ep.astensors is optional, but it allows
    # us to work with EagerPy tensors in the following
    images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
    print(accuracy(fmodel, images, labels))

    # apply the attack
    attack = LinfPGD()
    epsilons = [0.0, 0.001, 0.01, 0.03, 0.1, 0.3, 0.5, 1.0]
    advs, _, success = attack(fmodel, images, labels, epsilons=epsilons)

    # calculate and report the robust accuracy
    robust_accuracy = 1 - success.float32().mean(axis=-1)
    for eps, acc in zip(epsilons, robust_accuracy):
        print(eps, acc.item())

    # we can also manually check this
    for eps, advs_ in zip(epsilons, advs):
        print(eps, accuracy(fmodel, advs_, labels))
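Example #26 prints the robust accuracy per epsilon as plain numbers; the same curve can be plotted the way Examples #6 and #9 do. A short sketch, assuming `epsilons` and `robust_accuracy` as computed in the example above (the output filename is illustrative):

import matplotlib.pyplot as plt

plt.plot(epsilons, robust_accuracy.numpy())
plt.xlabel("Linf epsilon")
plt.ylabel("robust accuracy")
plt.savefig("resnet18_linf_pgd_robust_acc.png")
plt.show()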
Example #27
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        type=str,
                        default="mnist",
                        help='Dataset Name')
    parser.add_argument('--std',
                        type=float,
                        default=0.25,
                        help='To control the norm of perturbation')
    parser.add_argument('--steps',
                        type=int,
                        default=100000,
                        help='The number of calls made to the model')
    parser.add_argument('--save_count',
                        type=int,
                        default=10,
                        help='Number of adversarial images to be saved')

    args = parser.parse_args()

    path = os.path.join("./Results", args.dataset)
    if not os.path.exists(path):
        os.makedirs(path)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    print(device)
    ## Download the ImageNet data and set the correct path

    if args.dataset == "imagenet":
        model = torchvision.models.resnet18(pretrained=True)
        test_loader = load_ImageNet()

    elif args.dataset == "mnist":

        # Load pretrained CNN on MNIST

        model = Net()
        model.load_state_dict(torch.load('mnist_cnn.pt', map_location=device))
        test_loader = torch.utils.data.DataLoader(datasets.MNIST(
            '../data',
            train=False,
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ])),
                                                  batch_size=100,
                                                  shuffle=True)
        model = model.to(device)

    else:
        raise ValueError(f"Dataset {args.dataset} not available")
    model = model.to(device)
    model = model.eval()

    # Loading Test data

    successful = 0
    total = 0
    steps = args.steps

    while True:

        # For ImageNet we do not have the data in a format the PyTorch loader expects,
        # so we use foolbox's bundled samples instead
        if args.dataset == "imagenet":
            preprocessing = dict(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225],
                                 axis=-3)
            bounds = (0, 1)
            fmodel = PyTorchModel(model,
                                  bounds=bounds,
                                  preprocessing=preprocessing)
            fmodel = fmodel.transform_bounds((0, 1))
            assert fmodel.bounds == (0, 1)
            images, labels = samples(fmodel, dataset='imagenet', batchsize=20)
            batch = 500  # number of random perturbations in each iteration
        else:
            examples = iter(test_loader)
            images, labels = next(examples)  # Python 3: use next() on the iterator
            batch = 10000  # number of random perturbations in each iteration

        iterations = int(np.ceil(steps / batch)) + 1

        images = images.to(device)
        labels = labels.to(device)

        # no more test images
        if not len(labels):
            break

        total += len(labels)

        for image, label in zip(images, labels):
            output = model(image.unsqueeze(0))
            if output.argmax() == label:
                base_image = torch.clamp(image, 0, 1)
                base_label = label

                for iteration in range(1, iterations):

                    perturbed_samples = get_perturbed_samples(
                        base_image, batch, args.std, device)

                    prediction = model(perturbed_samples).argmax(dim=1)
                    # indices of all incorrect predictions
                    success = (~prediction.eq(base_label)).nonzero()

                    if success.nelement():
                        successful += 1
                        print("Success rate so far :{}/{}".format(
                            successful, total))

                        if args.save_count:
                            index = success[0].item()
                            print("Norm of image", torch.norm(base_image))
                            print(
                                "Norm of added noise",
                                torch.norm(perturbed_samples[index] -
                                           base_image))

                            adversarial_image = perturbed_samples[index].to(
                                "cpu")
                            if adversarial_image.shape[0] == 1:
                                plt.imshow(adversarial_image[0], cmap='gray')
                                plt.show()
                            else:
                                plt.imshow(adversarial_image.permute(1, 2, 0))
                                plt.show()

                            # rescale image before saving
                            resize = transforms.Compose([
                                transforms.ToPILImage(),
                                transforms.Resize(size=200),
                                transforms.ToTensor()
                            ])
                            adversarial_image = resize(adversarial_image)
                            save_image(adversarial_image,
                                       os.path.join(
                                           path,
                                           str(args.save_count) + ".png"),
                                       padding=0)
                            args.save_count -= 1

                        break

    print("Accuracy on perturbed samples", 100.0 * successful / total)
Example #28
	model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])


	model.fit(x_train, y_train, epochs=5)

	model.evaluate(x_test, y_test, verbose=2)

	# instantiate the model
	fmodel = TensorFlowModel(model, bounds=(0, 1))

	# get data and test the model
	# wrapping the tensors with ep.astensors is optional, but it allows
	# us to work with EagerPy tensors in the following

	##########################################################
	images, labels = samples(fmodel, dataset="mnist", batchsize=100)
	images1, labels1 = ep.astensors(*samples(fmodel, dataset="mnist", batchsize=100))
	print(accuracy(fmodel, images1, labels1))


	predict = fmodel(images).numpy()
	tf.nn.softmax(predict).numpy()
	correct_pred = tf.math.argmax(predict, 1)
	print(correct_pred)

	# print(images)
	images_arr = np.array(images)

	# print(images_arr)
	# print(images_arr.shape)  # 16, 28, 28, 1