Example #1: Keras VGG19 on ImageNet
    def test_imagenet_vgg19(self):
        keras.backend.set_learning_phase(0)
        model = keras.applications.VGG19()
        model.trainable = False
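        # Caffe-style VGG preprocessing: flip RGB to BGR, then subtract the per-channel mean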
        mean = (103.939, 116.779, 123.68)
        std = (1, 1, 1)
        data_generator_preprocess = self.ImageNetValDataGenerator(transform=lambda x: (x[..., ::-1] - mean) / std)
        data_generator_original = self.ImageNetValDataGenerator(batch_size=1)
        bounds = (0, 255)

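        # Wrap the native Keras model so the measurement utilities can drive it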
        measure_model = KerasModel(model)

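        # Top-1 / top-5 accuracy over the preprocessed validation generator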
        accuracy = Accuracy()
        measure_model.predict_generator(data_generator_preprocess, [accuracy.update, accuracy.report])

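        # Neuron coverage accumulated from intermediate layer activations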
        neuron_coverage = NeuronCoverage()
        measure_model.intermediate_layer_outputs_generator(data_generator_preprocess, [neuron_coverage.update, neuron_coverage.report])

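        # Adversarial robustness on the original (unpreprocessed) images, with the mean/std passed through as the preprocessing argument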
        robustness = Robustness(bounds)
        measure_model.adversarial_samples_generator(data_generator_original, 3, bounds, [robustness.update, robustness.report, utils.draw_adversarial_samples], preprocessing=(mean, std))

        self.assertAlmostEqual(accuracy.get(1), 0.550000)
        self.assertAlmostEqual(accuracy.get(5), 0.825000)
        self.assertAlmostEqual(neuron_coverage.get(0.3), 0.505920, places=2)
        self.assertAlmostEqual(robustness.success_rate, 1.000000)
Example #2: TensorFlow MobileNet V2 (frozen graph) on ImageNet
    def test_imagenet_mobilenet_v2(self):
        tf.get_logger().setLevel('ERROR')
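        # Load the frozen MobileNet V2 inference graph and bind a float32 input placeholder to it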
        graph = tf.Graph()
        with tf.io.gfile.GFile(
                utils.python_file_dir(__file__) +
                '/models/tensorflow_mobilenet_v2/mobilenet_v2_1.4_224_frozen.pb',
                'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
        with graph.as_default():
            input = tf.compat.v1.placeholder(tf.float32,
                                             shape=[None, 224, 224, 3])
            tf.import_graph_def(graph_def, {'input': input})
        session = tf.compat.v1.InteractiveSession(graph=graph)
        logits = graph.get_tensor_by_name(
            'import/MobilenetV2/Predictions/Reshape_1:0')
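        # MobileNet preprocessing: scale pixels from [0, 255] to [-1, 1]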
        mean = (127.5, 127.5, 127.5)
        std = (127.5, 127.5, 127.5)
        data_preprocess = self.ImageNetValData(224,
                                               224,
                                               'mobilenet_v2',
                                               transform=lambda x:
                                               (x - mean) / std,
                                               label_offset=1)
        data_original = self.ImageNetValData(224,
                                             224,
                                             'mobilenet_v2',
                                             transform=None,
                                             label_offset=1)
        bounds = (0, 255)

        measure_model = TensorFlowModel(session, logits, input)

        accuracy = Accuracy()
        measure_model.predict(data_preprocess.x, data_preprocess.y,
                              [accuracy.update, accuracy.report])

        neuron_coverage = NeuronCoverage()
        measure_model.intermediate_layer_outputs(
            data_preprocess.x,
            [neuron_coverage.update, neuron_coverage.report])

        robustness = Robustness(bounds)
        measure_model.adversarial_samples(
            data_original.x,
            data_original.y,
            3,
            bounds, [
                robustness.update, robustness.report,
                utils.draw_adversarial_samples
            ],
            batch_size=1,
            preprocessing=(mean, std))

        session.close()

        self.assertAlmostEqual(accuracy.get(1), 0.725000)
        self.assertAlmostEqual(accuracy.get(5), 0.900000)
        self.assertAlmostEqual(neuron_coverage.get(0.3), 0.288900, places=2)
        self.assertAlmostEqual(robustness.success_rate, 1.000000)
Example #3: TensorFlow slim VGG-19 (checkpoint) on ImageNet
    def test_imagenet_vgg19(self):
        tf.get_logger().setLevel('ERROR')
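        # Build the slim VGG-19 graph and restore its weights from the checkpoint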
        session = tf.compat.v1.InteractiveSession(graph=tf.Graph())
        input = tf.compat.v1.placeholder(tf.float32, shape=(None, 224, 224, 3))
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=DeprecationWarning)
            logits, _ = vgg.vgg_19(input, is_training=False)
        restorer = tf.compat.v1.train.Saver()
        restorer.restore(
            session,
            utils.python_file_dir(__file__) +
            '/models/tensorflow_vgg_19/vgg_19.ckpt')
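        # slim VGG preprocessing: subtract the per-channel RGB mean; a std of 1 leaves the scale unchanged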
        mean = (123.68, 116.78, 103.94)
        std = (1, 1, 1)
        data_preprocess = self.ImageNetValData(224,
                                               224,
                                               'vgg19',
                                               transform=lambda x:
                                               (x - mean) / std,
                                               label_offset=0)
        data_original = self.ImageNetValData(224,
                                             224,
                                             'vgg19',
                                             transform=None,
                                             label_offset=0)
        bounds = (0, 255)

        measure_model = TensorFlowModel(session, logits, input)

        accuracy = Accuracy()
        measure_model.predict(data_preprocess.x, data_preprocess.y,
                              [accuracy.update, accuracy.report])

        neuron_coverage = NeuronCoverage()
        measure_model.intermediate_layer_outputs(
            data_preprocess.x,
            [neuron_coverage.update, neuron_coverage.report])

        robustness = Robustness(bounds)
        measure_model.adversarial_samples(
            data_original.x,
            data_original.y,
            3,
            bounds, [
                robustness.update, robustness.report,
                utils.draw_adversarial_samples
            ],
            batch_size=1,
            preprocessing=(mean, std))

        session.close()

        self.assertAlmostEqual(accuracy.get(1), 0.625000)
        self.assertAlmostEqual(accuracy.get(5), 0.925000)
        self.assertAlmostEqual(neuron_coverage.get(0.3), 0.576892, places=2)
        self.assertAlmostEqual(robustness.success_rate, 1.000000)
Example #4: PyTorch simple CNN on CIFAR-10
    def test_cifar10_simple(self):
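        # Small convolutional CIFAR-10 network (three conv layers, two fully connected) matching the saved state dict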
        class Model(torch.nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.conv1 = torch.nn.Conv2d(3, 32, 3)
                self.pool1 = torch.nn.MaxPool2d(2)
                self.conv2 = torch.nn.Conv2d(32, 64, 3)
                self.pool2 = torch.nn.MaxPool2d(2)
                self.conv3 = torch.nn.Conv2d(64, 64, 3)
                self.fc1 = torch.nn.Linear(64 * 4 * 4, 64)
                self.fc2 = torch.nn.Linear(64, 10)

            def forward(self, data):
                data = torch.nn.functional.relu(self.conv1(data))
                data = self.pool1(data)
                data = torch.nn.functional.relu(self.conv2(data))
                data = self.pool2(data)
                data = torch.nn.functional.relu(self.conv3(data))
                data = data.view(-1, 64 * 4 * 4)
                data = torch.nn.functional.relu(self.fc1(data))
                data = self.fc2(data)
                return data

        model = Model()
        model.load_state_dict(
            torch.load(
                utils.python_file_dir(__file__) +
                '/models/pytorch_cifar10_simple.pth'))
        dataset = self.cifar10_dataset()
        bounds = (0, 1)
        num_classes = 10

        measure_model = PyTorchModel(model)

        accuracy = Accuracy()
        measure_model.predict(dataset, [accuracy.update, accuracy.report])

        neuron_coverage = NeuronCoverage()
        measure_model.intermediate_layer_outputs(
            dataset, [neuron_coverage.update, neuron_coverage.report])

        robustness = Robustness(bounds)
        measure_model.adversarial_samples(
            dataset,
            3,
            bounds,
            num_classes, [
                robustness.update, robustness.report,
                utils.draw_adversarial_samples
            ],
            batch_size=1)

        self.assertAlmostEqual(accuracy.get(1), 0.427500)
        self.assertAlmostEqual(accuracy.get(5), 0.906400)
        self.assertAlmostEqual(neuron_coverage.get(0.6), 0.534188, places=2)
        self.assertAlmostEqual(robustness.success_rate, 1.000000)
Example #5: MXNet Gluon simple CNN on CIFAR-10
    def test_cifar10_simple(self):
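        # Gluon counterpart of the simple CIFAR-10 CNN: three conv blocks followed by two dense layers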
        model = mxnet.gluon.nn.Sequential()
        with model.name_scope():
            model.add(
                mxnet.gluon.nn.Conv2D(channels=32,
                                      kernel_size=3,
                                      activation='relu'))
            model.add(mxnet.gluon.nn.MaxPool2D(pool_size=2))
            model.add(
                mxnet.gluon.nn.Conv2D(channels=64,
                                      kernel_size=3,
                                      activation='relu'))
            model.add(mxnet.gluon.nn.MaxPool2D(pool_size=2))
            model.add(
                mxnet.gluon.nn.Conv2D(channels=64,
                                      kernel_size=3,
                                      activation='relu'))
            model.add(mxnet.gluon.nn.Flatten())
            model.add(mxnet.gluon.nn.Dense(64, activation='relu'))
            model.add(mxnet.gluon.nn.Dense(10))
        model.load_parameters(
            utils.python_file_dir(__file__) +
            '/models/mxnet_cifar10_simple.params')
        dataset = self.cifar10_dataset()
        bounds = (0, 1)
        num_classes = 10

        measure_model = MXNetModel(model)

        accuracy = Accuracy()
        measure_model.predict(dataset, [accuracy.update, accuracy.report])

        neuron_coverage = NeuronCoverage()
        measure_model.intermediate_layer_outputs(
            dataset, [neuron_coverage.update, neuron_coverage.report])

        robustness = Robustness(bounds)
        measure_model.adversarial_samples(
            dataset,
            3,
            bounds,
            num_classes, [
                robustness.update, robustness.report,
                utils.draw_adversarial_samples
            ],
            batch_size=1)

        self.assertAlmostEqual(accuracy.get(1), 0.687000)
        self.assertAlmostEqual(accuracy.get(5), 0.966600)
        self.assertAlmostEqual(neuron_coverage.get(0.6), 0.470085, places=2)
        self.assertAlmostEqual(robustness.success_rate, 1.000000)
Example #6: PyTorch simple MLP on MNIST
    def test_mnist_simple(self):
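        # Fully connected MNIST classifier: 784 -> 128 -> 64 -> 10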
        class Model(torch.nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.fc1 = torch.nn.Linear(1 * 28 * 28, 128)
                self.fc2 = torch.nn.Linear(128, 64)
                self.fc3 = torch.nn.Linear(64, 10)

            def forward(self, data):
                data = data.view(-1, 1 * 28 * 28)
                data = torch.nn.functional.relu(self.fc1(data))
                data = torch.nn.functional.relu(self.fc2(data))
                data = self.fc3(data)
                return data

        model = Model()
        model.load_state_dict(
            torch.load(
                utils.python_file_dir(__file__) +
                '/models/pytorch_mnist_simple.pth'))
        dataset = self.mnist_dataset()
        bounds = (0, 1)
        num_classes = 10

        measure_model = PyTorchModel(model)

        accuracy = Accuracy()
        measure_model.predict(dataset, [accuracy.update, accuracy.report])

        neuron_coverage = NeuronCoverage()
        measure_model.intermediate_layer_outputs(
            dataset, [neuron_coverage.update, neuron_coverage.report])

        robustness = Robustness(bounds)
        measure_model.adversarial_samples(
            dataset,
            3,
            bounds,
            num_classes, [
                robustness.update, robustness.report,
                utils.draw_adversarial_samples
            ],
            batch_size=1)

        self.assertAlmostEqual(accuracy.get(1), 0.962200)
        self.assertAlmostEqual(accuracy.get(5), 0.998900)
        self.assertAlmostEqual(neuron_coverage.get(0.7), 0.876238, places=2)
        self.assertAlmostEqual(robustness.success_rate, 1.000000)
Example #7: TensorFlow simple MNIST model restored from a checkpoint
    def test_mnist_simple(self):
        tf.get_logger().setLevel('ERROR')
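        # Restore the trained MNIST graph from its meta file and latest checkpoint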
        session = tf.compat.v1.InteractiveSession(graph=tf.Graph())
        restorer = tf.compat.v1.train.import_meta_graph(
            utils.python_file_dir(__file__) +
            '/models/tensorflow_mnist_simple/tensorflow_mnist_simple.meta')
        restorer.restore(
            session,
            tf.train.latest_checkpoint(
                utils.python_file_dir(__file__) +
                '/models/tensorflow_mnist_simple/'))
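        # Look up the input placeholder and the logits tensor by name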
        input = session.graph.get_tensor_by_name('Placeholder:0')
        logits = session.graph.get_tensor_by_name('fc2/add:0')
        x, y = self.mnist_data()
        bounds = (0, 1)

        measure_model = TensorFlowModel(session, logits, input)

        accuracy = Accuracy()
        measure_model.predict(x, y, [accuracy.update, accuracy.report])

        neuron_coverage = NeuronCoverage()
        measure_model.intermediate_layer_outputs(
            x, [neuron_coverage.update, neuron_coverage.report])

        robustness = Robustness(bounds)
        measure_model.adversarial_samples(
            x,
            y,
            3,
            bounds, [
                robustness.update, robustness.report,
                utils.draw_adversarial_samples
            ],
            batch_size=1)

        session.close()

        self.assertAlmostEqual(accuracy.get(1), 0.937700)
        self.assertAlmostEqual(accuracy.get(5), 0.997200)
        self.assertAlmostEqual(neuron_coverage.get(0.3), 0.591150, places=2)
        self.assertAlmostEqual(robustness.success_rate, 1.000000)
Example #8: Keras simple MNIST model loaded from HDF5
    def test_mnist_simple(self):
        keras.backend.set_learning_phase(0)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=DeprecationWarning)
            model = keras.models.load_model(utils.python_file_dir(__file__) + '/models/keras_mnist_simple.h5')
        model.trainable = False
        x, y = self.mnist_data()
        bounds = (0, 1)

        measure_model = KerasModel(model)

        accuracy = Accuracy()
        measure_model.predict(x, y, [accuracy.update, accuracy.report])

        neuron_coverage = NeuronCoverage()
        measure_model.intermediate_layer_outputs(x, [neuron_coverage.update, neuron_coverage.report])

        robustness = Robustness(bounds)
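        # Only the first five test images are attacked here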
        measure_model.adversarial_samples(x[:5], y[:5], 3, bounds, [robustness.update, robustness.report, utils.draw_adversarial_samples])

        self.assertAlmostEqual(accuracy.get(1), 0.991000)
        self.assertAlmostEqual(accuracy.get(5), 1.000000)
        self.assertAlmostEqual(neuron_coverage.get(0.6), 0.589744, places=2)
        self.assertAlmostEqual(robustness.success_rate, 0.6666666667)
Example #9: MXNet Gluon pretrained VGG16 on ImageNet
    def test_imagenet_vgg16(self):
        model = mxnet.gluon.model_zoo.vision.vgg16(pretrained=True)
        dataset_preprocessed = self.ImageNetValDataset(True)
        dataset_original = self.ImageNetValDataset(False)
        bounds = (0, 1)
        num_classes = 1000

        measure_model = MXNetModel(model)

        accuracy = Accuracy()
        measure_model.predict(dataset_preprocessed,
                              [accuracy.update, accuracy.report])

        neuron_coverage = NeuronCoverage()
        measure_model.intermediate_layer_outputs(
            dataset_preprocessed,
            [neuron_coverage.update, neuron_coverage.report])

        robustness = Robustness(bounds)
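        # mean/std are reshaped to (3, 1, 1) so they broadcast over channel-first (CHW) image tensors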
        measure_model.adversarial_samples(
            dataset_original,
            3,
            bounds,
            num_classes, [
                robustness.update, robustness.report,
                utils.draw_adversarial_samples
            ],
            batch_size=1,
            preprocessing=(np.array(self.ImageNetValDataset.mean).reshape(
                (3, 1, 1)), np.array(self.ImageNetValDataset.std).reshape(
                    (3, 1, 1))))

        self.assertAlmostEqual(accuracy.get(1), 0.700000)
        self.assertAlmostEqual(accuracy.get(5), 0.950000)
        self.assertAlmostEqual(neuron_coverage.get(0.3), 0.597570, places=2)
        self.assertAlmostEqual(robustness.success_rate, 1.000000)
Example #10: TensorFlow slim ResNet V2 50 (checkpoint) on ImageNet
    def test_imagenet_resnet50_v2(self):
        tf.get_logger().setLevel('ERROR')
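        # Build the slim ResNet V2 50 graph (1001 classes, including the background class) and restore it from the checkpoint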
        session = tf.compat.v1.InteractiveSession(graph=tf.Graph())
        input = tf.compat.v1.placeholder(tf.float32, shape=(None, 299, 299, 3))
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=DeprecationWarning)
            with tf.contrib.slim.arg_scope(resnet_v2.resnet_arg_scope()):
                resnet_v2.resnet_v2_50(input,
                                       num_classes=1001,
                                       is_training=False)
        restorer = tf.compat.v1.train.Saver()
        restorer.restore(
            session,
            utils.python_file_dir(__file__) +
            '/models/tensorflow_resnet_v2_50/resnet_v2_50.ckpt')
        logits = session.graph.get_tensor_by_name(
            'resnet_v2_50/predictions/Reshape:0')
        mean = (127.5, 127.5, 127.5)
        std = (127.5, 127.5, 127.5)
        data_preprocess = self.ImageNetValData(299,
                                               299,
                                               'resnet50_v2',
                                               transform=lambda x:
                                               (x - mean) / std,
                                               label_offset=1)
        data_original = self.ImageNetValData(299,
                                             299,
                                             'resnet50_v2',
                                             transform=None,
                                             label_offset=1)
        bounds = (0, 255)

        measure_model = TensorFlowModel(session, logits, input)

        accuracy = Accuracy()
        measure_model.predict(data_preprocess.x, data_preprocess.y,
                              [accuracy.update, accuracy.report])

        neuron_coverage = NeuronCoverage()
        measure_model.intermediate_layer_outputs(
            data_preprocess.x,
            [neuron_coverage.update, neuron_coverage.report])

        robustness = Robustness(bounds)
        measure_model.adversarial_samples(
            data_original.x,
            data_original.y,
            3,
            bounds, [
                robustness.update, robustness.report,
                utils.draw_adversarial_samples
            ],
            batch_size=1,
            preprocessing=(mean, std))

        session.close()

        self.assertAlmostEqual(accuracy.get(1), 0.750000)
        self.assertAlmostEqual(accuracy.get(5), 0.875000)
        self.assertAlmostEqual(neuron_coverage.get(0.3), 0.600558, places=2)
        self.assertAlmostEqual(robustness.success_rate, 1.000000)
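
All ten examples follow the same measure-then-assert workflow: wrap the framework-native model, run prediction for Accuracy, collect intermediate layer outputs for NeuronCoverage, and generate adversarial samples for Robustness. The sketch below shows that workflow outside the test harness for a Keras model. It is a minimal illustration, not part of the test suite: the measure import path and the load_validation_data helper are assumptions, while the wrapper and metric calls mirror the tests above.

    import keras

    # Assumed import path; use whatever module actually exports these classes in your installation.
    from measure import KerasModel, Accuracy, NeuronCoverage, Robustness


    def load_validation_data():
        # Placeholder helper: return (x, y) validation arrays. Preprocessing (mean/std, channel
        # order) is elided here; see the tests above for the exact handling per model.
        raise NotImplementedError


    keras.backend.set_learning_phase(0)
    model = keras.applications.VGG19()
    model.trainable = False
    x, y = load_validation_data()
    bounds = (0, 255)

    measure_model = KerasModel(model)

    # Accuracy: run predictions and feed the results to the metric callbacks.
    accuracy = Accuracy()
    measure_model.predict(x, y, [accuracy.update, accuracy.report])

    # Neuron coverage: collect intermediate layer outputs and update the metric.
    neuron_coverage = NeuronCoverage()
    measure_model.intermediate_layer_outputs(x, [neuron_coverage.update, neuron_coverage.report])

    # Robustness: generate adversarial samples within the pixel bounds, mirroring the calls above.
    robustness = Robustness(bounds)
    measure_model.adversarial_samples(x, y, 3, bounds, [robustness.update, robustness.report])

    print(accuracy.get(1), accuracy.get(5), neuron_coverage.get(0.3), robustness.success_rate)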