Example No. 1
        def body(i, metrics):
            """Compute the sum of all metrics."""
            test_data = ibp.build_dataset(data_test,
                                          batch_size=batch_size,
                                          sequential=True)
            predictor(test_data.image, override=True, is_training=False)
            # Admissible perturbations: an L-infinity ball of radius epsilon
            # around each test image, clipped to the valid input range.
            input_interval_bounds = ibp.IntervalBounds(
                tf.maximum(test_data.image - FLAGS.epsilon, input_bounds[0]),
                tf.minimum(test_data.image + FLAGS.epsilon, input_bounds[1]))
            predictor.propagate_bounds(input_interval_bounds)
            # Specification: the true class logit should exceed every other logit.
            test_specification = ibp.ClassificationSpecification(
                test_data.label, num_classes)
            # Attack used to compute the adversarial part of the test losses.
            test_attack = attack_builder(predictor,
                                         test_specification,
                                         FLAGS.epsilon,
                                         input_bounds=input_bounds,
                                         optimizer_builder=ibp.UnrolledAdam)

            # Use CROWN-IBP bound or IBP bound.
            if FLAGS.bound_method == 'crown-ibp':
                test_losses = ibp.crown.Losses(
                    predictor,
                    test_specification,
                    test_attack,
                    use_crown_ibp=True,
                    crown_bound_schedule=tf.constant(1.))
            else:
                test_losses = ibp.Losses(predictor, test_specification,
                                         test_attack)

            # Evaluate the losses on this batch and accumulate the scalar metrics.
            test_losses(test_data.label)
            new_metrics = []
            for m, n in zip(metrics, test_losses.scalar_metrics):
                new_metrics.append(m + n)
            return i + 1, new_metrics
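A body(i, metrics) function of this shape is the kind of loop body one would hand to a TF1-style tf.while_loop that walks over every test batch and sums the scalar metrics. The following sketch shows one plausible way to drive it, assuming TF1 graph mode; sum_metrics_over_test_set, initial_metrics and num_test_batches are illustrative names, not taken from the snippet above.

import tensorflow.compat.v1 as tf

def sum_metrics_over_test_set(body, initial_metrics, num_test_batches):
    """Runs `body` once per test batch and averages the accumulated metrics."""
    def cond(i, *unused_metrics):
        return i < num_test_batches

    # num_test_batches is assumed to be a Python int here.
    _, summed = tf.while_loop(
        cond,
        body,
        loop_vars=[tf.constant(0), initial_metrics],
        back_prop=False,
        parallel_iterations=1)
    return [m / float(num_test_batches) for m in summed]

# Illustrative usage: one scalar accumulator per entry of test_losses.scalar_metrics.
#   initial_metrics = [tf.constant(0.) for _ in range(num_metrics)]
#   mean_metrics = sum_metrics_over_test_set(body, initial_metrics, num_test_batches)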
Example No. 2
def body(i, metrics):
    """Compute the sum of all metrics."""
    test_data = ibp.build_dataset((x_test, y_test),
                                  batch_size=batch_size,
                                  sequential=True)
    predictor(test_data.image, override=True, is_training=False)
    input_interval_bounds = ibp.IntervalBounds(
        tf.maximum(test_data.image - FLAGS.epsilon,
                   input_bounds[0]),
        tf.minimum(test_data.image + FLAGS.epsilon,
                   input_bounds[1]))
    predictor.propagate_bounds(input_interval_bounds)
    test_specification = ibp.ClassificationSpecification(
        test_data.label, num_classes)
    test_attack = attack_builder(
        predictor,
        test_specification,
        FLAGS.epsilon,
        input_bounds=input_bounds,
        optimizer_builder=ibp.UnrolledAdam)
    test_losses = ibp.Losses(predictor, test_specification,
                             test_attack)
    test_losses(test_data.label)
    new_metrics = []
    for m, n in zip(metrics, test_losses.scalar_metrics):
        new_metrics.append(m + n)
    return i + 1, new_metrics
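The bounds handed to propagate_bounds are plain elementwise intervals: the lower bound is max(image - epsilon, valid minimum) and the upper bound is min(image + epsilon, valid maximum). For reference, the NumPy sketch below illustrates the standard interval-arithmetic rule that such propagation applies to a single affine layer; it is an illustration of the rule, not the library's implementation, and all names in it are made up for the example.

import numpy as np

def affine_interval_bounds(lower, upper, w, b):
    """Propagates elementwise bounds [lower, upper] through y = x @ w + b."""
    mu = (lower + upper) / 2.    # centre of the input box
    r = (upper - lower) / 2.     # radius of the input box
    mu_out = mu @ w + b          # centre of the output box
    r_out = r @ np.abs(w)        # radius of the output box
    return mu_out - r_out, mu_out + r_out

# Interval around a nominal input, clipped to the valid input range [0, 1].
epsilon = 0.1
x = np.random.rand(1, 4).astype(np.float32)
w = np.random.randn(4, 3).astype(np.float32)
b = np.zeros(3, dtype=np.float32)
lower, upper = affine_interval_bounds(np.clip(x - epsilon, 0., 1.),
                                      np.clip(x + epsilon, 0., 1.), w, b)
# The nominal output always lies inside the propagated interval.
assert np.all(lower <= x @ w + b + 1e-6) and np.all(x @ w + b <= upper + 1e-6)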
Example No. 3
    def testEquivalenceLinearClassification(self):
        num_classes = 3

        def _build_model():
            layer_types = (('conv2d', (2, 2), 4, 'VALID', 1),
                           ('activation', 'relu'),
                           ('linear', 10),
                           ('activation', 'relu'))
            return ibp.DNN(num_classes, layer_types)

        # Input.
        batch_size = 100
        width = height = 2
        channels = 3
        num_restarts = 10
        z = tf.random.uniform((batch_size, height, width, channels),
                              minval=-1.,
                              maxval=1.,
                              dtype=tf.float32)
        y = tf.random.uniform((batch_size, ),
                              minval=0,
                              maxval=num_classes,
                              dtype=tf.int64)
        predictor = _build_model()
        predictor = ibp.VerifiableModelWrapper(predictor)
        logits = predictor(z)
        random_logits1 = tf.random.uniform(
            (num_restarts, batch_size, num_classes))
        random_logits2 = tf.random.uniform(
            (num_restarts, num_classes - 1, batch_size, num_classes))
        input_bounds = ibp.IntervalBounds(z - 2., z + 4.)
        predictor.propagate_bounds(input_bounds)

        # Two constructions of the same specification: the library class and
        # the test helper; they should produce identical values.
        s1 = ibp.ClassificationSpecification(y, num_classes)
        s2 = _build_classification_specification(y, num_classes)

        def _build_values(s):
            # Specification values computed from the propagated bounds (both
            # settings of collapse) plus direct evaluations on nominal and
            # random logits.
            return [
                s(predictor.modules, collapse=False),
                s(predictor.modules, collapse=True),
                s.evaluate(logits),
                s.evaluate(random_logits1),
                s.evaluate(random_logits2)
            ]

        v1 = _build_values(s1)
        v2 = _build_values(s2)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output1, output2 = sess.run([v1, v2])
        for a, b in zip(output1, output2):
            self.assertTrue(np.all(np.abs(a - b) < 1e-5))
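As I read it, the test asserts that ibp.ClassificationSpecification and the test helper _build_classification_specification produce identical values, both when computed from the propagated bounds (for either setting of the collapse flag) and when evaluated directly on nominal or random logits. The NumPy sketch below spells out the evaluation semantics this relies on, under the assumption that evaluate(logits) returns, per example, the differences logits[j] - logits[true_label] over the wrong classes j (all negative exactly when the example is classified correctly); the helper name and the restriction to 2-D logits are mine.

import numpy as np

def evaluate_classification_specification(logits, labels):
    """Wrong-class-minus-true-class logit differences, shape [batch, num_classes - 1]."""
    values = []
    for example_logits, label in zip(logits, labels):
        true_logit = example_logits[label]
        wrong_logits = np.delete(example_logits, label)
        values.append(wrong_logits - true_logit)
    return np.stack(values)

logits = np.array([[2.0, 0.5, -1.0],
                   [0.0, 1.0, 3.0]], dtype=np.float32)
labels = np.array([0, 1])
print(evaluate_classification_specification(logits, labels))
# -> [[-1.5, -3.0],   example 0: all differences negative (correctly classified)
#     [-1.0,  2.0]]   example 1: class 2 beats the labelled class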