Example #1
    def test_calc_conv_batchnorm(self):
        image_data = self._image_data()
        net = self._network('conv_batchnorm')
        input_bounds = naive_bounds.input_bounds(image_data.image, delta=.1)
        dual_obj, dual_var_lists = self._build_objective(
            net, input_bounds, image_data.label)

        # Explicitly build the expected TensorFlow graph for calculating the objective.
        (
            conv2d_0,
            relu_1,  # pylint:disable=unused-variable
            linear_2,
            relu_3,  # pylint:disable=unused-variable
            linear_obj) = self._verifiable_layer_builder(net).build_layers()
        (mu_0, ), (lam_1, ), (mu_2, ), _ = dual_var_lists

        # Expected input bounds for each layer.
        conv2d_0_lb, conv2d_0_ub = self._expected_input_bounds(
            image_data.image, .1)
        conv2d_0_w, conv2d_0_b = layer_utils.combine_with_batchnorm(
            conv2d_0.module.w, None, conv2d_0.batch_norm)
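        # combine_with_batchnorm folds the batch-norm statistics into the
        # preceding layer, yielding the weights and bias of an equivalent
        # affine layer.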
        relu_1_lb, relu_1_ub = ibp.IntervalBounds(
            conv2d_0_lb, conv2d_0_ub).apply_conv2d(None, conv2d_0_w,
                                                   conv2d_0_b, 'VALID', (1, 1))
        linear_2_lb = snt.BatchFlatten()(tf.nn.relu(relu_1_lb))
        linear_2_ub = snt.BatchFlatten()(tf.nn.relu(relu_1_ub))
        linear_2_w, linear_2_b = layer_utils.combine_with_batchnorm(
            linear_2.module.w, None, linear_2.batch_norm)
        relu_3_lb, relu_3_ub = ibp.IntervalBounds(linear_2_lb,
                                                  linear_2_ub).apply_linear(
                                                      None, linear_2_w,
                                                      linear_2_b)

        # Expected objective value.
        objective = 0
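        # The dual objective is assembled layer by layer: each affine layer
        # contributes a linear_dual_objective term and each ReLU an
        # activation_layer_dual_objective term, coupled through the dual
        # variables mu and lam.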
        act_coeffs_0 = -common.conv_transpose(
            mu_0, conv2d_0_w, conv2d_0.input_shape, 'VALID', (1, 1))
        obj_0 = -tf.reduce_sum(mu_0 * conv2d_0_b, axis=(2, 3, 4))
        objective += standard_layer_calcs.linear_dual_objective(
            None, act_coeffs_0, obj_0, conv2d_0_lb, conv2d_0_ub)
        objective += standard_layer_calcs.activation_layer_dual_objective(
            tf.nn.relu, mu_0, lam_1, relu_1_lb, relu_1_ub)
        act_coeffs_2 = -tf.tensordot(mu_2, tf.transpose(linear_2_w), axes=1)
        obj_2 = -tf.tensordot(mu_2, linear_2_b, axes=1)
        objective += standard_layer_calcs.linear_dual_objective(
            snt.BatchFlatten(preserve_dims=2)(lam_1), act_coeffs_2, obj_2,
            linear_2_lb, linear_2_ub)
        objective_w, objective_b = common.targeted_objective(
            linear_obj.module.w, linear_obj.module.b, image_data.label)
        objective += standard_layer_calcs.activation_layer_dual_objective(
            tf.nn.relu, mu_2, -objective_w, relu_3_lb, relu_3_ub)
        objective += objective_b

        self._assert_dual_objective_close(objective, dual_obj, image_data)
def _build_spec_input():
    # Specification expects a list of objects with output_bounds or
    # input_bounds attributes.
    w = np.identity(2, dtype=np.float32)
    b = np.ones(2, dtype=np.float32)
    snt_module = MockLinearModule(tf.constant(w), tf.constant(b))
    z_lower = np.array([[1, 2]], dtype=np.float32)
    z_upper = np.array([[3, 4]], dtype=np.float32)
    input_bounds = ibp.IntervalBounds(tf.constant(z_lower),
                                      tf.constant(z_upper))
    z_lower += b
    z_upper += b
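    # With w the identity and b all ones, the expected output bounds are the
    # input bounds shifted by 1.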
    output_bounds = ibp.IntervalBounds(tf.constant(z_lower),
                                       tf.constant(z_upper))
    return [MockModule(input_bounds, output_bounds, snt_module)]
Example #3
    def test_calc_avgpool(self):
        image_data = self._image_data()
        net = self._network('avgpool')
        input_bounds = naive_bounds.input_bounds(image_data.image, delta=.1)
        dual_obj, dual_var_lists = self._build_objective(
            net, input_bounds, image_data.label)

        # Explicitly build the expected TensorFlow graph for calculating the objective.
        (
            conv2d_0,
            relu_1,  # pylint:disable=unused-variable
            avgpool_2,
            relu_3,  # pylint:disable=unused-variable
            linear_obj) = self._verifiable_layer_builder(net).build_layers()
        (mu_0, ), (lam_1, ), (mu_2, ), _ = dual_var_lists

        # Expected input bounds for each layer.
        conv2d_0_lb, conv2d_0_ub = self._expected_input_bounds(
            image_data.image, .1)
        relu_1_lb, relu_1_ub = ibp.IntervalBounds(
            conv2d_0_lb, conv2d_0_ub).apply_conv2d(None, conv2d_0.module.w,
                                                   conv2d_0.module.b, 'SAME',
                                                   (1, 1))
        avgpool_2_lb = tf.nn.relu(relu_1_lb)
        avgpool_2_ub = tf.nn.relu(relu_1_ub)
        relu_3_lb = tf.nn.avg_pool(avgpool_2_lb,
                                   ksize=[2, 2],
                                   padding='VALID',
                                   strides=(1, 1))
        relu_3_ub = tf.nn.avg_pool(avgpool_2_ub,
                                   ksize=[2, 2],
                                   padding='VALID',
                                   strides=(1, 1))
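        # ReLU and average pooling are both monotonic, so interval bounds
        # propagate by applying each op directly to the lower and upper
        # endpoints.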

        # Expected objective value.
        objective = 0
        act_coeffs_0 = -common.conv_transpose(
            mu_0, conv2d_0.module.w, conv2d_0.input_shape, 'SAME', (1, 1))
        obj_0 = -tf.reduce_sum(mu_0 * conv2d_0.module.b, axis=(2, 3, 4))
        objective += standard_layer_calcs.linear_dual_objective(
            None, act_coeffs_0, obj_0, conv2d_0_lb, conv2d_0_ub)
        objective += standard_layer_calcs.activation_layer_dual_objective(
            tf.nn.relu, mu_0, lam_1, relu_1_lb, relu_1_ub)
        act_coeffs_2 = -common.avgpool_transpose(
            mu_2,
            result_shape=relu_1.output_shape,
            kernel_shape=(2, 2),
            strides=(1, 1))
        objective += standard_layer_calcs.linear_dual_objective(
            lam_1, act_coeffs_2, 0., avgpool_2_lb, avgpool_2_ub)
        objective_w, objective_b = common.targeted_objective(
            linear_obj.module.w, linear_obj.module.b, image_data.label)
        shaped_objective_w = tf.reshape(
            objective_w,
            [self._num_classes(), self._batch_size()] + avgpool_2.output_shape)
        objective += standard_layer_calcs.activation_layer_dual_objective(
            tf.nn.relu, mu_2, -shaped_objective_w, relu_3_lb, relu_3_ub)
        objective += objective_b

        self._assert_dual_objective_close(objective, dual_obj, image_data)
Example #4
        def body(i, metrics):
            """Compute the sum of all metrics."""
            test_data = ibp.build_dataset(data_test,
                                          batch_size=batch_size,
                                          sequential=True)
            predictor(test_data.image, override=True, is_training=False)
            input_interval_bounds = ibp.IntervalBounds(
                tf.maximum(test_data.image - FLAGS.epsilon, input_bounds[0]),
                tf.minimum(test_data.image + FLAGS.epsilon, input_bounds[1]))
            predictor.propagate_bounds(input_interval_bounds)
            test_specification = ibp.ClassificationSpecification(
                test_data.label, num_classes)
            test_attack = attack_builder(predictor,
                                         test_specification,
                                         FLAGS.epsilon,
                                         input_bounds=input_bounds,
                                         optimizer_builder=ibp.UnrolledAdam)

            # Use CROWN-IBP bound or IBP bound.
            if FLAGS.bound_method == 'crown-ibp':
                test_losses = ibp.crown.Losses(
                    predictor,
                    test_specification,
                    test_attack,
                    use_crown_ibp=True,
                    crown_bound_schedule=tf.constant(1.))
            else:
                test_losses = ibp.Losses(predictor, test_specification,
                                         test_attack)

            test_losses(test_data.label)
            new_metrics = []
            for m, n in zip(metrics, test_losses.scalar_metrics):
                new_metrics.append(m + n)
            return i + 1, new_metrics
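        # Usage sketch (hypothetical surrounding code): in TF1 a body like
        # this is driven by tf.while_loop to sum metrics over all sequential
        # test batches, e.g.
        #   total_metrics = [tf.constant(0.) for _ in range(num_metrics)]
        #   _, summed = tf.while_loop(lambda i, *_: i < num_batches, body,
        #                             [tf.constant(0), total_metrics])
        # where num_batches and num_metrics are assumed to be defined
        # elsewhere.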
 def testFCBackwardBounds(self):
     m = snt.Linear(1,
                    initializers={
                        'w': tf.constant_initializer(1.),
                        'b': tf.constant_initializer(2.),
                    })
     z = tf.constant([[1, 2, 3]], dtype=tf.float32)
     m(z)  # Connect to create weights.
     m = ibp.LinearFCWrapper(m)
     input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
     m.propagate_bounds(input_bounds)  # Create IBP bounds.
     crown_init_bounds = _generate_identity_spec([m], shape=(1, 1, 1))
     output_bounds = m.propagate_bounds(crown_init_bounds)
     concrete_bounds = output_bounds.concretize()
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         lw, uw, lb, ub, cl, cu = sess.run([
             output_bounds.lower.w, output_bounds.upper.w,
             output_bounds.lower.b, output_bounds.upper.b,
             concrete_bounds.lower, concrete_bounds.upper
         ])
         self.assertTrue(np.all(lw == 1.))
         self.assertTrue(np.all(lb == 2.))
         self.assertTrue(np.all(uw == 1.))
         self.assertTrue(np.all(ub == 2.))
         cl = cl.item()
         cu = cu.item()
         self.assertAlmostEqual(5., cl)
         self.assertAlmostEqual(11., cu)
Example #6
 def body(i, metrics):
     """Compute the sum of all metrics."""
     test_data = ibp.build_dataset((x_test, y_test),
                                   batch_size=batch_size,
                                   sequential=True)
     predictor(test_data.image, override=True, is_training=False)
     input_interval_bounds = ibp.IntervalBounds(
         tf.maximum(test_data.image - FLAGS.epsilon,
                    input_bounds[0]),
         tf.minimum(test_data.image + FLAGS.epsilon,
                    input_bounds[1]))
     predictor.propagate_bounds(input_interval_bounds)
     test_specification = ibp.ClassificationSpecification(
         test_data.label, num_classes)
     test_attack = attack_builder(
         predictor,
         test_specification,
         FLAGS.epsilon,
         input_bounds=input_bounds,
         optimizer_builder=ibp.UnrolledAdam)
     test_losses = ibp.Losses(predictor, test_specification,
                              test_attack)
     test_losses(test_data.label)
     new_metrics = []
     for m, n in zip(metrics, test_losses.scalar_metrics):
         new_metrics.append(m + n)
     return i + 1, new_metrics
 def testConv2dSymbolicBounds(self):
     m = snt.Conv2D(output_channels=1,
                    kernel_shape=(2, 2),
                    padding='VALID',
                    stride=1,
                    use_bias=True,
                    initializers={
                        'w': tf.constant_initializer(1.),
                        'b': tf.constant_initializer(2.),
                    })
     z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
     z = tf.reshape(z, [1, 2, 2, 1])
     m(z)  # Connect to create weights.
     m = ibp.LinearConv2dWrapper(m)
     input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
     input_bounds = ibp.SymbolicBounds.convert(input_bounds)
     output_bounds = input_bounds.propagate_through(m)
     output_bounds = ibp.IntervalBounds.convert(output_bounds)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         l, u = sess.run([output_bounds.lower, output_bounds.upper])
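         # All four kernel weights are 1 and the bias is 2, so the single
         # VALID output is sum(z) + 2 = 12; widening each input by 1 gives
         # 12 - 4 = 8 and 12 + 4 = 16.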
         l = l.item()
         u = u.item()
         self.assertAlmostEqual(8., l)
         self.assertAlmostEqual(16., u)
 def testConv2dBackwardBounds(self):
     m = snt.Conv2D(output_channels=1,
                    kernel_shape=(2, 2),
                    padding='VALID',
                    stride=1,
                    use_bias=True,
                    initializers={
                        'w': tf.constant_initializer(1.),
                        'b': tf.constant_initializer(2.),
                    })
     z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
     z = tf.reshape(z, [1, 2, 2, 1])
     m(z)  # Connect to create weights.
     m = ibp.LinearConv2dWrapper(m)
     input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
     m.propagate_bounds(input_bounds)  # Create IBP bounds.
     crown_init_bounds = _generate_identity_spec([m], shape=(1, 1, 1, 1, 1))
     output_bounds = m.propagate_bounds(crown_init_bounds)
     concrete_bounds = output_bounds.concretize()
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         l, u = sess.run([concrete_bounds.lower, concrete_bounds.upper])
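         # For a single affine layer, the backward bounds concretize to the
         # same values as forward IBP: 8 and 16.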
         l = l.item()
         u = u.item()
         self.assertAlmostEqual(8., l)
         self.assertAlmostEqual(16., u)
 def testFCSymbolicBounds(self):
     m = snt.Linear(1,
                    initializers={
                        'w': tf.constant_initializer(1.),
                        'b': tf.constant_initializer(2.),
                    })
     z = tf.constant([[1, 2, 3]], dtype=tf.float32)
     m(z)  # Connect to create weights.
     m = ibp.LinearFCWrapper(m)
     input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
     input_bounds = ibp.SymbolicBounds.convert(input_bounds)
     output_bounds = input_bounds.propagate_through(m)
     concrete_bounds = ibp.IntervalBounds.convert(output_bounds)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         l, u, cl, cu = sess.run([
             output_bounds.lower, output_bounds.upper,
             concrete_bounds.lower, concrete_bounds.upper
         ])
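         # Symbolic bounds carry an affine form (w, b) together with the
         # input interval; with w == 1 and b == 2 over inputs in
         # [0, 2] x [1, 3] x [2, 4], concretizing gives (0+1+2)+2 = 5 and
         # (2+3+4)+2 = 11.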
         self.assertTrue(np.all(l.w == 1.))
         self.assertTrue(np.all(l.b == 2.))
         self.assertAlmostEqual([[0, 1, 2]], l.lower.tolist())
         self.assertAlmostEqual([[2, 3, 4]], l.upper.tolist())
         self.assertTrue(np.all(u.w == 1.))
         self.assertTrue(np.all(u.b == 2.))
         self.assertAlmostEqual([[0, 1, 2]], u.lower.tolist())
         self.assertAlmostEqual([[2, 3, 4]], u.upper.tolist())
         cl = cl.item()
         cu = cu.item()
         self.assertAlmostEqual(5., cl)
         self.assertAlmostEqual(11., cu)
 def testBatchNormIntervalBounds(self):
     z = tf.constant([[1, 2, 3]], dtype=tf.float32)
     input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
     g = tf.reshape(tf.range(-1, 2, dtype=tf.float32), [1, 3])
     b = tf.reshape(tf.range(3, dtype=tf.float32), [1, 3])
     batch_norm = ibp.BatchNorm(scale=True,
                                offset=True,
                                eps=0.,
                                initializers={
                                    'gamma':
                                    lambda *args, **kwargs: g,
                                    'beta':
                                    lambda *args, **kwargs: b,
                                    'moving_mean':
                                    tf.constant_initializer(1.),
                                    'moving_variance':
                                    tf.constant_initializer(4.),
                                })
     batch_norm(z, is_training=False)
     batch_norm = ibp.BatchNormWrapper(batch_norm)
     # Test propagation.
     output_bounds = batch_norm.propagate_bounds(input_bounds)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         l, u = sess.run([output_bounds.lower, output_bounds.upper])
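         # With moving_mean 1, moving_variance 4 and eps 0 the layer computes
         # gamma * (x - 1) / 2 + beta; the negative gamma in channel 0 swaps
         # which input endpoint attains each output bound.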
         self.assertAlmostEqual([[-.5, 1., 2.5]], l.tolist())
         self.assertAlmostEqual([[.5, 1., 3.5]], u.tolist())
Example #11
 def _propagation_test(self, wrapper, inputs, outputs):
   input_bounds = ibp.IntervalBounds(inputs, inputs)
   output_bounds = wrapper.propagate_bounds(input_bounds)
   with self.test_session() as sess:
     sess.run(tf.global_variables_initializer())
     o, l, u = sess.run([outputs, output_bounds.lower, output_bounds.upper])
     self.assertAlmostEqual(o.tolist(), l.tolist())
     self.assertAlmostEqual(o.tolist(), u.tolist())
 def testMulIntervalBounds(self):
     m = tf.multiply
     z = tf.constant([[-2, 3, 0]], dtype=tf.float32)
     m = ibp.PiecewiseMonotonicWrapper(m, (0, ))
     input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
     output_bounds = m.propagate_bounds(input_bounds, input_bounds)
     with self.test_session() as sess:
         l, u = sess.run([output_bounds.lower, output_bounds.upper])
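         # Endpoint products of each interval with an independent copy of
         # itself: [-3, -1] -> [1, 9], [2, 4] -> [4, 16], [-1, 1] -> [-1, 1].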
         self.assertAlmostEqual([[1., 4., -1.]], l.tolist())
         self.assertAlmostEqual([[9., 16., 1.]], u.tolist())
Example #13
  def testMultipleInputs(self):
    # Tensor to overwrite.
    def _build(z0, z1):
      return z0 + z1

    z0 = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
    z1 = tf.constant([[2, 2, 4, 4]], dtype=tf.float32)
    wrapper = ibp.VerifiableModelWrapper(_build)
    logits = wrapper(z0, z1)
    input_bounds0 = ibp.IntervalBounds(z0 - 2, z0 + 1)
    input_bounds1 = ibp.IntervalBounds(z1, z1 + 10)
    output_bounds = wrapper.propagate_bounds(input_bounds0, input_bounds1)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      o, l, u = sess.run([logits, output_bounds.lower, output_bounds.upper])
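      # Addition is increasing in both arguments, so l = (z0 - 2) + z1 and
      # u = (z0 + 1) + (z1 + 10).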
      self.assertAlmostEqual([[3., 4., 7., 8.]], o.tolist())
      self.assertAlmostEqual([[1., 2., 5., 6.]], l.tolist())
      self.assertAlmostEqual([[14., 15., 18., 19.]], u.tolist())
 def testSubIntervalBounds(self):
     m = tf.subtract
     z = tf.constant([[-2, 3, 0]], dtype=tf.float32)
     m = ibp.PiecewiseMonotonicWrapper(m)
     input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
     output_bounds = m.propagate_bounds(input_bounds, input_bounds)
     with self.test_session() as sess:
         l, u = sess.run([output_bounds.lower, output_bounds.upper])
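         # The two arguments are treated as independent intervals, so x - y
         # over [z - 1, z + 1] gives [-2, 2] in every coordinate.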
         self.assertAlmostEqual([[-2., -2., -2.]], l.tolist())
         self.assertAlmostEqual([[2., 2., 2.]], u.tolist())
 def testReluIntervalBounds(self):
     m = tf.nn.relu
     z = tf.constant([[-2, 3]], dtype=tf.float32)
     m = ibp.IncreasingMonotonicWrapper(m)
     input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
     output_bounds = m.propagate_bounds(input_bounds)
     with self.test_session() as sess:
         l, u = sess.run([output_bounds.lower, output_bounds.upper])
         self.assertAlmostEqual([[0., 2.]], l.tolist())
         self.assertAlmostEqual([[0., 4.]], u.tolist())
    def testEquivalenceLinearClassification(self):
        num_classes = 3

        def _build_model():
            layer_types = (('conv2d', (2, 2), 4, 'VALID',
                            1), ('activation', 'relu'), ('linear', 10),
                           ('activation', 'relu'))
            return ibp.DNN(num_classes, layer_types)

        # Input.
        batch_size = 100
        width = height = 2
        channels = 3
        num_restarts = 10
        z = tf.random.uniform((batch_size, height, width, channels),
                              minval=-1.,
                              maxval=1.,
                              dtype=tf.float32)
        y = tf.random.uniform((batch_size, ),
                              minval=0,
                              maxval=num_classes,
                              dtype=tf.int64)
        predictor = _build_model()
        predictor = ibp.VerifiableModelWrapper(predictor)
        logits = predictor(z)
        random_logits1 = tf.random.uniform(
            (num_restarts, batch_size, num_classes))
        random_logits2 = tf.random.uniform(
            (num_restarts, num_classes - 1, batch_size, num_classes))
        input_bounds = ibp.IntervalBounds(z - 2., z + 4.)
        predictor.propagate_bounds(input_bounds)

        # Specifications.
        s1 = ibp.ClassificationSpecification(y, num_classes)
        s2 = _build_classification_specification(y, num_classes)

        def _build_values(s):
            return [
                s(predictor.modules, collapse=False),
                s(predictor.modules, collapse=True),
                s.evaluate(logits),
                s.evaluate(random_logits1),
                s.evaluate(random_logits2)
            ]
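        # Evaluate each specification in every supported mode: module
        # traversal with and without collapsing, and direct evaluation on
        # 2-D, 3-D and 4-D logit tensors.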

        v1 = _build_values(s1)
        v2 = _build_values(s2)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output1, output2 = sess.run([v1, v2])
        for a, b in zip(output1, output2):
            self.assertTrue(np.all(np.abs(a - b) < 1e-5))
Example #17
    def testEndToEnd(self):
        predictor = FixedNN()
        predictor = ibp.VerifiableModelWrapper(predictor)
        # Labels.
        labels = tf.constant([1], dtype=tf.int64)
        # Connect to input.
        z = tf.constant([[1, 2, 3]], dtype=tf.float32)
        predictor(z, is_training=True)
        # Input bounds.
        eps = 1.
        input_bounds = ibp.IntervalBounds(z - eps, z + eps)
        predictor.propagate_bounds(input_bounds)
        # Create the output specification (it forces the first logit to be greater).
        c = tf.constant([[[1, -1]]], dtype=tf.float32)
        d = tf.constant([[0]], dtype=tf.float32)
        # Turn elision off for more interesting results.
        spec = ibp.LinearSpecification(c, d, collapse=False)
        # Create an attack.
        attack = ibp.UntargetedPGDAttack(predictor,
                                         spec,
                                         eps,
                                         num_steps=1,
                                         input_bounds=(-100., 100.))
        # Build loss.
        losses = ibp.Losses(predictor,
                            spec,
                            attack,
                            interval_bounds_loss_type='hinge',
                            interval_bounds_hinge_margin=0.)
        losses(labels)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            # We expect the worst-case logits from IBP to be [9, 4].
            # The adversarial attack should fail since logits are always [l, l + 1].
            # Similarly, the nominal predictions are correct.
            accuracy_values, loss_values = sess.run(
                [losses.scalar_metrics, losses.scalar_losses])
            self.assertAlmostEqual(1., accuracy_values.nominal_accuracy)
            self.assertAlmostEqual(0., accuracy_values.verified_accuracy)
            self.assertAlmostEqual(1., accuracy_values.attack_accuracy)
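            # The two logits always differ by exactly 1, so both cross
            # entropies equal log(1 + exp(-1)) ~= 0.3133; the verified hinge
            # loss is the worst-case margin c . [9, 4] = 9 - 4 = 5.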
            expected_xent = 0.31326168751822947
            self.assertAlmostEqual(expected_xent,
                                   loss_values.nominal_cross_entropy,
                                   places=5)
            self.assertAlmostEqual(expected_xent,
                                   loss_values.attack_cross_entropy,
                                   places=5)
            expected_hinge = 5.
            self.assertAlmostEqual(expected_hinge, loss_values.verified_loss)
    def testSoftmaxIntervalBounds(self, axis, expected_outputs):
        z = tf.constant([[1., -10., -10.], [1., -10., -10.]])
        input_bounds = ibp.IntervalBounds(z - 1.0, z + 10.0)

        softmax_fn = lambda x: tf.nn.softmax(x, axis=axis)
        softmax_fn = ibp.VerifiableModelWrapper(softmax_fn)
        softmax_fn(z)
        output_bounds = softmax_fn.propagate_bounds(input_bounds)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            l, u = sess.run([output_bounds.lower, output_bounds.upper])
        self.assertTrue(np.all(np.abs(expected_outputs[0] - u) < 1e-3))
        self.assertTrue(np.all(np.abs(expected_outputs[1] - l) < 1e-3))
 def testReluBackwardBounds(self):
     m = tf.nn.relu
     z = tf.constant([[-2, 3]], dtype=tf.float32)
     m = ibp.IncreasingMonotonicWrapper(m)
     input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
     m.propagate_bounds(input_bounds)  # Create IBP bounds.
     crown_init_bounds = _generate_identity_spec([m],
                                                 shape=(1, 2, 2),
                                                 dimension=2)
     output_bounds = m.propagate_bounds(crown_init_bounds)
     concrete_bounds = output_bounds.concretize()
     with self.test_session() as sess:
         l, u = sess.run([concrete_bounds.lower, concrete_bounds.upper])
         self.assertAlmostEqual([[0., 2.]], l.tolist())
         self.assertAlmostEqual([[0., 4.]], u.tolist())
 def testFCIntervalBounds(self):
     m = snt.Linear(1,
                    initializers={
                        'w': tf.constant_initializer(1.),
                        'b': tf.constant_initializer(2.),
                    })
     z = tf.constant([[1, 2, 3]], dtype=tf.float32)
     m(z)  # Connect to create weights.
     m = ibp.LinearFCWrapper(m)
     input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
     output_bounds = m.propagate_bounds(input_bounds)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         l, u = sess.run([output_bounds.lower, output_bounds.upper])
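         # With all weights 1 and bias 2: lower = (0 + 1 + 2) + 2 = 5,
         # upper = (2 + 3 + 4) + 2 = 11.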
         l = l.item()
         u = u.item()
         self.assertAlmostEqual(5., l)
         self.assertAlmostEqual(11., u)
    def testCaching(self):
        m = snt.Linear(1,
                       initializers={
                           'w': tf.constant_initializer(1.),
                           'b': tf.constant_initializer(2.),
                       })
        z = tf.placeholder(shape=(1, 3), dtype=tf.float32)
        m(z)  # Connect to create weights.
        m = ibp.LinearFCWrapper(m)
        input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
        output_bounds = m.propagate_bounds(input_bounds)

        input_bounds.enable_caching()
        output_bounds.enable_caching()
        update_all_caches_op = tf.group(
            [input_bounds.update_cache_op, output_bounds.update_cache_op])

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())

            # Initialise the caches based on the model inputs.
            sess.run(update_all_caches_op, feed_dict={z: [[1., 2., 3.]]})

            l, u = sess.run([output_bounds.lower, output_bounds.upper])
            l = l.item()
            u = u.item()
            self.assertAlmostEqual(5., l)
            self.assertAlmostEqual(11., u)

            # Update the cache based on a different set of inputs.
            sess.run([output_bounds.update_cache_op],
                     feed_dict={z: [[2., 3., 7.]]})
            # We only updated the output bounds' cache.
            # This asserts that the computation depends on the underlying
            # input bounds tensor, not on cached version of it.
            # (Thus it doesn't matter what order the caches are updated.)
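            # For the new inputs [2, 3, 7]: lower = (1 + 2 + 6) + 2 = 11,
            # upper = (3 + 4 + 8) + 2 = 17.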

            l, u = sess.run([output_bounds.lower, output_bounds.upper])
            l = l.item()
            u = u.item()
            self.assertAlmostEqual(11., l)
            self.assertAlmostEqual(17., u)
Example #22
    def test_calc_linear(self):
        image_data = self._image_data()
        net = self._network('linear')
        input_bounds = naive_bounds.input_bounds(image_data.image, delta=.1)
        dual_obj, dual_var_lists = self._build_objective(
            net, input_bounds, image_data.label)

        # Explicitly build the expected TensorFlow graph for calculating the objective.
        (
            linear_0,
            relu_1,  # pylint:disable=unused-variable
            linear_obj) = self._verifiable_layer_builder(net).build_layers()
        (mu_0, ), _ = dual_var_lists

        # Expected input bounds for each layer.
        linear_0_lb, linear_0_ub = self._expected_input_bounds(
            image_data.image, .1)
        linear_0_lb = snt.BatchFlatten()(linear_0_lb)
        linear_0_ub = snt.BatchFlatten()(linear_0_ub)
        relu_1_lb, relu_1_ub = ibp.IntervalBounds(linear_0_lb,
                                                  linear_0_ub).apply_linear(
                                                      None, linear_0.module.w,
                                                      linear_0.module.b)

        # Expected objective value.
        objective = 0
        act_coeffs_0 = -tf.tensordot(
            mu_0, tf.transpose(linear_0.module.w), axes=1)
        obj_0 = -tf.tensordot(mu_0, linear_0.module.b, axes=1)
        objective += standard_layer_calcs.linear_dual_objective(
            None, act_coeffs_0, obj_0, linear_0_lb, linear_0_ub)
        objective_w, objective_b = common.targeted_objective(
            linear_obj.module.w, linear_obj.module.b, image_data.label)
        objective += standard_layer_calcs.activation_layer_dual_objective(
            tf.nn.relu, mu_0, -objective_w, relu_1_lb, relu_1_ub)
        objective += objective_b

        self._assert_dual_objective_close(objective, dual_obj, image_data)