def input_bounds(inputs, delta, lower_bound=0., upper_bound=1.,
                 preprocess_fn=None):
  """Calculates interval bounds on the network inputs.

  Args:
    inputs: 2D tensor of shape (batch_size, input_size), or 4D tensor of
      shape (batch_size, height, width, channels), of input examples.
    delta: Permitted perturbation on each input.
    lower_bound: Scalar - smallest permissible input (pixel) value.
    upper_bound: Scalar - largest permissible input (pixel) value.
    preprocess_fn: Optional function mapping tensor to tensor, performing
      pre-processing on the raw inputs.

  Returns:
    `RelativeIntervalBounds` for the inputs, expressed relative to `inputs`.
  """
  # Input range, according to permitted perturbation radius.
  if preprocess_fn:
    lb = preprocess_fn(tf.maximum(inputs - delta, lower_bound)) - inputs
    ub = preprocess_fn(tf.minimum(inputs + delta, upper_bound)) - inputs
  else:
    lb = tf.maximum(-delta, lower_bound - inputs)
    ub = tf.minimum(delta, upper_bound - inputs)
  return ibp.RelativeIntervalBounds(lb, ub, inputs)
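# Illustrative usage sketch (an assumption, not part of the library): build
# relative interval bounds for a batch of inputs under an L-infinity
# perturbation of radius 0.1. The name `images` is hypothetical and stands
# for a float tensor already scaled to [0, 1]. Since the bounds are stored
# relative to the nominal inputs, `.lower` and `.upper` recover the absolute
# interval, as exercised by the tests below. Kept commented out so the module
# has no side effects on import.
#
#   bounds = input_bounds(images, delta=0.1)
#   absolute_lb, absolute_ub = bounds.lower, bounds.upper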
def test_batchnorm_bounds(self, batchnorm_class, dtype, tol, is_training):
  batch_size = 11
  input_size = 7
  output_size = 5

  lb_in = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))
  ub_in = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))
  lb_in, ub_in = tf.minimum(lb_in, ub_in), tf.maximum(lb_in, ub_in)
  nominal = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))

  # Linear layer.
  w = tf.random_normal(dtype=dtype, shape=(input_size, output_size))
  b = tf.random_normal(dtype=dtype, shape=(output_size,))

  # Batch norm layer.
  epsilon = 1.e-2
  bn_initializers = {
      'beta': tf.random_normal_initializer(),
      'gamma': tf.random_uniform_initializer(.1, 3.),
      'moving_mean': tf.random_normal_initializer(),
      'moving_variance': tf.random_uniform_initializer(.1, 3.)
  }
  batchnorm_module = batchnorm_class(offset=True, scale=True, eps=epsilon,
                                     initializers=bn_initializers)
  # Connect the batchnorm module to the graph.
  batchnorm_module(tf.random_normal(dtype=dtype,
                                    shape=(batch_size, output_size)),
                   is_training=is_training)

  bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal, ub_in - nominal,
                                         nominal)
  bounds_out = bounds_in.apply_linear(None, w, b)
  bounds_out = bounds_out.apply_batch_norm(
      batchnorm_module,
      batchnorm_module.mean if is_training else batchnorm_module.moving_mean,
      batchnorm_module.variance if is_training
      else batchnorm_module.moving_variance,
      batchnorm_module.gamma,
      batchnorm_module.beta,
      epsilon)
  lb_out, ub_out = bounds_out.lower, bounds_out.upper

  # Separately, calculate the output bounds by folding the batch norm
  # into the linear layer.
  wn, bn = layer_utils.combine_with_batchnorm(w, b, batchnorm_module)
  bounds_out_lin = bounds_in.apply_linear(None, wn, bn)
  lb_out_lin, ub_out_lin = bounds_out_lin.lower, bounds_out_lin.upper

  init_op = tf.global_variables_initializer()

  with self.test_session() as session:
    session.run(init_op)
    (lb_out_val, ub_out_val,
     lb_out_lin_val, ub_out_lin_val) = session.run((lb_out, ub_out,
                                                    lb_out_lin, ub_out_lin))
    self.assertAllClose(lb_out_val, lb_out_lin_val, atol=tol, rtol=tol)
    self.assertAllClose(ub_out_val, ub_out_lin_val, atol=tol, rtol=tol)
def test_linear_bounds(self, dtype, tol):
  w = tf.constant([[1.0, 2.0, 3.0], [4.0, -5.0, 6.0]], dtype=dtype)
  b = tf.constant([0.1, 0.2, 0.3], dtype=dtype)
  lb_in = tf.constant([[-1.0, -1.0]], dtype=dtype)
  ub_in = tf.constant([[2.0, 2.0]], dtype=dtype)
  nominal = tf.constant([[3.1, 4.2]], dtype=dtype)

  bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal, ub_in - nominal,
                                         nominal)
  bounds_out = bounds_in.apply_linear(None, w, b)
  lb_out, ub_out = bounds_out.lower, bounds_out.upper

  lb_out_exp = np.array([[-4.9, -11.8, -8.7]])
  ub_out_exp = np.array([[10.1, 9.2, 18.3]])

  with self.test_session() as session:
    lb_out_act, ub_out_act = session.run((lb_out, ub_out))
    self.assertAllClose(lb_out_exp, lb_out_act, atol=tol, rtol=tol)
    self.assertAllClose(ub_out_exp, ub_out_act, atol=tol, rtol=tol)
def test_linear_bounds_shape(self, dtype):
  batch_size = 11
  input_size = 7
  output_size = 5

  w = tf.placeholder(dtype=dtype, shape=(input_size, output_size))
  b = tf.placeholder(dtype=dtype, shape=(output_size,))
  lb_rel_in = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
  ub_rel_in = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
  nominal = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))

  bounds_in = ibp.RelativeIntervalBounds(lb_rel_in, ub_rel_in, nominal)
  bounds_out = bounds_in.apply_linear(None, w, b)
  lb_out, ub_out = bounds_out.lower, bounds_out.upper

  self.assertEqual(dtype, lb_out.dtype)
  self.assertEqual(dtype, ub_out.dtype)
  self.assertEqual((batch_size, output_size), lb_out.shape)
  self.assertEqual((batch_size, output_size), ub_out.shape)
def test_conv2d_bounds(self, dtype, tol):
  batch_size = 53
  input_height = 17
  input_width = 7
  kernel_height = 3
  kernel_width = 4
  input_channels = 3
  output_channels = 2
  padding = 'VALID'
  strides = (2, 1)

  w = tf.random_normal(dtype=dtype, shape=(kernel_height, kernel_width,
                                           input_channels, output_channels))
  b = tf.random_normal(dtype=dtype, shape=(output_channels,))
  lb_in = tf.random_normal(dtype=dtype, shape=(batch_size, input_height,
                                               input_width, input_channels))
  ub_in = tf.random_normal(dtype=dtype, shape=(batch_size, input_height,
                                               input_width, input_channels))
  lb_in, ub_in = tf.minimum(lb_in, ub_in), tf.maximum(lb_in, ub_in)
  nominal = tf.random_normal(dtype=dtype, shape=(batch_size, input_height,
                                                 input_width, input_channels))

  bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal, ub_in - nominal,
                                         nominal)
  bounds_out = bounds_in.apply_conv2d(None, w, b, padding, strides)
  lb_out, ub_out = bounds_out.lower, bounds_out.upper

  # Compare against equivalent linear layer.
  bounds_out_lin = _materialised_conv_bounds(w, b, padding, strides, bounds_in)
  lb_out_lin, ub_out_lin = bounds_out_lin.lower, bounds_out_lin.upper

  with self.test_session() as session:
    (lb_out_val, ub_out_val,
     lb_out_lin_val, ub_out_lin_val) = session.run((lb_out, ub_out,
                                                    lb_out_lin, ub_out_lin))
    self.assertAllClose(lb_out_val, lb_out_lin_val, atol=tol, rtol=tol)
    self.assertAllClose(ub_out_val, ub_out_lin_val, atol=tol, rtol=tol)
def test_conv2d_bounds_shape(self, dtype):
  batch_size = 23
  input_height = 17
  input_width = 7
  kernel_height = 3
  kernel_width = 4
  input_channels = 3
  output_channels = 5
  padding = 'VALID'
  strides = (2, 1)

  # Expected output dimensions, based on convolution settings.
  output_height = 8
  output_width = 4

  w = tf.placeholder(dtype=dtype, shape=(kernel_height, kernel_width,
                                         input_channels, output_channels))
  b = tf.placeholder(dtype=dtype, shape=(output_channels,))
  lb_rel_in = tf.placeholder(dtype=dtype, shape=(batch_size, input_height,
                                                 input_width, input_channels))
  ub_rel_in = tf.placeholder(dtype=dtype, shape=(batch_size, input_height,
                                                 input_width, input_channels))
  nominal = tf.placeholder(dtype=dtype, shape=(batch_size, input_height,
                                               input_width, input_channels))

  bounds_in = ibp.RelativeIntervalBounds(lb_rel_in, ub_rel_in, nominal)
  bounds_out = bounds_in.apply_conv2d(None, w, b, padding, strides)
  lb_out, ub_out = bounds_out.lower, bounds_out.upper

  self.assertEqual(dtype, lb_out.dtype)
  self.assertEqual(dtype, ub_out.dtype)
  self.assertEqual((batch_size, output_height, output_width, output_channels),
                   lb_out.shape)
  self.assertEqual((batch_size, output_height, output_width, output_channels),
                   ub_out.shape)