Example #1
    def test_smt_convolution(self, input_activation_map_channels):
        activation_map_size = 224
        image = np.random.rand(activation_map_size, activation_map_size,
                               input_activation_map_channels)
        kernels = np.random.rand(7, 7, input_activation_map_channels, 2)

        with self.test_session() as sess:
            tf_convolution = sess.run(
                tf.nn.convolution(input=np.expand_dims(image, axis=0),
                                  filter=kernels,
                                  strides=2,
                                  padding='SAME',
                                  data_format=None,
                                  dilations=None,
                                  name=None))

        np.testing.assert_allclose(
            np.moveaxis(tf_convolution, -1, 0).reshape(-1),
            utils.flatten_nested_lists(
                utils.smt_convolution(
                    input_activation_maps=np.moveaxis(np.copy(image), -1, 0),
                    kernels=kernels,
                    kernel_biases=np.zeros(kernels.shape[-1]),
                    padding=(2, 3),
                    strides=2)))
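The padding=(2, 3) passed to utils.smt_convolution mirrors TensorFlow's 'SAME' padding for a 224-pixel edge, a 7x7 kernel, and stride 2. A minimal sketch of that arithmetic (the standard SAME-padding formula; the helper below is illustrative and not part of the original test):

import math

def same_padding(input_size, kernel_size, stride):
    # TensorFlow 'SAME' padding: the output is ceil(input / stride) wide, and
    # the required padding is split so that the extra pixel goes to the end.
    output_size = math.ceil(input_size / stride)
    total_pad = max((output_size - 1) * stride + kernel_size - input_size, 0)
    pad_before = total_pad // 2
    return pad_before, total_pad - pad_before

print(same_padding(input_size=224, kernel_size=7, stride=2))  # (2, 3)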
Example #2
    def test_flatten_nested_lists(self):
        image_edge_length = 2
        activation_maps = np.random.rand(3, image_edge_length,
                                         image_edge_length)

        flattened_convolutions = utils.flatten_nested_lists(
            activation_maps=activation_maps)

        np.testing.assert_allclose(np.asarray(flattened_convolutions),
                                   activation_maps.reshape(-1))
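For reference, the behaviour this test checks is plain row-major flattening. A minimal sketch of such a helper, assuming it only needs to handle nested lists and numpy arrays (the real utils.flatten_nested_lists may differ, for example to support z3 expressions):

import numpy as np

def flatten_nested_lists(activation_maps):
    # Recursively flattens nested lists / arrays in row-major order, matching
    # numpy's reshape(-1) on the same data.
    flat = []
    for element in activation_maps:
        if isinstance(element, (list, np.ndarray)):
            flat.extend(flatten_nested_lists(element))
        else:
            flat.append(element)
    return flat

activation_maps = np.random.rand(3, 2, 2)
assert np.allclose(flatten_nested_lists(activation_maps),
                   activation_maps.reshape(-1))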
Example #3
    def test_encode_input(self, image_channels):
        # Creates a random image and checks that the encoded image, after
        # multiplying by the mask bits (all set to 1), is the same as the
        # original image.
        image_edge_length = 2
        image = np.random.rand(image_edge_length, image_edge_length,
                               image_channels)
        z3_var = _get_z3_var(index=0)

        # encoded_image has dimensions
        # (image_channels, image_edge_length, image_edge_length)
        encoded_image = masking._encode_input(
            image=image, z3_mask=[z3_var for _ in range(image_edge_length**2)])
        solver = z3.Solver()
        solver.add(z3_var == 1)
        # Swap the axes of the image so that it has the same dimensions as the
        # encoded image.
        image = masking._reorder(image).reshape(-1)
        encoded_image = utils.flatten_nested_lists(encoded_image)
        for i in range(image_channels * image_edge_length**2):
            solver.add(encoded_image[i] == image[i])

        self.assertEqual(str(solver.check()), 'sat')
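_get_z3_var is a helper from the original test module; the pattern the test relies on is standard z3py usage. A self-contained sketch of that pattern (the variable name and pixel value are illustrative):

import z3

# A single integer mask bit, analogous to what _get_z3_var(index=0) returns.
mask_bit = z3.Int('mask_0')

solver = z3.Solver()
solver.add(mask_bit == 1)
# With the mask bit fixed to 1, pixel * mask_bit must equal the pixel itself.
pixel = 0.25
solver.add(pixel * mask_bit == pixel)

assert str(solver.check()) == 'sat'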
Example #4
def find_mask_full_encoding(image,
                            weights,
                            biases,
                            run_params,
                            window_size,
                            label_index,
                            delta=0,
                            timeout=600,
                            num_unique_solutions=1):
  """Finds a binary mask for a given image and a trained Neural Network.

  Args:
    image: float numpy array with shape (image_edge_length, image_edge_length,
        image_channels), image to be masked. For MNIST, the pixel values are
        between [0, 1] and for Imagenet, the pixel values are between
        [-117, 138].
    weights: list of num_layers float numpy arrays with shape
        (output_dim, input_dim), weights of the neural network.
    biases: list of num_layers float numpy arrays with shape (output_dim,),
        biases of the neural network.
    run_params: RunParams with model_type, model_path, image_placeholder_shape,
        activations, tensor_names.
    window_size: int, side length of the square mask.
    label_index: int, index of the label of the training image.
    delta: float, the logit of the correct label must be greater than each of
        the other logits by at least delta. Its value is always >= 0. It is
        only used when constrain_final_layer is True.
    timeout: int, solver timeout in seconds.
    num_unique_solutions: int, number of unique solutions you want to sample.

  Returns:
    result: dictionary,
      * image: float numpy array with shape
          (image_edge_length * image_edge_length * image_channels,)
      * combined_solver_runtime: float, time taken by the solver to find all
          the solutions.
      * unmasked_logits: float numpy array with shape (num_outputs,)
      * unmasked_first_layer: float numpy array with shape
          (num_hidden_nodes_first_layer,)
      * masked_first_layer: list with length num_sols, contains float numpy
          array with shape (num_hidden_nodes_first_layer,)
      * inv_masked_first_layer: list with length num_sols, contains float numpy
          array with shape (num_hidden_nodes_first_layer,)
      * masks: list with length num_sols, contains float numpy array
          with shape (image_edge_length ** 2,)
      * masked_images: list with length num_sols, contains float numpy array
          with shape (image_edge_length ** 2,)
      * inv_masked_images: list with length num_sols, contains float numpy
          array with shape (image_edge_length ** 2,)
      * masked_logits: list with length num_sols, contains float numpy array
          with shape (num_outputs,)
      * inv_masked_logits: list with length num_sols, contains float numpy
          array with shape (num_outputs,)
      * solver_outputs: list with length num_sols, contains strings
          corresponding to every sampled solution saying 'sat', 'unsat' or
          'unknown'.
  """
  _verify_image_dimensions(image)
  image_placeholder_shape = run_params.image_placeholder_shape
  tensor_names = run_params.tensor_names
  # z3's timeout is in milliseconds
  z3.set_option('timeout', timeout * 1000)
  image_edge_length, _, _ = image.shape
  num_masks_along_row = image_edge_length // window_size
  session = utils.restore_model(run_params.model_path)

  z3_mask = [z3.Int('mask_%d' % i) for i in range(num_masks_along_row ** 2)]

  unmasked_predictions = session.run(
      tensor_names,
      feed_dict={
          tensor_names['input']: image.reshape(image_placeholder_shape)})

  smt_output, _ = utils.smt_forward(
      features=utils.flatten_nested_lists(_encode_input(
          image=image,
          z3_mask=z3_mask,
          window_size=window_size)),
      weights=weights,
      biases=biases,
      activations=run_params.activations)

  z3_optimizer = _formulate_smt_constraints_final_layer(
      z3_optimizer=utils.ImageOptimizer(
          z3_mask=z3_mask,
          window_size=window_size,
          edge_length=image_edge_length),
      smt_output=smt_output,
      delta=delta,
      label_index=label_index)
  solver_start_time = time.time()
  result = collections.defaultdict(list)

  # All the masks found in each call of z3_optimizer.generator() are guaranteed
  # to be unique since duplicate solutions are blocked. For more details,
  # refer to z3_optimizer.generator().
  for mask, solver_output in z3_optimizer.generator(num_unique_solutions):
    _record_solution(result=result,
                     mask=mask,
                     solver_output=solver_output,
                     image=image,
                     session=session,
                     run_params=run_params)
  result.update({
      'image': image.reshape(-1),
      'combined_solver_runtime': time.time() - solver_start_time,
      'unmasked_logits': np.squeeze(unmasked_predictions['logits']),
      'unmasked_first_layer': np.squeeze(unmasked_predictions['first_layer'])})
  session.close()
  return result
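A hypothetical call to find_mask_full_encoding might look like the sketch below. The checkpoint path, tensor names, shapes, and the RunParams constructor are placeholders inferred from the docstring, not values from the original repository:

import numpy as np

# Hypothetical MNIST-sized setup: a 28x28x1 image with pixels in [0, 1] and a
# two-layer fully connected network with weights of shape (output_dim, input_dim).
image = np.random.rand(28, 28, 1)
weights = [np.random.rand(128, 784), np.random.rand(10, 128)]
biases = [np.zeros(128), np.zeros(10)]

run_params = RunParams(  # assumed constructor; fields follow the docstring
    model_type='fully_connected',
    model_path='/path/to/checkpoint',
    image_placeholder_shape=(1, 784),
    activations=['relu', 'linear'],
    tensor_names={'input': 'input:0',
                  'logits': 'logits:0',
                  'first_layer': 'first_layer:0'})

result = find_mask_full_encoding(image=image, weights=weights, biases=biases,
                                 run_params=run_params, window_size=4,
                                 label_index=7, num_unique_solutions=5)
# Each entry of result['masks'] is a flat binary mask over the 28x28 grid.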
Example #5
def find_mask_first_layer(image,
                          label_index,
                          run_params,
                          window_size,
                          score_method,
                          top_k=None,
                          gamma=None,
                          timeout=600,
                          num_unique_solutions=1):
  """Finds a binary mask for a given image and a trained Neural Network.

  Args:
    image: the input to be masked, either
      * image: float numpy array with shape (image_edge_length,
          image_edge_length, image_channels), image to be masked. For MNIST,
          the pixel values are between [0, 1] and for Imagenet, the pixel
          values are between [-117, 138].
      * text: float numpy array with shape (num_words,), text to be masked.
    label_index: int, index of the label of the training image.
    run_params: RunParams with model_type, model_path, image_placeholder_shape,
        padding, strides, tensor_names.
    window_size: int, side length of the square mask.
    score_method: str, method used to assign scores to the hidden nodes; the
        nodes with the top_k scores are chosen. Takes one of
        {'activations', 'blurred_gradients', 'gradients',
        'integrated_gradients', 'integrated_gradients_black_white_baselines'}.
    top_k: int, constrains the nodes with the top k activations in the first
        hidden layer. It is only used when constrain_final_layer is false.
    gamma: float, the masked activation must be greater than gamma times the
        unmasked activation. Its value always lies in [0, 1).
    timeout: int, solver timeout in seconds.
    num_unique_solutions: int, number of unique solutions you want to sample.

  Returns:
    result: dictionary,
      * image: float numpy array with shape
          (image_edge_length * image_edge_length * image_channels,)
      * combined_solver_runtime: float, time taken by the solver to find all
          the solutions.
      * masked_first_layer: list with length num_unique_solutions,
          contains float list with length (num_hidden_nodes_first_layer,)
      * inv_masked_first_layer: list with length num_unique_solutions,
          contains float list with length (num_hidden_nodes_first_layer,)
      * masks: list with length num_unique_solutions, contains float numpy array
          with shape (image_edge_length ** 2,)
      * masked_images: list with length num_unique_solutions, contains float
          numpy array with shape (image_channels * image_edge_length ** 2,)
      * inv_masked_images: list with length num_unique_solutions,
          contains float numpy array with shape (image_edge_length ** 2,)
      * masked_logits: list with length num_unique_solutions,
          contains float list with length (num_outputs,)
      * inv_masked_logits: list with length num_unique_solutions, contains float
          list with length (num_outputs,)
      * solver_outputs: list with length num_unique_solutions, contains strings
          corresponding to every sampled solution saying 'sat', 'unsat' or
          'unknown'.
      * chosen_indices: indices of the first layer hidden nodes sorted by
          their score_method scores (only recorded when the first layer is
          convolutional).
  """
  # z3's timeout is in milliseconds
  z3.set_option('timeout', timeout * 1000)
  model_type = run_params.model_type
  result = collections.defaultdict(list)

  if model_type == 'text_cnn':
    # For text data, window size is always 1 i.e. 1 masking variable per word.
    masked_input, unmasked_predictions, session, z3_optimizer = _process_text(
        image, run_params)
  else:
    masked_input, unmasked_predictions, session, z3_optimizer = _process_image(
        image, run_params, window_size)

  if model_type == 'fully_connected':
    _, smt_hidden_input = utils.smt_forward(
        features=utils.flatten_nested_lists(masked_input),
        weights=[unmasked_predictions['weights_layer_1']],
        biases=[unmasked_predictions['biases_layer_1']],
        activations=['relu'])
    # smt_hidden_input holds the first layer's pre-relu activations.
    z3_optimizer = _formulate_smt_constraints_fully_connected_layer(
        z3_optimizer=z3_optimizer,
        nn_first_layer=unmasked_predictions['first_layer'].reshape(-1),
        smt_first_layer=smt_hidden_input[0],
        gamma=gamma,
        top_k=top_k)
  else:
    chosen_indices = _sort_indices(
        unmasked_predictions=unmasked_predictions,
        score_method=score_method,
        session=session,
        image=image,
        run_params=run_params,
        label_index=label_index)
    result.update({'chosen_indices': chosen_indices})
    z3_optimizer = _formulate_smt_constraints_convolution_layer(
        z3_optimizer=z3_optimizer,
        kernels=_reshape_kernels(
            kernels=unmasked_predictions['weights_layer_1'],
            model_type=model_type),
        biases=unmasked_predictions['biases_layer_1'],
        chosen_indices=chosen_indices[-top_k:],  # pylint: disable=invalid-unary-operand-type
        # unmasked_predictions['first_layer'] has the shape
        # (batch_size, output_activation_map_size, output_activation_map_size,
        # output_activation_map_channels). This is reshaped into
        # (output_activation_map_channels, output_activation_map_size,
        # output_activation_map_size) and then flattened.
        conv_activations=_reorder(_remove_batch_axis(
            unmasked_predictions['first_layer'])).reshape(-1),
        input_activation_maps=masked_input,
        output_activation_map_shape=_get_activation_map_shape(
            activation_maps_shape=unmasked_predictions['first_layer'].shape,
            model_type=model_type),
        strides=run_params.strides,
        padding=run_params.padding,
        gamma=gamma)

  solver_start_time = time.time()
  # All the masks found in each call of z3_optimizer.generator() are guaranteed
  # to be unique since duplicate solutions are blocked. For more details,
  # refer to z3_optimizer.generator().
  for mask, solver_output in z3_optimizer.generator(num_unique_solutions):
    _record_solution(result=result,
                     mask=mask,
                     solver_output=solver_output,
                     image=image,
                     session=session,
                     run_params=run_params)
  result.update({
      'image': image.reshape(-1),
      'combined_solver_runtime': time.time() - solver_start_time,
      'unmasked_logits': unmasked_predictions['logits'].reshape(-1),
      'unmasked_first_layer': unmasked_predictions['first_layer'].reshape(-1)})
  session.close()
  return result
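The comments above note that z3_optimizer.generator() blocks duplicate solutions. The underlying idiom is standard in z3: after every 'sat' result, add a blocking clause that excludes the model just found. A minimal sketch of that idiom, assuming integer 0/1 mask variables (the real ImageOptimizer.generator may differ in details):

import z3

def enumerate_solutions(solver, variables, num_solutions):
    # Samples up to num_solutions distinct models. After each 'sat' verdict, a
    # blocking clause rules out the assignment that was just returned.
    for _ in range(num_solutions):
        verdict = str(solver.check())
        if verdict != 'sat':
            yield None, verdict
            continue
        model = solver.model()
        assignment = [model[variable] for variable in variables]
        yield assignment, verdict
        solver.add(z3.Or([variable != value
                          for variable, value in zip(variables, assignment)]))

mask_bits = [z3.Int('mask_%d' % i) for i in range(4)]
solver = z3.Solver()
solver.add([z3.Or(bit == 0, bit == 1) for bit in mask_bits])
for assignment, verdict in enumerate_solutions(solver, mask_bits, 3):
    print(verdict, assignment)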