def _construct_optimizer(z3_mask, optimizer_type):
  if optimizer_type == 'image':
    return utils.ImageOptimizer(z3_mask=z3_mask, window_size=1, edge_length=2)
  else:
    return utils.TextOptimizer(z3_mask=z3_mask)
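
# Illustrative sketch (not part of the original tests) of how the helper above
# might be called. It assumes the module-level z3 import used elsewhere in
# this file; any optimizer_type other than 'image' selects the text optimizer.
def _example_construct_optimizer():
  # Four mask variables, matching the hard-coded 2x2 edge_length above.
  z3_mask = [z3.Int('example_mask_%d' % i) for i in range(4)]
  image_optimizer = _construct_optimizer(z3_mask, optimizer_type='image')
  text_optimizer = _construct_optimizer(z3_mask, optimizer_type='text')
  return image_optimizer, text_optimizer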
def test_formulate_smt_constraints_fully_connected_layer(self):
  # For a neural network with 4 hidden nodes in the first layer, the original
  # first layer activations are all 1, and the SMT encoding of the first
  # hidden nodes is [mask_0, mask_1, mask_2, mask_3]. For
  # masked_activation > gamma * original (k such constraints), only k mask
  # bits should be set to 1 and the others to 0.
  image_edge_length = 2
  top_k = np.random.randint(low=1, high=image_edge_length**2)
  z3_mask = [_get_z3_var(index=i) for i in range(image_edge_length**2)]
  smt_first_layer = [1 * z3.ToReal(i) for i in z3_mask]
  nn_first_layer = np.ones(len(smt_first_layer))

  z3_optimizer = utils.ImageOptimizer(
      z3_mask=z3_mask, window_size=1, edge_length=image_edge_length)
  z3_optimizer = masking._formulate_smt_constraints_fully_connected_layer(
      z3_optimizer=z3_optimizer,
      smt_first_layer=smt_first_layer,
      nn_first_layer=nn_first_layer,
      top_k=top_k,
      gamma=np.random.rand())
  mask, result = z3_optimizer._optimize()

  self.assertEqual(result, 'sat')
  self.assertEqual(np.sum(mask), top_k)
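
# Minimal sketch (illustrative only, not part of the original test suite) of
# the constraint idea exercised above, written directly in z3 rather than via
# the project's helpers: each masked activation is mask_i * 1.0, constraining
# the first top_k of them to exceed gamma (with gamma < 1) forces those mask
# bits to 1, and minimizing the mask size drives the remaining bits to 0, so
# exactly top_k bits end up set.
def _example_top_k_constraint(top_k=2, gamma=0.5):
  solver = z3.Optimize()
  mask = [z3.Int('example_mask_%d' % i) for i in range(4)]
  for bit in mask:
    solver.add(z3.Or(bit == 0, bit == 1))
  # Constrain top_k masked activations, then minimize the total mask size.
  for bit in mask[:top_k]:
    solver.add(z3.ToReal(bit) * 1.0 > gamma * 1.0)
  solver.minimize(z3.Sum(mask))
  assert solver.check() == z3.sat
  model = solver.model()
  return [model[bit].as_long() for bit in mask]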
def test_formulate_smt_constraints_convolution_layer(self):
  with self.test_session():
    # Temporary graphs should be created inside a session. Notice that
    # multiple graphs are created in this particular code. So, if each graph
    # isn't created inside a separate session, the tensor names will have
    # unwanted integer suffixes, which would then cause problems when
    # accessing tensors by name.
    _create_temporary_tf_graph_cnn(self.test_model_path)

  image_edge_length = 4
  image_channels = 3
  # The 1st convolution layer has 48 neurons.
  top_k = np.random.randint(low=1, high=48)
  image = np.ones((image_edge_length, image_edge_length, image_channels))
  tensor_names = {
      'input': 'conv2d_input:0',
      'first_layer': 'conv2d/BiasAdd:0',
      'first_layer_relu': 'conv2d/Relu:0',
      'logits': 'dense/BiasAdd:0',
      'softmax': 'dense/Softmax:0',
      'weights_layer_1': 'conv2d/Conv2D/ReadVariableOp:0',
      'biases_layer_1': 'conv2d/bias/Read/ReadVariableOp:0'}
  session = utils.restore_model(self.test_model_path)
  cnn_predictions = session.run(
      tensor_names,
      feed_dict={
          tensor_names['input']: image.reshape(
              (1, image_edge_length, image_edge_length, image_channels))})
  z3_mask = [_get_z3_var(index=i) for i in range(image_edge_length ** 2)]
  first_layer_activations = masking._reorder(masking._remove_batch_axis(
      cnn_predictions['first_layer'])).reshape(-1)
  masked_input = masking._encode_input(
      image=image, z3_mask=z3_mask, window_size=1)

  z3_optimizer = masking._formulate_smt_constraints_convolution_layer(
      z3_optimizer=utils.ImageOptimizer(
          z3_mask=z3_mask, window_size=1, edge_length=image_edge_length),
      kernels=masking._reorder(cnn_predictions['weights_layer_1']),
      biases=cnn_predictions['biases_layer_1'],
      chosen_indices=first_layer_activations.argsort()[-top_k:],
      conv_activations=first_layer_activations,
      input_activation_maps=masked_input,
      output_activation_map_shape=(image_edge_length, image_edge_length),
      strides=1,
      padding=(0, 1),
      gamma=0.5)
  mask, result = z3_optimizer.generate_mask()

  self.assertEqual(result, 'sat')
  self.assertEqual(mask.shape, (image_edge_length, image_edge_length))
  session.close()
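
# Small illustrative sketch (not part of the original tests) of how the
# chosen_indices above are picked: argsort()[-top_k:] returns the indices of
# the top_k largest activations, i.e. the neurons that the convolution-layer
# constraints target.
def _example_chosen_indices():
  activations = np.array([0.1, 0.7, 0.3, 0.9])
  top_k = 2
  chosen = activations.argsort()[-top_k:]
  # chosen == array([1, 3]): the two largest activations, 0.7 and 0.9.
  return chosen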
def _process_image(image, run_params, window_size):
  """Generates the masked input and does a forward pass of the image.

  Args:
    image: float numpy array with shape (image_edge_length, image_edge_length,
        image_channels), image to be masked. For MNIST, the pixel values are
        between [0, 1] and for ImageNet, the pixel values are between
        [-117, 138].
    run_params: RunParams with model_type, model_path, image_placeholder_shape,
        activations, tensor_names, input, first_layer, logits.
    window_size: int, side length of the square mask.

  Returns:
    masked_input: nested list of z3.ExprRef with dimensions (image_channels,
        image_edge_length, image_edge_length).
    unmasked_predictions: dict,
      * input: float numpy array, the input tensor to the neural network.
      * first_layer: float numpy array, the first layer tensor in the neural
          network.
      * first_layer_relu: float numpy array, the first layer relu activation
          tensor in the neural network.
      * logits: float numpy array, the logits tensor in the neural network.
      * softmax: float numpy array, the softmax tensor in the neural network.
      * weights_layer_1: float numpy array, the first layer fc / conv weights.
      * biases_layer_1: float numpy array, the first layer fc / conv biases.
    session: tf.Session, tensorflow session with the loaded neural network.
    optimizer: utils.ImageOptimizer, z3 optimizer for image.
  """
  _verify_image_dimensions(image)
  image_edge_length, _, _ = image.shape
  num_masks_along_row = image_edge_length // window_size
  # We always find a 2d mask irrespective of the number of image channels.
  z3_mask = [z3.Int('mask_%d' % i) for i in range(num_masks_along_row ** 2)]
  session = utils.restore_model(run_params.model_path)
  unmasked_predictions = session.run(
      run_params.tensor_names,
      feed_dict={run_params.tensor_names['input']: image.reshape(
          run_params.image_placeholder_shape)})

  # _encode_input generates a masked_input with a shape
  # (image_channels, image_edge_length, image_edge_length).
  return (_encode_input(image=image, z3_mask=z3_mask, window_size=window_size),
          unmasked_predictions,
          session,
          utils.ImageOptimizer(z3_mask=z3_mask,
                               window_size=window_size,
                               edge_length=image_edge_length))
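
# Hypothetical usage sketch of _process_image (not part of the original
# module). The run_params argument is supplied by the caller and must describe
# a restored model whose input placeholder accepts a 4x4 single-channel image;
# the values below are placeholders, not real paths or tensor names.
def _example_process_image(run_params):
  # A 4x4 grayscale image with pixel values in [0, 1], as for MNIST.
  image = np.random.rand(4, 4, 1)
  masked_input, unmasked_predictions, session, optimizer = _process_image(
      image=image, run_params=run_params, window_size=2)
  # window_size=2 on a 4x4 image gives a 2x2 grid of mask variables, so the
  # optimizer reasons over 4 z3 integers instead of 16.
  session.close()
  return masked_input, unmasked_predictions, optimizer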
def test_smt_constraints_final_layer(self):
  # The SMT encoding of the final layer - [mask_0, mask_1, mask_2, mask_3].
  # For logit_label_index > rest, the mask bit at label_index should be set
  # to 1.
  image_edge_length = 2
  label_index = np.random.randint(low=0, high=image_edge_length ** 2)
  z3_mask = [_get_z3_var(index=i) for i in range(image_edge_length ** 2)]
  smt_output = [1 * z3.ToReal(i) for i in z3_mask]

  z3_optimizer = utils.ImageOptimizer(
      z3_mask=z3_mask, window_size=1, edge_length=image_edge_length)
  z3_optimizer = masking._formulate_smt_constraints_final_layer(
      z3_optimizer=z3_optimizer,
      smt_output=smt_output,
      delta=np.random.rand(),
      label_index=label_index)
  mask, result = z3_optimizer._optimize()

  self.assertEqual(result, 'sat')
  self.assertEqual(mask.reshape(-1)[label_index], 1)
  self.assertEqual(np.sum(mask), 1)
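
# Minimal sketch (illustrative only, not part of the original tests) of the
# final-layer constraint checked above, written directly in z3: when each
# logit is simply mask_i * 1.0, requiring the logit at label_index to exceed
# every other logit by delta forces that mask bit to 1, and minimizing the
# mask size keeps all other bits at 0.
def _example_final_layer_constraint(label_index=2, delta=0.5):
  solver = z3.Optimize()
  mask = [z3.Int('example_mask_%d' % i) for i in range(4)]
  for bit in mask:
    solver.add(z3.Or(bit == 0, bit == 1))
  logits = [z3.ToReal(bit) * 1.0 for bit in mask]
  for i, logit in enumerate(logits):
    if i != label_index:
      solver.add(logits[label_index] > logit + delta)
  solver.minimize(z3.Sum(mask))
  assert solver.check() == z3.sat
  model = solver.model()
  # Only the bit at label_index is 1 in the returned mask.
  return [model[bit].as_long() for bit in mask]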
def find_mask_full_encoding(image,
                            weights,
                            biases,
                            run_params,
                            window_size,
                            label_index,
                            delta=0,
                            timeout=600,
                            num_unique_solutions=1):
  """Finds a binary mask for a given image and a trained Neural Network.

  Args:
    image: float numpy array with shape (image_edge_length, image_edge_length,
        image_channels), image to be masked. For MNIST, the pixel values are
        between [0, 1] and for ImageNet, the pixel values are between
        [-117, 138].
    weights: list of num_layers float numpy arrays with shape
        (output_dim, input_dim), weights of the neural network.
    biases: list of num_layers float numpy arrays with shape (output_dim,),
        biases of the neural network.
    run_params: RunParams with model_type, model_path, image_placeholder_shape,
        activations, tensor_names.
    window_size: int, side length of the square mask.
    label_index: int, index of the label of the training image.
    delta: float, amount by which the logit of the correct label should exceed
        the rest of the logits. Its value is always >= 0. It is only used when
        constrain_final_layer is True.
    timeout: int, solver timeout in seconds.
    num_unique_solutions: int, number of unique solutions you want to sample.

  Returns:
    result: dict,
      * image: float numpy array with shape
          (image_edge_length * image_edge_length * image_channels,)
      * combined_solver_runtime: float, time taken by the solver to find all
          the solutions.
      * unmasked_logits: float numpy array with shape (num_outputs,)
      * unmasked_first_layer: float numpy array with shape
          (num_hidden_nodes_first_layer,)
      * masked_first_layer: list with length num_sols, contains float numpy
          arrays with shape (num_hidden_nodes_first_layer,)
      * inv_masked_first_layer: list with length num_sols, contains float
          numpy arrays with shape (num_hidden_nodes_first_layer,)
      * masks: list with length num_sols, contains float numpy arrays with
          shape (image_edge_length ** 2,)
      * masked_images: list with length num_sols, contains float numpy arrays
          with shape (image_edge_length ** 2,)
      * inv_masked_images: list with length num_sols, contains float numpy
          arrays with shape (image_edge_length ** 2,)
      * masked_logits: list with length num_sols, contains float numpy arrays
          with shape (num_outputs,)
      * inv_masked_logits: list with length num_sols, contains float numpy
          arrays with shape (num_outputs,)
      * solver_outputs: list with length num_sols, contains strings
          corresponding to every sampled solution saying 'sat', 'unsat' or
          'unknown'.
  """
  _verify_image_dimensions(image)
  image_placeholder_shape = run_params.image_placeholder_shape
  tensor_names = run_params.tensor_names
  # z3's timeout is in milliseconds.
  z3.set_option('timeout', timeout * 1000)
  image_edge_length, _, _ = image.shape
  num_masks_along_row = image_edge_length // window_size
  session = utils.restore_model(run_params.model_path)
  z3_mask = [z3.Int('mask_%d' % i) for i in range(num_masks_along_row ** 2)]

  unmasked_predictions = session.run(
      tensor_names,
      feed_dict={
          tensor_names['input']: image.reshape(image_placeholder_shape)})

  smt_output, _ = utils.smt_forward(
      features=utils.flatten_nested_lists(_encode_input(
          image=image,
          z3_mask=z3_mask,
          window_size=window_size)),
      weights=weights,
      biases=biases,
      activations=run_params.activations)

  z3_optimizer = _formulate_smt_constraints_final_layer(
      z3_optimizer=utils.ImageOptimizer(
          z3_mask=z3_mask,
          window_size=window_size,
          edge_length=image_edge_length),
      smt_output=smt_output,
      delta=delta,
      label_index=label_index)
  solver_start_time = time.time()
  result = collections.defaultdict(list)

  # Every mask found in each call of z3_optimizer.generator() is guaranteed
  # to be unique since duplicate solutions are blocked. For more details,
  # refer to z3_optimizer.generator().
  for mask, solver_output in z3_optimizer.generator(num_unique_solutions):
    _record_solution(result=result,
                     mask=mask,
                     solver_output=solver_output,
                     image=image,
                     session=session,
                     run_params=run_params)
  result.update({
      'image': image.reshape(-1),
      'combined_solver_runtime': time.time() - solver_start_time,
      'unmasked_logits': np.squeeze(unmasked_predictions['logits']),
      'unmasked_first_layer': np.squeeze(
          unmasked_predictions['first_layer'])})
  session.close()
  return result