def compute(self, inputs):
    """Computes the output of the Masking Network on @inputs.

    @inputs should be a Numpy array of inputs.

    Returns the value-network output, with ReLU masks taken from the
    activation network.
    """
    # Up to differ_index, the values and activation vectors are the same.
    pre_network = Network(self.activation_layers[:self.differ_index])
    mid_inputs = pre_network.compute(inputs)
    # Now we have to actually separately handle the masking when
    # activations != values.
    activation_vector = mid_inputs
    value_vector = mid_inputs
    for layer_index in range(self.differ_index, self.n_layers):
        activation_layer = self.activation_layers[layer_index]
        value_layer = self.value_layers[layer_index]
        if isinstance(activation_layer, FullyConnectedLayer):
            activation_vector = activation_layer.compute(activation_vector)
            value_vector = value_layer.compute(value_vector)
        elif isinstance(activation_layer, ReluLayer):
            # 1.0 where the activation is strictly positive, else 0.0.
            mask = np.maximum(np.sign(activation_vector), 0.0)
            # Multiply out-of-place: value_vector and activation_vector
            # alias the same array until the first FullyConnectedLayer,
            # and may alias the caller's @inputs when differ_index == 0,
            # so an in-place "*=" would mutate data we do not own.
            value_vector = value_vector * mask
            activation_vector = activation_vector * mask
        else:
            raise NotImplementedError
    return value_vector
def compute(self, inputs, representatives=None):
    """Computes the output of the Decoupled Network on @inputs.

    @inputs should be a Numpy array of inputs.

    If @representatives is given, the activation vectors are computed on
    @representatives from the very first layer (differ_index is treated
    as 0), while the value vectors are computed on @inputs.
    """
    differ_index = self.differ_index
    if representatives is not None:
        differ_index = 0
    # Up to differ_index, the values and activation vectors are the same.
    pre_network = Network(self.activation_layers[:differ_index])
    mid_inputs = pre_network.compute(inputs)
    # Now we have to actually separately handle the masking when
    # activations != values.
    activation_vector = mid_inputs
    if representatives is not None:
        activation_vector = pre_network.compute(representatives)
    value_vector = mid_inputs
    for layer_index in range(differ_index, self.n_layers):
        activation_layer = self.activation_layers[layer_index]
        value_layer = self.value_layers[layer_index]
        if isinstance(activation_layer, LINEAR_LAYERS):
            if isinstance(activation_layer, ConcatLayer):
                # Only a single level of concatenation, of linear
                # layers, is supported here.
                assert not any(
                    isinstance(input_layer, ConcatLayer)
                    for input_layer in activation_layer.input_layers)
                assert all(
                    isinstance(input_layer, LINEAR_LAYERS)
                    for input_layer in activation_layer.input_layers)
            activation_vector = activation_layer.compute(activation_vector)
            value_vector = value_layer.compute(value_vector)
        elif isinstance(activation_layer, ReluLayer):
            # 1.0 where the activation is strictly positive, else 0.0.
            mask = np.maximum(np.sign(activation_vector), 0.0)
            if isinstance(value_vector, np.ndarray):
                value_vector *= mask
            else:
                # value_vector is a Pytorch tensor here, so the mask must
                # be converted before the in-place multiply.
                # NOTE: Originally this was torch.tensor(mask,
                # dtype=torch.float). I changed to this to silence a
                # warning from Pytorch. I don't think there will be, but it
                # might be worth testing for a performance regression.
                value_vector *= mask.clone().detach().float()
            activation_vector *= mask
        elif isinstance(activation_layer, HardTanhLayer):
            # Saturate the values wherever the activations saturate, then
            # clip the activations themselves. (A dead, unused
            # "mask = np.ones_like(value_vector)" was removed here.)
            value_vector[activation_vector >= 1.0] = 1.0
            value_vector[activation_vector <= -1.0] = -1.0
            np.clip(activation_vector, -1.0, 1.0, out=activation_vector)
        elif isinstance(activation_layer, MaxPoolLayer):
            # Pool the activations, then select the same indices from the
            # value vector.
            activation_vector, indices = activation_layer.compute(
                activation_vector, return_indices=True)
            value_vector = value_layer.from_indices(value_vector, indices)
        else:
            raise NotImplementedError
    return value_vector
def deserialize(cls, serialized):
    """Deserializes the DDNN from the Protobuf format."""
    # Rebuild both layer lists from their serialized forms.
    activations = Network.deserialize_layers(serialized.activation_layers)
    values = Network.deserialize_layers(serialized.value_layers)
    # The serialized value layers only cover layers at/after differ_index;
    # prepend the shared prefix so both lists are full-length.
    shared_prefix = activations[:serialized.differ_index]
    return cls(activations, shared_prefix + values)
def compute(self):
    """Returns the classification regions of network restricted to line.

    Returns a list with one tuple (pre_regions, corresponding_labels) for
    each line in self.lines. pre_regions is a list of tuples of endpoints
    that partition each input line.
    """
    # Results are memoized across calls.
    if self.computed:
        return self.classifications
    self.partial_compute()
    self.classifications = []
    classify_network = Network([ArgMaxLayer()])
    for pre, post in self.transformed_lines:
        # First, we take each of the linear regions and split them where
        # the ArgMax changes.
        lines = list(zip(post[:-1], post[1:]))
        classify_transformed_lines = classify_network.exactlines(
            lines, compute_preimages=False, include_post=False)
        split_pre = []
        split_post = []
        for i, endpoints in enumerate(classify_transformed_lines):
            pre_delta = pre[i + 1] - pre[i]
            post_delta = post[i + 1] - post[i]
            for point_ratio in endpoints:
                # Interpolate each split point in both pre- and
                # post-image space.
                point_pre = pre[i] + (point_ratio * pre_delta)
                point_post = post[i] + (point_ratio * post_delta)
                # A ratio-0.0 point duplicates the previous segment's end
                # point, so it is only kept on the very first segment.
                if i == 0 or not point_ratio == 0.0:
                    split_pre.append(point_pre)
                    split_post.append(point_post)
        # Now, in each of the resulting regions, we compute the
        # corresponding label (constant within a region, so the midpoint
        # suffices).
        region_labels = []
        for i in range(len(split_pre) - 1):
            mid_post = 0.5 * (split_post[i] + split_post[i + 1])
            region_labels.append(np.argmax(mid_post))
        # Finally, we merge segments with the same classification.
        merged_pre = []
        merged_labels = []
        for i, label in enumerate(region_labels):
            if not merged_labels or label != merged_labels[-1]:
                merged_pre.append(split_pre[i])
                merged_labels.append(label)
        merged_pre.append(split_pre[-1])
        regions = list(zip(merged_pre[:-1], merged_pre[1:]))
        self.classifications.append((regions, merged_labels))
    self.computed = True
    return self.classifications
def deserialize(cls, serialized):
    """Deserializes the layer."""
    # Only concat_data payloads describe this layer type.
    if serialized.WhichOneof("layer_data") != "concat_data":
        return None
    # Imported locally to avoid a circular import at module load time.
    from pysyrenn.frontend.network import Network
    concat_data = serialized.concat_data
    input_layers = Network.deserialize_layers(concat_data.layers)
    along = ConcatAlong.deserialize(concat_data.concat_along)
    return cls(input_layers, along)
def test_serialize():
    """Tests the Network's serialize and deserialize methods."""
    in_dims = np.random.randint(1, 32)
    out_dims = np.random.randint(1, 64)
    fc_layer = FullyConnectedLayer(
        np.random.uniform(size=(in_dims, out_dims)),
        np.random.uniform(size=(out_dims)))
    activation = ReluLayer()
    network = Network([fc_layer, activation])

    serialized = network.serialize()
    # One serialized entry per layer, in network order.
    assert len(serialized.layers) == 2
    assert serialized.layers[0] == fc_layer.serialize()
    assert serialized.layers[1] == activation.serialize()

    # Round-tripping must preserve the serialized form exactly.
    assert Network.deserialize(serialized).serialize() == serialized
def test_compute_and_gradients():
    """Tests the Network's compute and compute_gradients methods."""
    batch = np.random.randint(1, 128)
    in_dims = np.random.randint(1, 256)
    out_dims = np.random.randint(1, 512)
    inputs = np.random.uniform(size=(batch, in_dims))
    weights = np.random.uniform(size=(in_dims, out_dims))
    biases = np.random.uniform(size=(out_dims))

    fc_layer = FullyConnectedLayer(weights, biases)
    relu = ReluLayer()
    network = Network([fc_layer, relu])

    # The network output must match applying the layers one at a time.
    fc_out = fc_layer.compute(inputs)
    expected = relu.compute(fc_out)
    outputs = network.compute(inputs)
    assert np.allclose(outputs, expected)
    # Lists of inputs and single inputs should also be accepted.
    assert np.allclose(outputs, network.compute(list(inputs)))
    assert np.allclose(outputs[0], network.compute(list(inputs)[0]))

    # d/dx ReLU(Wx + b)[label] is W[:, label] where the pre-activation is
    # positive and zero elsewhere.
    for label in range(out_dims):
        gradients = network.compute_gradients(inputs, label)
        for i in range(batch):
            if fc_out[i, label] <= 0.0:
                assert np.allclose(gradients[i], 0.0)
            else:
                assert np.allclose(gradients[i], weights[:, label])
def test_eran_unimplemented():
    """Tests loading a convolutional Network from ERAN format."""
    path = "eran_unimplemented.eran"

    # An unknown activation function ("Sin") must be rejected.
    with open(path, "w") as netfile:
        netfile.write("Sin\n")
        netfile.write("[[1, 2], [3, 4]]")
    raised = False
    try:
        Network.from_file(path)
    except NotImplementedError:
        raised = True
    assert raised

    # The same applies when the unknown activation follows a Conv2D layer.
    with open(path, "w") as netfile:
        netfile.write("Conv2D\nSin, filters=1, kernel_size=[1, 1], ")
        netfile.write("input_shape=[1, 1, 2], stride=[1, 1], padding=0\n")
        netfile.write("[[[[1], [2]]]]\n")
        netfile.write("[-10]\n")
    raised = False
    try:
        Network.from_file(path)
    except NotImplementedError:
        raised = True
    assert raised
def compute(self):
    """Returns the classification regions of network restricted to @planes.

    Returns a list with one tuple (pre_regions, corresponding_labels) for
    each plane in self.planes. pre_regions is a list of Numpy arrays, each
    one representing a VPolytope.

    In contrast to LinesClassifier, no attempt is made here to return the
    minimal set.
    """
    # Results are memoized across calls.
    if self.computed:
        return self.classifications
    self.partial_compute()
    self.classifications = []
    classify_network = Network([ArgMaxLayer()])
    for upolytope in self.transformed_planes:
        pre_polytopes = []
        labels = []
        # First, we take each of the linear partitions and split them where
        # the ArgMax changes.
        postimages = [post for pre, post in upolytope]
        classified_posts = classify_network.transform_planes(
            postimages, compute_preimages=False, include_post=False)
        for vpolytope, classify_upolytope in zip(upolytope,
                                                 classified_posts):
            pre, post = vpolytope
            for combinations in classify_upolytope:
                # Map the split region back to the input space via its
                # convex-combination coefficients.
                pre_polytopes.append(np.matmul(combinations, pre))
                # The label is constant over the region, so classifying
                # the vertex centroid suffices.
                mean_combination = np.mean(combinations, axis=0)
                class_region_label = np.argmax(
                    np.matmul(mean_combination, post).flatten())
                labels.append(class_region_label)
        self.classifications.append((pre_polytopes, labels))
    self.computed = True
    return self.classifications
def test_conv_from_eran():
    """Tests loading a convolutional Network from ERAN format."""
    path = "conv_test.eran"
    # The file defines: Conv2D+ReLU, Affine, Conv2D+HardTanh.
    with open(path, "w") as netfile:
        netfile.write("Conv2D\nReLU, filters=2, kernel_size=[2, 2], ")
        netfile.write("input_shape=[16, 16, 2], stride=[10, 10], padding=2\n")
        netfile.write("[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],")
        netfile.write(" [[[8, 7], [6, 5]], [[4, 3], [2, 1]]]]\n")
        netfile.write("[-1, -2]\n")
        netfile.write("Affine\n[[1, 2, 3, 4, 5, 6, 7, 8], ")
        netfile.write("[5, 6, 7, 8, 9, 10, 11, 12]]\n[-1, -2]\n")
        netfile.write("Conv2D\nHardTanh, filters=1, kernel_size=[1, 1], ")
        netfile.write("input_shape=[1, 1, 2], stride=[1, 1], padding=0\n")
        netfile.write("[[[[1], [2]]]]\n")
        netfile.write("[-10]\n")
    network = Network.from_file(path)
    # Each ERAN entry with an activation expands to two layers.
    assert len(network.layers) == 5
    # First entry: Conv2D layer followed by a ReLU.
    assert isinstance(network.layers[0], Conv2DLayer)
    assert np.allclose(
        network.layers[0].filter_weights,
        np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[8, 7], [6, 5]], [[4, 3], [2, 1]]]]))
    assert np.allclose(network.layers[0].biases, np.array([-1, -2]))
    assert network.layers[0].window_data.input_shape == (16, 16, 2)
    assert network.layers[0].window_data.window_shape == (2, 2)
    assert network.layers[0].window_data.strides == (10, 10)
    assert network.layers[0].window_data.padding == (2, 2)
    assert network.layers[0].window_data.out_channels == 2
    assert isinstance(network.layers[1], ReluLayer)
    # Second entry: the Affine layer; note the weights are permuted and
    # transposed relative to the file contents.
    assert isinstance(network.layers[2], FullyConnectedLayer)
    assert np.allclose(
        network.layers[2].weights,
        np.array([[1, 5, 2, 6, 3, 7, 4, 8],
                  [5, 9, 6, 10, 7, 11, 8, 12]]).T)
    assert np.allclose(network.layers[2].biases, np.array([[-1, -2]]))
    # Third entry: Conv2D layer followed by a HardTanh.
    assert isinstance(network.layers[3], Conv2DLayer)
    assert np.allclose(network.layers[3].filter_weights,
                       np.array([[[[1], [2]]]]))
    assert np.allclose(network.layers[3].biases, np.array([-10]))
    assert network.layers[3].window_data.input_shape == (1, 1, 2)
    assert network.layers[3].window_data.window_shape == (1, 1)
    assert network.layers[3].window_data.strides == (1, 1)
    assert network.layers[3].window_data.padding == (0, 0)
    assert network.layers[3].window_data.out_channels == 1
    assert isinstance(network.layers[4], HardTanhLayer)
def test_fcn_from_eran():
    """Tests loading a fully-connected Network from ERAN format."""
    path = "fcn_test.eran"
    with open(path, "w") as netfile:
        netfile.write("ReLU\n[[-1, 2, -3], [-4, 5, -6]]\n[7, 8]\n")
        netfile.write("Normalize mean=[1, 2] std=[3, 4]\n")
        netfile.write("HardTanh\n[[-8, 7, -6], [-5, 4, -3]]\n[2, 1]\n")
    network = Network.from_file(path)
    layers = network.layers
    assert len(layers) == 5

    # First ERAN entry: a fully-connected layer (weights are transposed
    # relative to the file) followed by a ReLU.
    assert isinstance(layers[0], FullyConnectedLayer)
    assert np.allclose(layers[0].weights,
                       np.array([[-1, -4], [2, 5], [-3, -6]]))
    assert np.allclose(layers[0].biases, np.array([[7, 8]]))
    assert isinstance(layers[1], ReluLayer)

    # Second entry: the normalization layer.
    assert isinstance(layers[2], NormalizeLayer)
    assert np.allclose(layers[2].means, np.array([[1, 2]]))
    assert np.allclose(layers[2].standard_deviations, np.array([[3, 4]]))

    # Third entry: a fully-connected layer followed by a HardTanh.
    assert isinstance(layers[3], FullyConnectedLayer)
    assert np.allclose(layers[3].weights,
                       np.array([[-8, -5], [7, 4], [-6, -3]]))
    assert np.allclose(layers[3].biases, np.array([[2, 1]]))
    assert isinstance(layers[4], HardTanhLayer)
def test_exactlines():
    # Monkey-patch transform_lines so no transformer server is needed.
    # NOTE(review): the original function is saved here but never
    # restored, so later tests in this process see the mock.
    import pysyrenn.frontend.transformer_client
    transform_lines_ = pysyrenn.frontend.transformer_client.transform_lines
    input_dims = np.random.randint(1, 32)
    output_dims = np.random.randint(1, 64)
    weights = np.random.uniform(size=(input_dims, output_dims))
    biases = np.random.uniform(size=(output_dims))
    fullyconnected_layer = FullyConnectedLayer(weights, biases)
    relu_layer = ReluLayer()
    network = Network([fullyconnected_layer, relu_layer])
    lines = list(np.random.uniform(size=(100, 2, input_dims)))

    def transform_lines_mock(query_network, query_lines,
                             query_include_post=False):
        # Mock response: line i is split at ratio 1/(i+1) and given the
        # one-element postimage [2i].
        assert query_network.serialize() == network.serialize()
        if len(query_lines) == 1:
            assert np.allclose(query_lines, lines[:1])
        else:
            assert np.allclose(query_lines, lines)
        output_lines = []
        for i, line in enumerate(query_lines):
            output_lines.append((np.array([0.0, 1.0 / float(i + 1), 1.0]),
                                 np.array([float(2.0 * i)])))
        return output_lines
    pysyrenn.frontend.transformer_client.transform_lines = transform_lines_mock

    # compute_preimages=False should return the raw endpoint ratios.
    ratios = network.exactlines(lines, compute_preimages=False,
                                include_post=False)
    assert np.allclose(
        ratios,
        np.array([[0.0, 1.0 / float(i + 1), 1.0] for i in range(100)]))
    # The singular exactline() should agree with exactlines().
    ratio = network.exactline(*lines[0], compute_preimages=False,
                              include_post=False)
    assert np.allclose(ratio, ratios[0])

    def interpolate(line_i, ratio):
        # Maps a preimage ratio back to a point on the input line.
        start, end = lines[line_i]
        return start + (ratio * (end - start))

    # compute_preimages=True should interpolate the ratios on the lines.
    preimages = network.exactlines(lines, compute_preimages=True,
                                   include_post=False)
    assert np.allclose(
        preimages,
        np.array([[
            interpolate(i, 0.0),
            interpolate(i, 1.0 / float(i + 1)),
            interpolate(i, 1.0)
        ] for i in range(100)]))
    preimage = network.exactline(*lines[0], compute_preimages=True,
                                 include_post=False)
    assert np.allclose(preimage, preimages[0])

    # include_post=True should return (preimage, postimage) pairs.
    transformed = network.exactlines(lines, compute_preimages=True,
                                     include_post=True)
    pre, post = zip(*transformed)
    assert np.allclose(
        pre,
        np.array([[
            interpolate(i, 0.0),
            interpolate(i, 1.0 / float(i + 1)),
            interpolate(i, 1.0)
        ] for i in range(100)]))
    assert np.allclose(post,
                       np.array([[float(2.0 * i)] for i in range(100)]))
    transformed_single = network.exactline(*lines[0], compute_preimages=True,
                                           include_post=True)
    assert np.allclose(transformed_single[0], transformed[0][0])
    assert np.allclose(transformed_single[1], transformed[0][1])
def test_squeezenet_from_onnx():
    """Tests loading a SqueezeNet Network from ONNX format."""
    onnx_path = "external/onnx_squeezenet/squeezenet1.1.onnx"
    loaded = Network.from_file(onnx_path)
    # SqueezeNet 1.1 should parse into exactly 40 layers.
    assert len(loaded.layers) == 40
def test_transform_lines():
    # Save the real stub opener so it can be restored at the end.
    open_stub_ = transformer_client.open_stub
    input_dims = np.random.randint(1, 32)
    output_dims = np.random.randint(1, 64)
    weights = np.random.uniform(size=(input_dims, output_dims))
    biases = np.random.uniform(size=(output_dims))
    fullyconnected_layer = FullyConnectedLayer(weights, biases)
    relu_layer = ReluLayer()
    network = Network([fullyconnected_layer, relu_layer])
    lines = list(np.random.uniform(size=(100, 2, input_dims)))
    # Canned server responses: line i comes back with (i + 2) endpoints,
    # evenly spaced preimage ratios, and coordinates arange(i, i + 10).
    response_messages = []
    for i, line in enumerate(lines):
        transformed_line = transformer_pb.SegmentedLine()
        for j in range(i + 2):
            endpoint = transformer_pb.SegmentEndpoint()
            endpoint.coordinates.extend(np.arange(i, i + 10))
            endpoint.preimage_ratio = j / ((i + 2) - 1)
            transformed_line.endpoints.append(endpoint)
        response = transformer_pb.TransformResponse()
        response.transformed_line.CopyFrom(transformed_line)
        response_messages.append(response)
    # With include_post = True.
    stub = ServerStubMock(response_messages)
    transformer_client.open_stub = lambda: stub
    transformed_lines = transformer_client.transform_lines(network, lines,
                                                           include_post=True)

    def verify_response(stub, transformed_lines, included_post):
        # Checks both the parsed response and the request stream that was
        # sent to the mocked server.
        assert len(transformed_lines) == len(lines)
        for i, line in enumerate(lines):
            transformed_pre, transformed_post = transformed_lines[i]
            assert len(transformed_pre) == (i + 2)
            assert np.allclose(transformed_pre,
                               [j / ((i + 2) - 1) for j in range(i + 2)])
            if included_post:
                assert len(transformed_post) == (i + 2)
                assert np.allclose(transformed_post, np.arange(i, i + 10))
            else:
                assert transformed_post is None
        assert len(stub.received_messages) == 1
        received = stub.received_messages[0]
        # 2 layer messages followed by 100 line messages.
        assert len(received) == 102
        assert received[0].WhichOneof("request_data") == "layer"
        assert received[0].layer == fullyconnected_layer.serialize()
        assert received[1].layer == relu_layer.serialize()
        for i, request in enumerate(received[2:]):
            assert request.WhichOneof("request_data") == "line"
            # Each request line is the original (start, end) pair.
            assert len(request.line.endpoints) == 2
            assert request.line.endpoints[0].preimage_ratio == 0.0
            assert request.line.endpoints[1].preimage_ratio == 1.0
            assert np.allclose(
                np.array(request.line.endpoints[0].coordinates),
                lines[i][0])
            assert np.allclose(
                np.array(request.line.endpoints[1].coordinates),
                lines[i][1])
    verify_response(stub, transformed_lines, True)
    # With include_post = False.
    for response_message in response_messages:
        for endpoint in response_message.transformed_line.endpoints:
            # Empty coordinate lists simulate a no-postimage response.
            while endpoint.coordinates:
                endpoint.coordinates.pop()
    stub = ServerStubMock(response_messages)
    transformer_client.open_stub = lambda: stub
    transformed_lines = transformer_client.transform_lines(network, lines,
                                                           include_post=False)
    verify_response(stub, transformed_lines, False)
    # Restore the real stub opener.
    transformer_client.open_stub = open_stub_
def test_transform_planes():
    # Save the real stub opener. NOTE(review): unlike test_transform_lines,
    # it is never restored in this test.
    open_stub_ = transformer_client.open_stub
    input_dims = np.random.randint(1, 32)
    output_dims = np.random.randint(1, 64)
    weights = np.random.uniform(size=(input_dims, output_dims))
    biases = np.random.uniform(size=(output_dims))
    fullyconnected_layer = FullyConnectedLayer(weights, biases)
    relu_layer = ReluLayer()
    network = Network([fullyconnected_layer, relu_layer])
    planes = list(np.random.uniform(size=(100, 3, input_dims)))
    # Canned server responses: plane i comes back as (i + 2) identical
    # VPolytopes with vertices plane @ weights and identity combinations.
    response_messages = []
    for i, plane in enumerate(planes):
        transformed_polytope = transformer_pb.UPolytope()
        transformed_polytope.space_dimensions = output_dims
        transformed_polytope.subspace_dimensions = 2
        for j in range(i + 2):
            polytope = transformer_pb.VPolytope()
            polytope.vertices.extend(np.matmul(plane, weights).flatten())
            polytope.combinations.extend(np.eye(3).flatten())
            polytope.num_vertices = 3
            transformed_polytope.polytopes.append(polytope)
        response = transformer_pb.TransformResponse()
        response.transformed_upolytope.CopyFrom(transformed_polytope)
        response_messages.append(response)
    # With include_post = True.
    stub = ServerStubMock(response_messages)
    transformer_client.open_stub = lambda: stub
    transformed = transformer_client.transform_planes(network, planes)
    # Check the parsed response against the canned messages.
    assert len(transformed) == len(planes)
    for i, plane in enumerate(planes):
        upolytope = transformed[i]
        assert len(upolytope) == (i + 2)
        for vpolytope in upolytope:
            transformed_pre, transformed_post = vpolytope
            assert len(transformed_pre) == len(transformed_post) == 3
            assert np.allclose(transformed_pre, np.eye(3))
            assert np.allclose(transformed_post, np.matmul(plane, weights))
    # Check the request stream sent to the mocked server: 2 layer messages
    # followed by 100 upolytope messages.
    assert len(stub.received_messages) == 1
    received = stub.received_messages[0]
    assert len(received) == 102
    assert received[0].WhichOneof("request_data") == "layer"
    assert received[0].layer == fullyconnected_layer.serialize()
    assert received[1].layer == relu_layer.serialize()
    for i, request in enumerate(received[2:]):
        assert request.WhichOneof("request_data") == "upolytope"
        assert request.upolytope.space_dimensions == input_dims
        assert request.upolytope.subspace_dimensions == 2
        # Each request carries exactly the one input plane.
        assert len(request.upolytope.polytopes) == 1
        assert request.upolytope.polytopes[0].num_vertices == 3
        assert np.allclose(request.upolytope.polytopes[0].vertices,
                           planes[i].flatten())
        assert np.allclose(request.upolytope.polytopes[0].combinations,
                           np.eye(3).flatten())