def fully_connected_node_test(converter: conversion.ConversionStrategy, has_bias: bool):

    print("FULLY CONNECTED NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(nodes.FullyConnectedNode("FullyConnected_1", (3, 4, 5), 5, has_bias=has_bias))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.FullyConnectedNode)
    assert isinstance(end_node, nodes.FullyConnectedNode)

    assert start_node.in_dim == end_node.in_dim
    assert start_node.out_dim == end_node.out_dim
    assert start_node.in_features == end_node.in_features
    assert start_node.out_features == end_node.out_features
    assert (start_node.weight == end_node.weight).all()

    if start_node.bias is not None:
        assert (start_node.bias == end_node.bias).all()
    else:
        assert start_node.bias is None and end_node.bias is None

    assert start_node.has_bias == end_node.has_bias
    assert start_node.identifier == end_node.identifier
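# Hedged usage sketch (not part of the original test suite): the round-trip test above is
# parametric in the conversion strategy, so it can be driven against both backends. It is
# assumed here that ONNXConverter and PyTorchConverter are the concrete ConversionStrategy
# implementations exposed by the `conversion` module (they appear as cv./conv. aliases of the
# same classes elsewhere in this code).
if __name__ == "__main__":
    for strategy in (conversion.ONNXConverter(), conversion.PyTorchConverter()):
        fully_connected_node_test(strategy, has_bias=True)
        fully_connected_node_test(strategy, has_bias=False)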
def input_search_lbl(net: networks.SequentialNetwork, ref_output: Tensor, starset_list: List[abst.StarSet],
                     max_iter: int, threshold: float = 1e-5, optimizer_con: type = torch.optim.SGD,
                     opt_params: dict = None, scheduler_con: type = torch.optim.lr_scheduler.ReduceLROnPlateau,
                     scheduler_params: dict = None, max_iter_no_change: int = None):

    logger = logging.getLogger(logger_name)
    logger.info(f"LAYER-BY-LAYER SEARCH of Input Point corresponding to Output: {ref_output}.")

    # Collect the nodes of the network; the search proceeds from the last layer back to the
    # first, so both the node list and the corresponding star sets are reversed.
    current_node = net.get_first_node()
    node_list = []
    while current_node is not None:
        node_list.append(current_node)
        current_node = net.get_next_node(current_node)

    node_list.reverse()
    starset_list.reverse()

    temp_ref_output = ref_output
    for i in range(len(node_list)):

        # Build a single-layer network for the current node and search for an input
        # producing the current reference output, starting from a sample of the star set.
        temp_net = networks.SequentialNetwork("temp", "temp")
        temp_net.add_node(copy.deepcopy(node_list[i]))

        temp_start_input = list(starset_list[i].stars)[0].get_samples(1)[0]
        temp_start_input = temp_start_input.squeeze()
        temp_ref_output = temp_ref_output.squeeze()

        logger.info(f"ANALYZING LAYER: {node_list[i].identifier}. Starting Input = {temp_start_input}, "
                    f"Reference Output = {temp_ref_output}")

        temp_correct, temp_current_input, temp_current_output = input_search(temp_net, temp_ref_output,
                                                                             temp_start_input, max_iter, threshold,
                                                                             optimizer_con, opt_params,
                                                                             scheduler_con, scheduler_params,
                                                                             max_iter_no_change)

        logger.info(f"ENDED LAYER SEARCH. FOUND = {temp_correct}, INPUT = {temp_current_input}, "
                    f"OUTPUT = {temp_current_output}")

        if not temp_correct:
            logger.info(f"Search Failed at layer: {node_list[i].identifier}")
            return False, None, None

        # The input found for this layer becomes the reference output for the previous layer.
        temp_ref_output = temp_current_input

    # Verify the final input point on the complete network using its PyTorch representation.
    py_net = cv.PyTorchConverter().from_neural_network(net).pytorch_network
    py_current_input = torch.from_numpy(temp_current_input)
    py_current_output = py_net(py_current_input)

    return temp_correct, py_current_input.detach().numpy(), py_current_output.detach().numpy()
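# Hedged usage sketch for input_search_lbl (not from the original source): the function expects
# one StarSet per layer of `net`, in forward order, typically produced by a starset-based abstract
# analysis of the network. `analyzed_net` and `layer_starsets` are hypothetical placeholders for
# that network and analysis result; the reference output is assumed to be a numpy-compatible
# Tensor, since the final input found is later passed to torch.from_numpy.
import numpy as np


def input_search_lbl_example(analyzed_net: networks.SequentialNetwork,
                             layer_starsets: List[abst.StarSet]):
    # Hypothetical target output of the last layer of analyzed_net.
    ref_output = np.array([0.0, 1.0])
    found, in_point, out_point = input_search_lbl(analyzed_net, ref_output, layer_starsets,
                                                  max_iter=1000, threshold=1e-5,
                                                  optimizer_con=torch.optim.Adam,
                                                  opt_params={"lr": 1e-2})
    return (in_point, out_point) if found else None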
def conv_node_test(converter: conversion.ConversionStrategy, has_bias: bool):

    print("CONV NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(nodes.ConvNode("Conv_1", (3, 32, 32), 3, (3, 3), (1, 1), (1, 1, 1, 1),
                                          (0, 0), 1, has_bias))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.ConvNode)
    assert isinstance(end_node, nodes.ConvNode)

    assert start_node.in_dim == end_node.in_dim
    assert start_node.out_dim == end_node.out_dim
    assert start_node.in_channels == end_node.in_channels
    assert start_node.out_channels == end_node.out_channels
    assert start_node.kernel_size == end_node.kernel_size
    assert start_node.stride == end_node.stride
    assert start_node.padding == end_node.padding
    assert start_node.dilation == end_node.dilation
    assert start_node.groups == end_node.groups
    assert (start_node.weight == end_node.weight).all()

    if start_node.bias is not None:
        assert (start_node.bias == end_node.bias).all()
    else:
        assert start_node.bias is None and end_node.bias is None

    assert start_node.has_bias == end_node.has_bias
    assert start_node.identifier == end_node.identifier
def batchnorm_node_test(converter: conversion.ConversionStrategy):

    print("BATCHNORM NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(nodes.BatchNormNode("Batchnorm_1", (4, 5, 6, 3)))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.BatchNormNode)
    assert isinstance(end_node, nodes.BatchNormNode)

    assert start_node.in_dim == end_node.in_dim
    assert start_node.out_dim == end_node.out_dim
    assert start_node.num_features == end_node.num_features
    assert (start_node.weight == end_node.weight).all()
    assert (start_node.bias == end_node.bias).all()
    assert (start_node.running_mean == end_node.running_mean).all()
    assert (start_node.running_var == end_node.running_var).all()
    assert math.isclose(start_node.eps, end_node.eps, abs_tol=float_tolerance)
    assert start_node.track_running_stats == end_node.track_running_stats
    assert start_node.affine == end_node.affine
    assert math.isclose(start_node.momentum, end_node.momentum, abs_tol=float_tolerance)
    assert start_node.identifier == end_node.identifier
def __init__(self):
    super().__init__()
    self.file_name = ("", "")
    self.network = pynn.SequentialNetwork("", "X")
    self.properties = dict()
    self.input_handler = None
    self.output_handler = None
def dropout_node_test(converter: conversion.ConversionStrategy):

    print("DROPOUT NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(nodes.DropoutNode("Dropout_1", (3, 4, 5, 6)))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.DropoutNode)
    assert isinstance(end_node, nodes.DropoutNode)

    assert start_node.p == end_node.p
    assert start_node.identifier == end_node.identifier
def flatten_node_test(converter: conversion.ConversionStrategy):

    print("FLATTEN NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(nodes.FlattenNode("Flatten_1", (3, 4, 5, 6)))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.FlattenNode)
    assert isinstance(end_node, nodes.FlattenNode)

    assert start_node.axis == end_node.axis
    assert start_node.identifier == end_node.identifier
def relu_node_test(converter: conversion.ConversionStrategy):

    print("RELU NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(nodes.ReLUNode("ReLU_1", (3, 3, 3)))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.ReLUNode)
    assert isinstance(end_node, nodes.ReLUNode)

    assert start_node.in_dim == end_node.in_dim
    assert start_node.out_dim == end_node.out_dim
    assert start_node.identifier == end_node.identifier
def reshape_node_test(converter: conversion.ConversionStrategy):

    print("RESHAPE NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(nodes.ReshapeNode("Reshape_1", (3, 4, 5, 6), (3, 0, -1), False))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.ReshapeNode)
    assert isinstance(end_node, nodes.ReshapeNode)

    assert start_node.shape == end_node.shape
    assert start_node.identifier == end_node.identifier
def unsqueeze_node_test(converter: conversion.ConversionStrategy):

    print("UNSQUEEZE NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(nodes.UnsqueezeNode("Unsqueeze_1", (3, 4, 5, 6), (0, 3)))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.UnsqueezeNode)
    assert isinstance(end_node, nodes.UnsqueezeNode)

    assert start_node.axes == end_node.axes
    assert start_node.identifier == end_node.identifier
def lrn_node_test(converter: conversion.ConversionStrategy):

    print("LRN NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(nodes.LRNNode("LRN_1", (3, 32, 32), 3))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.LRNNode)
    assert isinstance(end_node, nodes.LRNNode)

    assert math.isclose(start_node.alpha, end_node.alpha, abs_tol=float_tolerance)
    assert math.isclose(start_node.beta, end_node.beta, abs_tol=float_tolerance)
    assert math.isclose(start_node.k, end_node.k, abs_tol=float_tolerance)
    assert start_node.size == end_node.size
    assert start_node.identifier == end_node.identifier
def maxpool_node_test(converter: conversion.ConversionStrategy):

    print("MAXPOOL NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(nodes.MaxPoolNode("Maxpool_1", (3, 32, 32), (3, 3), (1, 1),
                                             (1, 1, 1, 1), (0, 0)))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.MaxPoolNode)
    assert isinstance(end_node, nodes.MaxPoolNode)

    assert start_node.in_dim == end_node.in_dim
    assert start_node.out_dim == end_node.out_dim
    assert start_node.kernel_size == end_node.kernel_size
    assert start_node.stride == end_node.stride
    assert start_node.padding == end_node.padding
    assert start_node.ceil_mode == end_node.ceil_mode
    assert start_node.return_indices == end_node.return_indices
    assert start_node.identifier == end_node.identifier
def averagepool_node_test(converter: conversion.ConversionStrategy):

    print("AVERAGEPOOL NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(nodes.AveragePoolNode("AveragePool_1", (3, 32, 32), (3, 3), (1, 1),
                                                 (1, 1, 1, 1)))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.AveragePoolNode)
    assert isinstance(end_node, nodes.AveragePoolNode)

    assert start_node.in_dim == end_node.in_dim
    assert start_node.out_dim == end_node.out_dim
    assert start_node.kernel_size == end_node.kernel_size
    assert start_node.stride == end_node.stride
    assert start_node.padding == end_node.padding
    assert start_node.ceil_mode == end_node.ceil_mode
    assert start_node.count_include_pad == end_node.count_include_pad
    assert start_node.identifier == end_node.identifier
in_pred_bias = np.array(in_pred_bias)
in_pred_mat = np.array(in_pred_mat)

# Creation of the matrices defining the negation of the property of interest (i.e., the unsafe
# region), i.e., out_pred_mat * y <= out_pred_bias.
out_pred_mat = np.array(unsafe_mats[i])
if property_ids[i] == "Property 1":
    out_pred_bias = (np.array(unsafe_vecs[i]) - outputMean) / outputRange
else:
    out_pred_bias = np.array(unsafe_vecs[i])

# Construction of our internal representation for the ACAS network.
network = networks.SequentialNetwork(f"ACAS_XU_{networks_ids[i][j]}", "X")
for k in range(len(weights)):

    new_fc_node = nodes.FullyConnectedNode(f"FC_{k}", (weights[k].shape[1],), weights[k].shape[0],
                                           weights[k], biases[k], True)
    network.add_node(new_fc_node)

    if k < len(weights) - 1:
        new_relu_node = nodes.ReLUNode(f"ReLU_{k}", (weights[k].shape[0],))
        network.add_node(new_relu_node)
               np.array([[1, -1, 0, 0, 0],
                         [1, 0, -1, 0, 0],
                         [1, 0, 0, -1, 0],
                         [1, 0, 0, 0, -1]])]

unsafe_vecs = [np.array([[0], [0], [0], [0]]),
               np.array([[0], [0], [0], [0]])]
"""

p_id = ["P3", "P4"]
prop_set = False
for i in range(1, 6):
    for j in range(1, 10):

        weights, biases, inputMeans, inputRanges, outputMean, outputRange = \
            utilities.parse_nnet(f"nnet/ACASXU_experimental_v2a_{i}_{j}.nnet")

        network = networks.SequentialNetwork(f"ACAS_XU_{i}_{j}", "X")
        for k in range(len(weights)):

            new_fc_node = nodes.FullyConnectedNode(f"FC{k}", (weights[k].shape[1],), weights[k].shape[0],
                                                   weights[k], biases[k], True)
            network.add_node(new_fc_node)

            if k < len(weights) - 1:
                new_relu_node = nodes.ReLUNode(f"ReLU{k}", (weights[k].shape[0],))
                network.add_node(new_relu_node)

        if not prop_set:
import torch.nn as nn
import torch
import copy
import logging

# Logger Setup
logger = logging.getLogger("pynever")
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

# Building of the network of interest
small_net = networks.SequentialNetwork("SmallNetwork", "X")
small_net.add_node(nodes.FullyConnectedNode('Linear_1', (784,), 64))
small_net.add_node(nodes.BatchNormNode('BatchNorm_2', (64,)))
small_net.add_node(nodes.ReLUNode('ReLU_3', (64,)))
small_net.add_node(nodes.FullyConnectedNode('Linear_4', (64,), 32))
small_net.add_node(nodes.BatchNormNode('BatchNorm_5', (32,)))
small_net.add_node(nodes.ReLUNode('ReLU_6', (32,)))
small_net.add_node(nodes.FullyConnectedNode('Linear_7', (32,), 10))

onnx_net = conv.ONNXConverter().from_neural_network(small_net)
onnx.save(onnx_net.onnx_network, "FMNIST_Example.onnx")

# Loading of the dataset of interest
transform = tr.Compose([
    tr.ToTensor(),
    tr.Normalize(1, 0.5),
def combine_batchnorm1d_net(network: networks.SequentialNetwork) -> networks.SequentialNetwork:
    """
    Utility function to combine all the FullyConnectedNodes followed by BatchNorm1DNodes
    into corresponding FullyConnectedNodes.

    Parameters
    ----------
    network : SequentialNetwork
        Sequential Network of interest whose nodes we want to combine.

    Returns
    ----------
    SequentialNetwork
        Corresponding Sequential Network with the combined nodes.

    """

    if not network.up_to_date:

        for alt_rep in network.alt_rep_cache:

            if alt_rep.up_to_date:

                if isinstance(alt_rep, cv.PyTorchNetwork):
                    pytorch_cv = cv.PyTorchConverter()
                    network = pytorch_cv.to_neural_network(alt_rep)
                elif isinstance(alt_rep, cv.ONNXNetwork):
                    onnx_cv = cv.ONNXConverter()
                    network = onnx_cv.to_neural_network(alt_rep)
                else:
                    raise NotImplementedError

                break

    combined_network = networks.SequentialNetwork(network.identifier + '_combined')

    current_node = network.get_first_node()
    node_index = 1
    while current_node is not None and network.get_next_node(current_node) is not None:

        next_node = network.get_next_node(current_node)
        if isinstance(current_node, nodes.FullyConnectedNode) and isinstance(next_node, nodes.BatchNorm1DNode):
            combined_node = combine_batchnorm1d(current_node, next_node)
            combined_node.identifier = f"Combined_Linear_{node_index}"
            combined_network.add_node(combined_node)
            next_node = network.get_next_node(next_node)

        elif isinstance(current_node, nodes.FullyConnectedNode):
            identifier = f"Linear_{node_index}"
            new_node = nodes.FullyConnectedNode(identifier, current_node.in_features, current_node.out_features,
                                                current_node.weight, current_node.bias)
            combined_network.add_node(new_node)

        elif isinstance(current_node, nodes.ReLUNode):
            identifier = f"ReLU_{node_index}"
            new_node = nodes.ReLUNode(identifier, current_node.num_features)
            combined_network.add_node(new_node)

        else:
            raise NotImplementedError

        node_index += 1
        current_node = next_node

    if isinstance(current_node, nodes.FullyConnectedNode):
        identifier = f"Linear_{node_index}"
        new_node = nodes.FullyConnectedNode(identifier, current_node.in_features, current_node.out_features,
                                            current_node.weight, current_node.bias)
        combined_network.add_node(new_node)
    elif isinstance(current_node, nodes.ReLUNode):
        identifier = f"ReLU_{node_index}"
        new_node = nodes.ReLUNode(identifier, current_node.num_features)
        combined_network.add_node(new_node)
    else:
        raise NotImplementedError

    return combined_network
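# Hedged usage sketch for combine_batchnorm1d_net (not from the original source): it assumes the
# node constructor signatures used inside the function above, i.e. FullyConnectedNode(identifier,
# in_features, out_features, weight, bias), BatchNorm1DNode(identifier, num_features, weight, bias,
# running_mean, running_var, eps, momentum) and ReLUNode(identifier, num_features). The network
# name and parameter values are illustrative only.
import numpy as np


def combine_batchnorm1d_net_example() -> networks.SequentialNetwork:
    rng = np.random.default_rng(0)
    net = networks.SequentialNetwork("BN_Folding_Example")
    net.add_node(nodes.FullyConnectedNode("Linear_1", 4, 8,
                                          rng.normal(size=(8, 4)), rng.normal(size=8)))
    net.add_node(nodes.BatchNorm1DNode("BatchNorm_2", 8,
                                       np.ones(8), np.zeros(8),   # weight (gamma), bias (beta)
                                       np.zeros(8), np.ones(8),   # running_mean, running_var
                                       1e-5, 0.1))                # eps, momentum
    net.add_node(nodes.ReLUNode("ReLU_3", 8))
    # The result should contain a single Combined_Linear node followed by the ReLU node.
    return combine_batchnorm1d_net(net)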
def to_neural_network(self, alt_rep: ONNXNetwork) -> networks.NeuralNetwork:
    """
    Convert the ONNX representation of interest to the internal one.

    Parameters
    ----------
    alt_rep : ONNXNetwork
        The ONNX Representation to convert.

    Returns
    ----------
    NeuralNetwork
        The Neural Network resulting from the conversion of ONNX Representation.

    """

    identifier = alt_rep.identifier.replace("_onnx", "")
    network = networks.SequentialNetwork(identifier)

    parameters = {}
    for initializer in alt_rep.onnx_network.graph.initializer:
        parameters[initializer.name] = onnx.numpy_helper.to_array(initializer)

    shape_info = {}
    for value_info in alt_rep.onnx_network.graph.value_info:
        shape = []
        for dim in value_info.type.tensor_type.shape.dim:
            shape.append(dim.dim_value)
        shape_info[value_info.name] = shape

    node_index = 1
    for node in alt_rep.onnx_network.graph.node:

        if node.op_type == "Relu":
            # We assume that the real input of the node is always the first element of node.input
            # and that the shape is [batch_placeholder, real_size] for the inputs.
            num_features = shape_info[node.input[0]][1]
            network.add_node(nodes.ReLUNode(f"ReLU_{node_index}", num_features))

        elif node.op_type == "Sigmoid":
            num_features = shape_info[node.input[0]][1]
            network.add_node(nodes.SigmoidNode(f"Sigmoid_{node_index}", num_features))

        elif node.op_type == "Gemm":
            # We assume that the weight tensor is always the second element of node.input and the
            # bias tensor is always the third.
            # N.B.: The Marabou procedure for reading ONNX models does not consider the attributes
            # transA and transB, therefore we need to transpose the weight matrix.
            weight = parameters[node.input[1]].T
            bias = parameters[node.input[2]]
            in_features = weight.shape[1]
            out_features = weight.shape[0]
            network.add_node(nodes.FullyConnectedNode(f"Linear_{node_index}", in_features, out_features,
                                                      weight, bias))

        elif node.op_type == "BatchNormalization":
            # We assume that the real input is always the first element of node.input, the weight tensor
            # is always the second, the bias tensor is always the third, the running_mean always the fourth
            # and the running_var always the fifth.
            num_features = shape_info[node.input[0]][1]
            weight = parameters[node.input[1]]
            bias = parameters[node.input[2]]
            running_mean = parameters[node.input[3]]
            running_var = parameters[node.input[4]]
            # We assume that eps is always the first attribute and momentum is always the second.
            eps = node.attribute[0].f
            momentum = node.attribute[1].f
            network.add_node(nodes.BatchNorm1DNode(f"BatchNorm_{node_index}", num_features, weight, bias,
                                                   running_mean, running_var, eps, momentum))

        else:
            raise NotImplementedError

        node_index += 1

    return network
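# Hedged round-trip sketch (not from the original source): load a serialized ONNX model and convert
# it to the internal representation through the converter above. It is assumed that this helper sits
# in the same module as the converter class, that the class is named ONNXConverter, and that the
# alternative representation can be built as ONNXNetwork(identifier, onnx_model); only the
# .onnx_network attribute and the converter methods are confirmed by the surrounding code.
def load_onnx_model_example(path: str) -> networks.NeuralNetwork:
    onnx_model = onnx.load(path)                        # standard onnx API
    alt_rep = ONNXNetwork("loaded_model", onnx_model)   # constructor signature is an assumption
    return ONNXConverter().to_neural_network(alt_rep)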
def to_neural_network(self, alt_rep: PyTorchNetwork) -> networks.NeuralNetwork:
    """
    Convert the PyTorch representation of interest to the internal one.

    Parameters
    ----------
    alt_rep : PyTorchNetwork
        The PyTorch Representation to convert.

    Returns
    ----------
    NeuralNetwork
        The Neural Network resulting from the conversion of PyTorch Representation.

    """

    identifier = alt_rep.identifier.replace('_pytorch', '')
    network = networks.SequentialNetwork(identifier=identifier)

    node_index = 0
    size_prev_output = 0
    alt_rep.pytorch_network.cpu()
    for m in alt_rep.pytorch_network.modules():

        new_node = None

        if isinstance(m, torch.nn.ReLU):
            new_node = nodes.ReLUNode(identifier='ReLU_{}'.format(node_index),
                                      num_features=size_prev_output)

        elif isinstance(m, torch.nn.Sigmoid):
            new_node = nodes.SigmoidNode(identifier='Sigmoid_{}'.format(node_index),
                                         num_features=size_prev_output)

        elif isinstance(m, torch.nn.Linear):
            in_features = m.in_features
            out_features = m.out_features
            weight = m.weight.detach().numpy()
            bias = m.bias.detach().numpy()
            new_node = nodes.FullyConnectedNode(identifier='FullyConnected_{}'.format(node_index),
                                                in_features=in_features,
                                                out_features=out_features,
                                                weight=weight,
                                                bias=bias)
            size_prev_output = out_features

        elif isinstance(m, torch.nn.BatchNorm1d):
            num_features = m.num_features
            eps = m.eps
            momentum = m.momentum
            track_running_stats = m.track_running_stats
            affine = m.affine
            weight = m.weight.detach().numpy()
            bias = m.bias.detach().numpy()
            running_mean = m.running_mean.numpy()
            running_var = m.running_var.numpy()
            new_node = nodes.BatchNorm1DNode(identifier='BatchNorm1D_{}'.format(node_index),
                                             num_features=num_features,
                                             weight=weight,
                                             bias=bias,
                                             running_mean=running_mean,
                                             running_var=running_var,
                                             eps=eps,
                                             momentum=momentum,
                                             affine=affine,
                                             track_running_stats=track_running_stats)
            size_prev_output = num_features

        if new_node is not None:
            node_index += 1
            network.add_node(new_node)

    return network
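# Hedged round-trip sketch (not from the original source): convert a small internal network to its
# PyTorch representation and back, mirroring the node-level tests earlier in this section. It is
# assumed that this helper sits in the same module as the converter class (named PyTorchConverter,
# as used elsewhere in this code) and that FullyConnectedNode/ReLUNode accept the
# (identifier, feature counts, weight, bias) signatures used by the converter above; the network
# name and weights are illustrative.
import numpy as np


def pytorch_roundtrip_example() -> networks.NeuralNetwork:
    rng = np.random.default_rng(0)
    net = networks.SequentialNetwork(identifier="RoundTrip_Example")
    net.add_node(nodes.FullyConnectedNode("FullyConnected_0", 16, 8,
                                          rng.normal(size=(8, 16)), rng.normal(size=8)))
    net.add_node(nodes.ReLUNode("ReLU_1", 8))
    converter = PyTorchConverter()
    return converter.to_neural_network(converter.from_neural_network(net))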