def fully_connected_node_test(converter: conversion.ConversionStrategy, has_bias: bool):

    print("FULLY CONNECTED NODE TEST")
    start_network = network.SequentialNetwork("NET_TEST", "X")
    start_network.add_node(
        nodes.FullyConnectedNode("FullyConnected_1", (3, 4, 5), 5, has_bias=has_bias))

    alt_network = converter.from_neural_network(start_network)
    end_network = converter.to_neural_network(alt_network)
    assert isinstance(end_network, network.SequentialNetwork)

    start_node = start_network.get_first_node()
    end_node = end_network.get_first_node()
    assert isinstance(start_node, nodes.FullyConnectedNode)
    assert isinstance(end_node, nodes.FullyConnectedNode)

    assert start_node.in_dim == end_node.in_dim
    assert start_node.out_dim == end_node.out_dim
    assert start_node.in_features == end_node.in_features
    assert start_node.out_features == end_node.out_features
    assert (start_node.weight == end_node.weight).all()

    if start_node.bias is not None:
        assert (start_node.bias == end_node.bias).all()
    else:
        assert start_node.bias is None and end_node.bias is None

    assert start_node.has_bias == end_node.has_bias
    assert start_node.identifier == end_node.identifier
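
# A minimal sketch of how the test above could be driven: both converter
# strategies appear elsewhere in this codebase, but this driver itself is an
# assumption, not part of the original test module.
if __name__ == "__main__":
    for converter in [conversion.ONNXConverter(), conversion.PyTorchConverter()]:
        for has_bias in [True, False]:
            fully_connected_node_test(converter, has_bias)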
def combine_batchnorm1d(linear: nodes.FullyConnectedNode,
                        batchnorm: nodes.BatchNorm1DNode) -> nodes.FullyConnectedNode:
    """
    Utility function to combine a BatchNorm1DNode with a FullyConnectedNode
    into a single equivalent FullyConnectedNode.

    Parameters
    ----------
    linear : FullyConnectedNode
        FullyConnectedNode to combine.
    batchnorm : BatchNorm1DNode
        BatchNorm1DNode to combine.

    Returns
    ----------
    FullyConnectedNode
        The FullyConnectedNode resulting from the fusion of the two input nodes.

    """

    l_weight = torch.from_numpy(linear.weight)
    l_bias = torch.from_numpy(linear.bias)
    bn_running_mean = torch.from_numpy(batchnorm.running_mean)
    bn_running_var = torch.from_numpy(batchnorm.running_var)
    bn_weight = torch.from_numpy(batchnorm.weight)
    bn_bias = torch.from_numpy(batchnorm.bias)
    bn_eps = batchnorm.eps

    # Fused bias: b' = gamma / sqrt(var + eps) * (b - running_mean) + beta
    fused_bias = torch.div(bn_weight, torch.sqrt(bn_running_var + bn_eps))
    fused_bias = torch.mul(fused_bias, torch.sub(l_bias, bn_running_mean))
    fused_bias = torch.add(fused_bias, bn_bias)

    # Fused weight: W' = diag(gamma / sqrt(var + eps)) @ W
    fused_weight = torch.diag(
        torch.div(bn_weight, torch.sqrt(bn_running_var + bn_eps)))
    fused_weight = torch.matmul(fused_weight, l_weight)

    fused_linear = nodes.FullyConnectedNode(linear.identifier, linear.in_features,
                                            linear.out_features, fused_weight.numpy(),
                                            fused_bias.numpy())

    return fused_linear
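
# Sanity-check sketch for the fusion above (a plain NumPy check written for
# illustration, not part of the library): for any input x, the fused affine
# map should match inference-mode batchnorm(linear(x)).
import numpy as np

rng = np.random.default_rng(0)
W = rng.standard_normal((4, 3)).astype(np.float32)
b = rng.standard_normal(4).astype(np.float32)
gamma = rng.standard_normal(4).astype(np.float32)
beta = rng.standard_normal(4).astype(np.float32)
mean = rng.standard_normal(4).astype(np.float32)
var = rng.random(4).astype(np.float32) + 0.1
eps = 1e-5
x = rng.standard_normal(3).astype(np.float32)

# Reference: linear layer followed by inference-mode batch normalization.
y_ref = gamma * ((W @ x + b - mean) / np.sqrt(var + eps)) + beta

# Fused form, mirroring combine_batchnorm1d.
scale = gamma / np.sqrt(var + eps)
y_fused = (np.diag(scale) @ W) @ x + (scale * (b - mean) + beta)

assert np.allclose(y_ref, y_fused, atol=1e-5)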
if property_ids[i] == "Property 1":
    out_pred_bias = (np.array(unsafe_vecs[i]) - outputMean) / outputRange
else:
    out_pred_bias = np.array(unsafe_vecs[i])

# Construction of our internal representation for the ACAS network.
network = networks.SequentialNetwork(f"ACAS_XU_{networks_ids[i][j]}", "X")
for k in range(len(weights)):

    new_fc_node = nodes.FullyConnectedNode(f"FC_{k}", (weights[k].shape[1],),
                                           weights[k].shape[0], weights[k],
                                           biases[k], True)
    network.add_node(new_fc_node)

    if k < len(weights) - 1:
        new_relu_node = nodes.ReLUNode(f"ReLU_{k}", (weights[k].shape[0],))
        network.add_node(new_relu_node)

# Verification of the network of interest for the property of interest
prop = ver.NeVerProperty(in_pred_mat, in_pred_bias, [out_pred_mat], [out_pred_bias])

net_id = networks_ids[i][j]
p_id = property_ids[i]
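
# Hypothetical continuation of the fragment above: the property and the
# network would now be handed to a verification strategy. NeverVerification
# is pynever's verification class, but the zero-argument constructor and the
# default heuristic used here are assumptions; check
# pynever.strategies.verification for the exact signature.
verifier = ver.NeverVerification()
safe = verifier.verify(network, prop)
print(f"Network: {net_id}, Property: {p_id}, Safe: {safe}")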
import torch
import copy
import logging

# Logger Setup
logger = logging.getLogger("pynever")
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

# Building of the network of interest
small_net = networks.SequentialNetwork("SmallNetwork", "X")
small_net.add_node(nodes.FullyConnectedNode('Linear_1', (784,), 64))
small_net.add_node(nodes.BatchNormNode('BatchNorm_2', (64,)))
small_net.add_node(nodes.ReLUNode('ReLU_3', (64,)))
small_net.add_node(nodes.FullyConnectedNode('Linear_4', (64,), 32))
small_net.add_node(nodes.BatchNormNode('BatchNorm_5', (32,)))
small_net.add_node(nodes.ReLUNode('ReLU_6', (32,)))
small_net.add_node(nodes.FullyConnectedNode('Linear_7', (32,), 10))

onnx_net = conv.ONNXConverter().from_neural_network(small_net)
onnx.save(onnx_net.onnx_network, "FMNIST_Example.onnx")

# Loading of the dataset of interest
transform = tr.Compose([
    tr.ToTensor(),
    tr.Normalize(1, 0.5),
    tr.Lambda(lambda x: torch.flatten(x))
])
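
# Hypothetical continuation: the transform above would typically feed a
# Fashion-MNIST dataset (the "FMNIST" in the saved file name is the only hint
# here). The torchvision import alias, root path, and dataset choice are
# assumptions about how this script continues.
import torchvision.datasets as tv_datasets

train_set = tv_datasets.FashionMNIST('data/', train=True, download=True,
                                     transform=transform)
test_set = tv_datasets.FashionMNIST('data/', train=False, download=True,
                                    transform=transform)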
def combine_batchnorm1d_net(network: networks.SequentialNetwork) -> networks.SequentialNetwork:
    """
    Utility function to combine all the FullyConnectedNodes followed by
    BatchNorm1DNodes into corresponding FullyConnectedNodes.

    Parameters
    ----------
    network : SequentialNetwork
        Sequential Network of interest of which we want to combine the nodes.

    Returns
    ----------
    SequentialNetwork
        Corresponding Sequential Network with the combined nodes.

    """

    # Make sure we work on an up-to-date internal representation: if an
    # alternative representation is more recent, convert it back first.
    if not network.up_to_date:

        for alt_rep in network.alt_rep_cache:

            if alt_rep.up_to_date:

                if isinstance(alt_rep, cv.PyTorchNetwork):
                    pytorch_cv = cv.PyTorchConverter()
                    network = pytorch_cv.to_neural_network(alt_rep)
                elif isinstance(alt_rep, cv.ONNXNetwork):
                    onnx_cv = cv.ONNXConverter()
                    network = onnx_cv.to_neural_network(alt_rep)
                else:
                    raise NotImplementedError

                break

    combined_network = networks.SequentialNetwork(network.identifier + '_combined')

    current_node = network.get_first_node()
    node_index = 1
    while current_node is not None and network.get_next_node(current_node) is not None:

        next_node = network.get_next_node(current_node)
        if isinstance(current_node, nodes.FullyConnectedNode) and \
                isinstance(next_node, nodes.BatchNorm1DNode):
            combined_node = combine_batchnorm1d(current_node, next_node)
            combined_node.identifier = f"Combined_Linear_{node_index}"
            combined_network.add_node(combined_node)
            # Skip the BatchNorm node that was just fused into the linear one.
            next_node = network.get_next_node(next_node)
        elif isinstance(current_node, nodes.FullyConnectedNode):
            identifier = f"Linear_{node_index}"
            new_node = nodes.FullyConnectedNode(identifier, current_node.in_features,
                                                current_node.out_features,
                                                current_node.weight, current_node.bias)
            combined_network.add_node(new_node)
        elif isinstance(current_node, nodes.ReLUNode):
            identifier = f"ReLU_{node_index}"
            new_node = nodes.ReLUNode(identifier, current_node.num_features)
            combined_network.add_node(new_node)
        else:
            raise NotImplementedError

        node_index += 1
        current_node = next_node

    # Handle the last node of the network, which the loop above leaves out.
    if isinstance(current_node, nodes.FullyConnectedNode):
        identifier = f"Linear_{node_index}"
        new_node = nodes.FullyConnectedNode(identifier, current_node.in_features,
                                            current_node.out_features,
                                            current_node.weight, current_node.bias)
        combined_network.add_node(new_node)
    elif isinstance(current_node, nodes.ReLUNode):
        identifier = f"ReLU_{node_index}"
        new_node = nodes.ReLUNode(identifier, current_node.num_features)
        combined_network.add_node(new_node)
    else:
        raise NotImplementedError

    return combined_network
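
# Minimal usage sketch for the pass above, on a toy network with random
# parameters. The network built here is an assumption made purely for
# illustration; the node constructor argument order follows the converters
# shown later in this section.
import numpy as np

toy_net = networks.SequentialNetwork("ToyNet")
toy_net.add_node(nodes.FullyConnectedNode('Linear_1', 8, 4,
                                          np.random.randn(4, 8), np.random.randn(4)))
toy_net.add_node(nodes.BatchNorm1DNode('BatchNorm_2', 4,
                                       np.random.randn(4), np.random.randn(4),
                                       np.random.randn(4), np.abs(np.random.randn(4))))
toy_net.add_node(nodes.ReLUNode('ReLU_3', 4))

combined = combine_batchnorm1d_net(toy_net)
# Expected layout after fusion: Combined_Linear_1 -> ReLU_2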
import pynever.networks as networks
import pynever.nodes as nodes
import pynever.utilities as util
import pynever.datasets as dt
import pynever.strategies.training as training
import pynever.strategies.pruning as pruning
import pynever.strategies.conversion as conversion
import copy

# Building of the network of interest
small_net = networks.SequentialNetwork("SmallNetwork")
small_net.add_node(nodes.FullyConnectedNode('Linear_1', 784, 64))
small_net.add_node(nodes.BatchNorm1DNode('BatchNorm_2', 64))
small_net.add_node(nodes.ReLUNode('ReLU_3', 64))
small_net.add_node(nodes.FullyConnectedNode('Linear_4', 64, 32))
small_net.add_node(nodes.BatchNorm1DNode('BatchNorm_5', 32))
small_net.add_node(nodes.ReLUNode('ReLU_6', 32))
small_net.add_node(nodes.FullyConnectedNode('Linear_7', 32, 16))
small_net.add_node(nodes.BatchNorm1DNode('BatchNorm_8', 16))
small_net.add_node(nodes.ReLUNode('ReLU_9', 16))
small_net.add_node(nodes.FullyConnectedNode('Linear_10', 16, 10))

# Loading of the dataset of interest
dataset = dt.MNISTDataset()

# Initialization of the training and pruning parameters
cuda = False  # If possible the experiment should be run with CUDA, otherwise it will take quite some time.
epochs = 100
train_batch_size = 128
test_batch_size = 64
learning_rate = 0.1
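
# Hypothetical training sketch using the parameters above. The conversion to
# a torch module goes through PyTorchConverter (shown later in this section);
# the DataLoader over dt.MNISTDataset and the loss/optimizer choices are
# assumptions about how the original script continues.
import torch

device = torch.device('cuda' if cuda and torch.cuda.is_available() else 'cpu')
torch_net = conversion.PyTorchConverter().from_neural_network(small_net).pytorch_network
torch_net.to(device)

optimizer = torch.optim.SGD(torch_net.parameters(), lr=learning_rate)
loss_fn = torch.nn.CrossEntropyLoss()
loader = torch.utils.data.DataLoader(dataset, batch_size=train_batch_size, shuffle=True)

for epoch in range(epochs):
    for inputs, targets in loader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        loss = loss_fn(torch_net(inputs.float()), targets.long())
        loss.backward()
        optimizer.step()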
def to_neural_network(self, alt_rep: PyTorchNetwork) -> networks.NeuralNetwork:
    """
    Convert the PyTorch representation of interest to the internal one.

    Parameters
    ----------
    alt_rep : PyTorchNetwork
        The PyTorch Representation to convert.

    Returns
    ----------
    NeuralNetwork
        The Neural Network resulting from the conversion of the PyTorch Representation.

    """

    identifier = alt_rep.identifier.replace('_pytorch', '')
    network = networks.SequentialNetwork(identifier=identifier)

    node_index = 0
    size_prev_output = 0
    alt_rep.pytorch_network.cpu()
    for m in alt_rep.pytorch_network.modules():

        new_node = None
        if isinstance(m, torch.nn.ReLU):
            new_node = nodes.ReLUNode(identifier='ReLU_{}'.format(node_index),
                                      num_features=size_prev_output)
        elif isinstance(m, torch.nn.Sigmoid):
            new_node = nodes.SigmoidNode(identifier='Sigmoid_{}'.format(node_index),
                                         num_features=size_prev_output)
        elif isinstance(m, torch.nn.Linear):
            in_features = m.in_features
            out_features = m.out_features
            weight = m.weight.detach().numpy()
            bias = m.bias.detach().numpy()
            new_node = nodes.FullyConnectedNode(
                identifier='FullyConnected_{}'.format(node_index),
                in_features=in_features, out_features=out_features,
                weight=weight, bias=bias)
            size_prev_output = out_features
        elif isinstance(m, torch.nn.BatchNorm1d):
            num_features = m.num_features
            eps = m.eps
            momentum = m.momentum
            track_running_stats = m.track_running_stats
            affine = m.affine
            weight = m.weight.detach().numpy()
            bias = m.bias.detach().numpy()
            running_mean = m.running_mean.numpy()
            running_var = m.running_var.numpy()
            new_node = nodes.BatchNorm1DNode(
                identifier='BatchNorm1D_{}'.format(node_index),
                num_features=num_features, weight=weight, bias=bias,
                running_mean=running_mean, running_var=running_var,
                eps=eps, momentum=momentum, affine=affine,
                track_running_stats=track_running_stats)
            size_prev_output = num_features

        if new_node is not None:
            node_index += 1
            network.add_node(new_node)

    return network
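
# Round-trip sketch in the spirit of the node test at the top of this
# section: convert an internal network to PyTorch and back, and check that
# the weights survive unchanged. This driver is an assumption, not part of
# the converter module.
net = networks.SequentialNetwork("RoundTrip")
net.add_node(nodes.FullyConnectedNode('FC_0', 3, 2))
net.add_node(nodes.ReLUNode('ReLU_1', 2))

converter = PyTorchConverter()
recovered = converter.to_neural_network(converter.from_neural_network(net))
assert (net.get_first_node().weight == recovered.get_first_node().weight).all()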
def to_neural_network(self, alt_rep: ONNXNetwork) -> networks.NeuralNetwork:
    """
    Convert the ONNX representation of interest to the internal one.

    Parameters
    ----------
    alt_rep : ONNXNetwork
        The ONNX Representation to convert.

    Returns
    ----------
    NeuralNetwork
        The Neural Network resulting from the conversion of the ONNX Representation.

    """

    identifier = alt_rep.identifier.replace("_onnx", "")
    network = networks.SequentialNetwork(identifier)

    parameters = {}
    for initializer in alt_rep.onnx_network.graph.initializer:
        parameters[initializer.name] = onnx.numpy_helper.to_array(initializer)

    shape_info = {}
    for value_info in alt_rep.onnx_network.graph.value_info:
        shape = []
        for dim in value_info.type.tensor_type.shape.dim:
            shape.append(dim.dim_value)
        shape_info[value_info.name] = shape

    node_index = 1
    for node in alt_rep.onnx_network.graph.node:

        if node.op_type == "Relu":
            # We assume that the real input of the node is always the first element
            # of node.input and that the shape is [batch_placeholder, real_size]
            # for the inputs.
            num_features = shape_info[node.input[0]][1]
            network.add_node(nodes.ReLUNode(f"ReLU_{node_index}", num_features))

        elif node.op_type == "Sigmoid":
            num_features = shape_info[node.input[0]][1]
            network.add_node(nodes.SigmoidNode(f"Sigmoid_{node_index}", num_features))

        elif node.op_type == "Gemm":
            # We assume that the weight tensor is always the second element of
            # node.input and the bias tensor is always the third.
            # N.B.: The Marabou procedure for reading ONNX models does not consider
            # the attributes transA and transB, therefore we need to transpose the
            # weight matrix.
            weight = parameters[node.input[1]].T
            bias = parameters[node.input[2]]
            in_features = weight.shape[1]
            out_features = weight.shape[0]
            network.add_node(nodes.FullyConnectedNode(f"Linear_{node_index}",
                                                      in_features, out_features,
                                                      weight, bias))

        elif node.op_type == "BatchNormalization":
            # We assume that the real input is always the first element of node.input,
            # the weight tensor is always the second, the bias tensor is always the
            # third, the running_mean always the fourth and the running_var always
            # the fifth.
            num_features = shape_info[node.input[0]][1]
            weight = parameters[node.input[1]]
            bias = parameters[node.input[2]]
            running_mean = parameters[node.input[3]]
            running_var = parameters[node.input[4]]
            # We assume that eps is always the first attribute and momentum is
            # always the second.
            eps = node.attribute[0].f
            momentum = node.attribute[1].f
            network.add_node(nodes.BatchNorm1DNode(f"BatchNorm_{node_index}",
                                                   num_features, weight, bias,
                                                   running_mean, running_var,
                                                   eps, momentum))

        else:
            raise NotImplementedError

        node_index += 1

    return network
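
# Analogous round-trip sketch for the ONNX path, again modeled on the node
# test earlier in this section; the driver itself is an assumption.
net = networks.SequentialNetwork("OnnxRoundTrip")
net.add_node(nodes.FullyConnectedNode('FC_0', 5, 3))
net.add_node(nodes.SigmoidNode('Sigmoid_1', 3))

converter = ONNXConverter()
recovered = converter.to_neural_network(converter.from_neural_network(net))
assert (net.get_first_node().weight == recovered.get_first_node().weight).all()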