Example #1
def normalizeNNet(readNNetFile, writeNNetFile=None):
    '''
    Fold the input/output normalization constants of a .nnet network into its
    weights and biases so the network can be evaluated on raw, un-normalized inputs.

    Args:
        readNNetFile (str): .nnet file to read
        writeNNetFile (str, optional): If given, write the adjusted network to this
            .nnet file and return None; otherwise return (weights, biases)
    '''
    weights, biases, inputMins, inputMaxes, means, ranges = readNNet(
        readNNetFile, withNorm=True)

    numInputs = weights[0].shape[0]
    numOutputs = weights[-1].shape[1]

    # Adjust weights and biases of first layer
    for i in range(numInputs):
        weights[0][i, :] /= ranges[i]
    biases[0] -= np.matmul(weights[0].T, means[:-1])

    # Adjust weights and biases of last layer
    weights[-1] *= ranges[-1]
    biases[-1] *= ranges[-1]
    biases[-1] += means[-1]

    # Nominal mean and range vectors
    means = np.zeros(numInputs + 1)
    ranges = np.ones(numInputs + 1)

    if writeNNetFile is not None:
        writeNNet(weights, biases, inputMins, inputMaxes, means, ranges,
                  writeNNetFile)
        return None
    return weights, biases
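
A minimal usage sketch for the example above. The import paths follow the sisl/NNet repository layout and the file names are illustrative, not taken from the example:

import numpy as np
from NNet.utils.readNNet import readNNet
from NNet.utils.writeNNet import writeNNet

# Write a copy of the network with the normalization folded into the weights
# (illustrative paths; any .nnet file with normalization constants works):
normalizeNNet("nnet/TestNetwork.nnet", "nnet/TestNetwork_normalized.nnet")

# Or keep the adjusted weights and biases in memory instead of writing a file:
weights, biases = normalizeNNet("nnet/TestNetwork.nnet")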
Example #2
def nnet2pb(nnetFile,
            pbFile="",
            output_node_names="y_out",
            normalizeNetwork=False):
    '''
    Read a .nnet file, create a frozen TensorFlow graph, and save it to a .pb file

    Args:
        nnetFile (str): A .nnet file to convert to TensorFlow format
        pbFile (str, optional): Name for the created .pb file. Default: ""
        output_node_names (str, optional): Name of the final operation in the TensorFlow graph. Default: "y_out"
        normalizeNetwork (bool, optional): If True, adapt the network weights and biases so that
            networks and inputs do not need to be normalized. Default: False
    '''
    if normalizeNetwork:
        weights, biases = normalizeNNet(nnetFile)
    else:
        weights, biases = readNNet(nnetFile)
    inputSize = weights[0].shape[1]

    # Default .pb filename if none is specified
    if pbFile == "":
        pbFile = nnetFile[:-4] + 'pb'

    # Reset tensorflow and load a session using only CPUs
    tf.reset_default_graph()
    sess = tf.Session()

    # Define model and assign values to tensors
    currentTensor = tf.placeholder(tf.float32, [None, inputSize], name='input')
    for i in range(len(weights)):
        W = tf.get_variable("W%d" % i, shape=weights[i].T.shape)
        b = tf.get_variable("b%d" % i, shape=biases[i].shape)

        # Use ReLU for all but the last operation; give the last operation the requested output name
        if i != len(weights) - 1:
            currentTensor = tf.nn.relu(tf.matmul(currentTensor, W) + b)
        else:
            currentTensor = tf.add(tf.matmul(currentTensor, W),
                                   b,
                                   name=output_node_names)

        # Assign values to tensors
        sess.run(tf.assign(W, weights[i].T))
        sess.run(tf.assign(b, biases[i]))

    # Freeze the graph to write the pb file
    freeze_graph(sess, pbFile, output_node_names)
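
A minimal usage sketch, assuming the example's dependencies (TensorFlow 1.x, plus readNNet, normalizeNNet, and freeze_graph from the NNet utilities) are already imported; file names are illustrative:

# Convert with defaults; writes nnet/TestNetwork.pb next to the input file:
nnet2pb("nnet/TestNetwork.nnet")

# Explicit .pb name and output node name, with the normalization folded in first:
nnet2pb("nnet/TestNetwork.nnet", pbFile="TestNetwork.pb",
        output_node_names="y_out", normalizeNetwork=True)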
Example #3
    def test_read(self):

        nnetFile = "nnet/TestNetwork.nnet"
        testInput = np.array([1.0, 1.0, 1.0, 100.0, 1.0]).astype(np.float32)
        nnet = NNet(nnetFile)
        weights, biases, inputMins, inputMaxes, means, ranges = readNNet(
            nnetFile, withNorm=True)

        self.assertTrue(len(weights) == len(nnet.weights))
        self.assertTrue(len(biases) == len(nnet.biases))
        self.assertTrue(len(inputMins) == len(nnet.mins))
        self.assertTrue(len(inputMaxes) == len(nnet.maxes))
        self.assertTrue(len(means) == len(nnet.means))
        self.assertTrue(len(ranges) == len(nnet.ranges))
        for w1, w2 in zip(weights, nnet.weights):
            self.assertTrue(np.all(w1 == w2))
        for b1, b2 in zip(biases, nnet.biases):
            self.assertTrue(np.all(b1 == b2))
        self.assertTrue(np.all(inputMins == nnet.mins))
        self.assertTrue(np.all(inputMaxes == nnet.maxes))
        self.assertTrue(np.all(means == nnet.means))
        self.assertTrue(np.all(ranges == nnet.ranges))
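
The testInput defined above is not used by the assertions; a hedged sketch of how it could be exercised, assuming the NNet class exposes an evaluate_network method as in the sisl/NNet Python utilities:

# Evaluate the loaded network on the sample input from the test
# (evaluate_network is assumed to handle input/output normalization internally):
outputs = nnet.evaluate_network(testInput)
print(outputs)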
Example #4
def nnet2onnx(nnetFile,
              onnxFile="",
              outputVar="y_out",
              inputVar="X",
              normalizeNetwork=False):
    '''
    Convert a .nnet file to ONNX format

    Args:
        nnetFile: (string) .nnet file to convert to ONNX
        onnxFile: (string) Optional, name for the created .onnx file
        outputVar: (string) Optional, name of the output variable in the ONNX graph
        inputVar: (string) Optional, name of the input variable in the ONNX graph
        normalizeNetwork: (bool) If True, adapt the network weights and biases so that
                                 networks and inputs do not need to be normalized. Default is False.
    '''
    if normalizeNetwork:
        weights, biases = normalizeNNet(nnetFile)
    else:
        weights, biases = readNNet(nnetFile)

    inputSize = weights[0].shape[1]
    outputSize = weights[-1].shape[0]
    numLayers = len(weights)

    # Default onnx filename if none specified
    if onnxFile == "":
        onnxFile = nnetFile[:-4] + 'onnx'

    # Initialize graph
    inputs = [
        helper.make_tensor_value_info(inputVar, TensorProto.FLOAT, [inputSize])
    ]
    outputs = [
        helper.make_tensor_value_info(outputVar, TensorProto.FLOAT,
                                      [outputSize])
    ]
    operations = []
    initializers = []

    # Loop through each layer of the network and add operations and initializers
    for i in range(numLayers):

        # Use outputVar for the last layer
        outputName = "H%d" % i
        if i == numLayers - 1:
            outputName = outputVar

        # Weight matrix multiplication
        operations.append(
            helper.make_node("MatMul", ["W%d" % i, inputVar], ["M%d" % i]))
        initializers.append(
            numpy_helper.from_array(weights[i].astype(np.float32),
                                    name="W%d" % i))

        # Bias add
        operations.append(
            helper.make_node("Add", ["M%d" % i, "B%d" % i], [outputName]))
        initializers.append(
            numpy_helper.from_array(biases[i].astype(np.float32),
                                    name="B%d" % i))

        # Use Relu activation for all layers except the last layer
        if i < numLayers - 1:
            operations.append(
                helper.make_node("Relu", ["H%d" % i], ["R%d" % i]))
            inputVar = "R%d" % i

    # Create the graph and model in onnx
    graph_proto = helper.make_graph(operations, "nnet2onnx_Model", inputs,
                                    outputs, initializers)
    model_def = helper.make_model(graph_proto)

    # Print statements
    print("Converted NNet model at %s" % nnetFile)
    print("    to an ONNX model at %s" % onnxFile)

    # Additional print statements if desired
    #print("\nReadable GraphProto:\n")
    #print(helper.printable_graph(graph_proto))

    # Save the ONNX model
    onnx.save(model_def, onnxFile)
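
A minimal usage sketch for the converter above; file names are illustrative, and the NNet utility imports (readNNet, normalizeNNet) plus the onnx helpers are assumed to be available:

# Convert with defaults; writes nnet/TestNetwork.onnx next to the input file:
nnet2onnx("nnet/TestNetwork.nnet")

# Explicit output file and tensor names, with the normalization folded into the weights:
nnet2onnx("nnet/TestNetwork.nnet", onnxFile="TestNetwork.onnx",
          outputVar="y_out", inputVar="X", normalizeNetwork=True)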