# Example 1: ONNX-to-Barracuda converter
def convert(source_file,
            target_file,
            trim_unused_by_output="",
            verbose=False,
            compress_f16=False):
    """
    Convert an ONNX model into a Barracuda model.

    :param source_file: path to the ONNX model file
    :param target_file: name of the file the converted model will be saved to
    :param trim_unused_by_output: regexp matching output nodes to keep in the
        model; all other unconnected nodes are removed
    :param verbose: if True, display debug messages; may also be a pre-built
        args object (Struct), which is then used as-is
    :param compress_f16: if True, float values are converted to f16
    :return: None (the converted model is written to ``target_file``)
    """
    # `verbose` doubles as either a plain flag or a fully populated args
    # object; only build the Struct in the flag case.
    if isinstance(verbose, bool):
        args = Struct()
        args.verbose = verbose
        args.print_layers = verbose
        args.print_source_json = verbose
        args.print_barracuda_json = verbose
        args.print_layer_links = verbose
        args.print_patterns = verbose
        args.print_tensors = verbose
    else:
        args = verbose

    # Load ONNX model
    print(f"Converting {source_file} to {target_file}")
    i_model = onnx.load(source_file)

    if args.print_source_json or args.verbose:
        for layer in i_model.graph.node:
            print(MessageToJson(layer) + ",")

    # Convert the source graph into Barracuda layers/tensors/memories.
    o_model = barracuda.Model()
    o_model.layers, o_input_shapes, o_model.tensors, o_model.memories = \
        process_model(i_model, args)

    # Trim unconnected nodes, keeping only those matching the output regexp.
    if trim_unused_by_output:
        o_model.layers = barracuda.trim(o_model.layers, trim_unused_by_output,
                                        args.verbose)

    # Names consumed as inputs by at least one layer.
    all_inputs = {i for l in o_model.layers for i in l.inputs}

    # Create load layers for constants.
    # NOTE(review): process_model above returns only 4 values (no globals);
    # this relies on barracuda.Model() initializing o_model.globals — confirm.
    const_tensors = [i for i in all_inputs if i in o_model.tensors]
    const_tensors += o_model.globals
    for x in const_tensors:
        tensor = o_model.tensors[x]
        # Fall back to a [1, 1, 1, 1] shape when the tensor carries no dims.
        if hasattr(tensor, 'dims') and len(tensor.dims) > 0:
            shape = adapt_input_shape(tensor.dims)
        else:
            shape = [1, 1, 1, 1]

        o_l = Struct(
            type=255,  # Load
            class_name="Const",
            name=x,
            pads=[0, 0, 0, 0],
            strides=[],
            pool_size=[],
            axis=-1,
            alpha=1,
            beta=0,
            activation=0,
            inputs=[],
            tensors=[
                Struct(name=x,
                       shape=shape,
                       data=np.reshape(get_tensor_data(tensor),
                                       shape).astype(np.float32))
            ])
        o_model.layers.insert(0, o_l)

    all_layers = {l.name for l in o_model.layers}

    # global inputs  - inputs that are NOT produced by any layer in the network
    # global outputs - outputs that are NOT feeding any layer in the network
    o_model.inputs = {
        i: o_input_shapes[i]
        for l in o_model.layers for i in l.inputs if i not in all_layers
    }
    o_model.outputs = [
        l.name for l in o_model.layers if l.name not in all_inputs
    ]

    # Optionally compress float weights to f16.
    if compress_f16:
        o_model = barracuda.compress(o_model)

    # Summary
    barracuda.summary(o_model,
                      print_layer_links=args.print_layer_links or args.verbose,
                      print_barracuda_json=args.print_barracuda_json
                      or args.verbose,
                      print_tensors=args.print_tensors or args.verbose)

    # Write to file
    barracuda.write(o_model, target_file)
    print('DONE: wrote', target_file, 'file.')
# Example 2: Keras-to-Barracuda converter
def convert(source_file,
            target_file,
            trim_unused_by_output="",
            verbose=False,
            compress_f16=False):
    """
    Convert a Keras model into a Barracuda model.

    :param source_file: path to the Keras HDF5 model file
    :param target_file: name of the file the converted model will be saved to
    :param trim_unused_by_output: regexp matching output nodes to keep in the
        model; all other unconnected nodes are removed
    :param verbose: if True, display debug messages; may also be a pre-built
        args object (Struct), which is then used as-is
    :param compress_f16: if True, float values are converted to f16
    :return: None (the converted model is written to ``target_file``)
    """
    # `verbose` doubles as either a plain flag or a fully populated args
    # object; only build the Struct in the flag case.
    if isinstance(verbose, bool):
        args = Struct()
        args.verbose = verbose
        args.print_layers = verbose
        args.print_source_json = verbose
        args.print_barracuda_json = verbose
        args.print_layer_links = verbose
        args.print_patterns = verbose
        args.print_tensors = verbose
        # Bug fix: this flag is read below but was never initialized in the
        # bool branch, raising AttributeError when `verbose` is a plain bool.
        args.print_supported_ops = verbose
    else:
        args = verbose

    if args.print_supported_ops:
        barracuda.print_known_operations(known_classes, known_activations)

    # Load Keras model
    print(f"Converting {source_file} to {target_file}")
    i_model = h5py.File(source_file, 'r')

    # The architecture is stored as a JSON blob in the HDF5 attributes.
    configJSON = json.loads(i_model.attrs['model_config'].decode('utf-8'))
    layers = configJSON['config']
    model_tensors = i_model['model_weights']

    if args.print_source_json or args.verbose:
        pprint(configJSON)

    # Convert the source graph into Barracuda layers/memories.
    o_model = barracuda.Model()
    o_model.layers, o_input_shapes, o_model.memories = \
        process_model(layers, model_tensors, args)

    # Gather patched model tensors
    for l in o_model.layers:
        for x in l.tensors:
            o_model.tensors[x.name] = x

    # Trim unconnected nodes, keeping only those matching the output regexp.
    if trim_unused_by_output:
        o_model.layers = barracuda.trim(o_model.layers, trim_unused_by_output,
                                        args.verbose)

    # Find model inputs & outputs
    all_layers = {l.name for l in o_model.layers}
    all_inputs = {i for l in o_model.layers for i in l.inputs}
    # global inputs  - inputs that are NOT produced by any layer in the network
    # global outputs - outputs that are NOT feeding any layer in the network
    o_model.inputs = {
        i: o_input_shapes[i]
        for l in o_model.layers for i in l.inputs if i not in all_layers
    }
    o_model.outputs = [
        l.name for l in o_model.layers if l.name not in all_inputs
    ]

    # Optionally compress float weights to f16.
    if compress_f16:
        o_model = barracuda.compress(o_model)

    # Summary
    barracuda.summary(o_model,
                      print_layer_links=args.print_layer_links or args.verbose,
                      print_barracuda_json=args.print_barracuda_json
                      or args.verbose,
                      print_tensors=args.print_tensors or args.verbose)

    # Write to file
    barracuda.write(o_model, target_file)
    print('DONE: wrote', target_file, 'file.')
# Example 3: ONNX-to-Barracuda converter (with globals support)
def convert(source_file,
            target_file,
            trim_unused_by_output="",
            verbose=False,
            compress_f16=False):
    """
    Convert an ONNX model into a Barracuda model.

    :param source_file: path to the ONNX model file
    :param target_file: name of the file the converted model will be saved to
    :param trim_unused_by_output: regexp matching output nodes to keep in the
        model; all other unconnected nodes are removed
    :param verbose: if True, display debug messages; may also be a pre-built
        args object (Struct), which is then used as-is
    :param compress_f16: if True, float values are converted to f16
    :return: None (the converted model is written to ``target_file``)
    """
    # `verbose` doubles as either a plain flag or a fully populated args
    # object; only build the Struct in the flag case.
    if isinstance(verbose, bool):
        args = Struct()
        args.verbose = verbose
        args.print_layers = verbose
        args.print_source_json = verbose
        args.print_barracuda_json = verbose
        args.print_layer_links = verbose
        args.print_patterns = verbose
        args.print_tensors = verbose
        # Bug fix: this flag is read below but was never initialized in the
        # bool branch, raising AttributeError when `verbose` is a plain bool.
        args.print_supported_ops = verbose
    else:
        args = verbose

    if args.print_supported_ops:
        barracuda.print_known_operations(known_classes, known_activations)

    # Load ONNX model
    print(f"Converting {source_file} to {target_file}")
    i_model = onnx.load(source_file)

    if args.print_source_json or args.verbose:
        for layer in i_model.graph.node:
            print(MessageToJson(layer) + ",")

    # Convert the source graph into Barracuda layers/tensors/memories/globals.
    o_model = barracuda.Model()
    o_model.layers, o_input_shapes, o_model.tensors, o_model.memories, o_model.globals = \
        process_model(i_model, args)

    # Trim unconnected nodes, keeping only those matching the output regexp.
    if trim_unused_by_output:
        o_model.layers = barracuda.trim(o_model.layers, trim_unused_by_output,
                                        args.verbose)

    # Create load layers for constants
    def dims_to_barracuda_shape(tensor):
        # Fall back to a [1, 1, 1, 1] shape when the tensor carries no dims.
        if hasattr(tensor, 'dims') and len(tensor.dims) > 0:
            return adapt_input_shape(tensor.dims)
        return [1, 1, 1, 1]

    # Pass the callables directly — wrapping them in identity lambdas
    # (`lambda tensor: f(tensor)`) added nothing.
    barracuda.setup_constants(o_model, dims_to_barracuda_shape,
                              get_tensor_data)

    # Find model inputs & outputs
    all_inputs = {i for l in o_model.layers for i in l.inputs}
    all_layers = {l.name for l in o_model.layers}

    # global inputs  - inputs that are NOT produced by any layer in the network
    # global outputs - outputs that are NOT feeding any layer in the network
    o_model.inputs = {
        i: o_input_shapes[i]
        for l in o_model.layers for i in l.inputs if i not in all_layers
    }

    def is_output_layer(layer):
        if layer.name in all_inputs:  # Only layers that do not input to other layers can count as global output
            return False
        if layer.name in o_model.globals:
            return False
        return True

    o_model.outputs = [l.name for l in o_model.layers if is_output_layer(l)]

    # Optionally compress float weights to f16.
    if compress_f16:
        o_model = barracuda.compress(o_model)

    # Sort model so that layer inputs are always ready upfront
    o_model.layers = barracuda.sort(o_model.layers, o_model.inputs,
                                    o_model.memories, args.verbose)
    o_model.layers = barracuda.fuse(o_model.layers, args.verbose)

    # Summary
    barracuda.summary(o_model,
                      print_layer_links=args.print_layer_links or args.verbose,
                      print_barracuda_json=args.print_barracuda_json
                      or args.verbose,
                      print_tensors=args.print_tensors or args.verbose)

    # Write to file
    barracuda.write(o_model, target_file)
    print('DONE: wrote', target_file, 'file.')