Example No. 1
    def __init__(self, option, src_model_file):
        self._op_converters = {
            keras.layers.InputLayer: self.convert_input_layer,
            keras.layers.Flatten: self.convert_flatten,
            keras.layers.Dense: self.convert_dense,
            keras.layers.Conv2D: self.convert_conv2d,
            keras.layers.MaxPooling2D: self.convert_maxpooling2d,
            keras.layers.Dropout: self.convert_dropout,
            keras.layers.DepthwiseConv2D: self.convert_depthwise_conv2d,
            keras.layers.Softmax: self.convert_softmax,
            keras.layers.BatchNormalization: self.convert_batch_normalization,
            keras.layers.Activation: self.convert_activation,
            keras.layers.GlobalAveragePooling2D:
            self.convert_global_average_pooling2d,
            keras.layers.Add: self.convert_add,
            QuantizeLayer: self.convert_quantize_layer,
            QuantizeWrapper: self.convert_quantize_wrapper,
        }

        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.HWIO)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NHWC)

        with tfmot.quantization.keras.quantize_scope():
            self._keras_model = keras.models.load_model(src_model_file,
                                                        compile=False)
Example No. 2
def run_layers_validate(flags, args, original_conf):
    model_name = flags.model_name
    original_model_dir = flags.output + "/" + \
        original_conf['library_name'] + "/model"
    model_dir = "/tmp/micro_run/model"
    device.execute("mkdir -p %s" % model_dir)
    device.execute("cp -p %s/%s.pb %s" %
                   (original_model_dir, model_name, model_dir))
    params_file_path = "%s/%s.data" % (original_model_dir, model_name)
    output_configs = layers_validate.get_layers(model_dir, model_name,
                                                flags.layers)

    for i in range(len(output_configs)):
        sub_model_conf = gen_sub_model_conf(output_configs[i], flags,
                                            original_conf)
        with open(output_configs[i]['model_file_path'], "rb") as model_file:
            net_def = mace_pb2.NetDef()
            net_def.ParseFromString(model_file.read())
            with open(params_file_path, "rb") as params_file:
                weights = bytearray(params_file.read())
                micro_conf = \
                    config_parser.normalize_model_config(sub_model_conf)
                MicroConverter(micro_conf, net_def, weights,
                               model_name).gen_code()
                build_engine(model_name, micro_conf[ModelKeys.data_type])
                run_model_with_conf(flags, args, model_name, micro_conf)
Example No. 3
    def __init__(self, option, src_model_file, src_weight_file):
        self._op_converters = {
            'Input': self.convert_nop,
            'Convolution': self.convert_conv2d,
            'Deconvolution': self.convert_deconv2d,
            'Eltwise': self.convert_elementwise,
            'Add': self.convert_add,
            'ReLU': self.convert_activation,
            'ReLU6': self.convert_activation,
            'TanH': self.convert_activation,
            'Sigmoid': self.convert_activation,
            'PReLU': self.convert_activation,
            'Clip': self.convert_activation,
            'Pooling': self.convert_pooling,
            'Concat': self.convert_concat,
            'Slice': self.convert_slice,
            'Softmax': self.convert_softmax,
            'InnerProduct': self.convert_fully_connected,
            'Interp': self.convert_interp,
            'BatchNorm': self.convert_folded_batchnorm,
            'GroupNorm': self.convert_group_norm,
            'Crop': self.convert_crop,
            'Scale': self.convert_scale,
            'ShuffleChannel': self.convert_channel_shuffle,
            'Permute': self.convert_permute,
            'Flatten': self.convert_flatten,
            'PriorBox': self.convert_prior_box,
            'Reshape': self.convert_reshape,
            'L2Normalization': self.convert_lpnorm,
            'L1Normalization': self.convert_lpnorm,
            'MVN': self.convert_MVN,
            'Bias': self.convert_bias,
            'ArgMax': self.convert_argmax,
            'ResizeNearest': self.convert_resize_nearest,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.OIHW)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NCHW)
        self._caffe_net = CaffeNet()
        self._caffe_layers = caffe_pb2.NetParameter()
        caffe_weights = caffe_pb2.NetParameter()

        # parse prototxt
        with open(src_model_file, 'r') as f:
            google.protobuf.text_format.Merge(
                str(f.read()), self._caffe_layers)
            self.filter_test_layers(self._caffe_layers)
            for layer in self._caffe_layers.layer:
                self._caffe_net.add_layer(layer)

        # parse model weights
        with open(src_weight_file, 'rb') as f:
            caffe_weights.ParseFromString(f.read())
            self.filter_test_layers(caffe_weights)
            for weight in caffe_weights.layer:
                self._caffe_net.add_blob(weight)

        self._skip_ops = []
Example No. 4
def separate_params(mace_model):
    tensors = mace_model.tensors
    params = mace_pb2.NetDef()
    params.tensors.extend(tensors)

    model = mace_model
    del model.tensors[:]
    return model, params
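A minimal usage sketch for separate_params, assuming a serialized MACE model at model.pb (all file names below are hypothetical):

import mace_pb2  # import path depends on how the MACE protos were generated

net_def = mace_pb2.NetDef()
with open("model.pb", "rb") as f:
    net_def.ParseFromString(f.read())

graph, params = separate_params(net_def)
with open("model_graph.pb", "wb") as f:   # ops only, tensors stripped
    f.write(graph.SerializeToString())
with open("model_params.pb", "wb") as f:  # tensors only
    f.write(params.SerializeToString())
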
Example No. 5
    def __init__(self, option, src_model_file):
        torch._C.Node.__getitem__ = _node_getitem
        self._param_converts = (
            NodeKind.Constant,
            NodeKind.List,
            NodeKind.Size,
            NodeKind.NumToTensor,
            NodeKind.Int,
        )

        self._option = option
        self._converter_info = dict()
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.OIHW)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NCHW)
        ConverterUtil.set_framework_type(self._mace_net_def,
                                         FrameworkType.PYTORCH.value)
        self._op_converters = {
            NodeKind.AdaptiveAvgPool2D: self.convert_pool,
            NodeKind.Add: self.convert_add,
            NodeKind.Add_: self.convert_add,
            NodeKind.Addmm: self.convert_addmm,
            NodeKind.AvgPool2D: self.convert_pool,
            NodeKind.BatchNorm: self.convert_batch_norm,
            NodeKind.Cat: self.convert_cat,
            NodeKind.Convolution: self.convert_conv2d,
            NodeKind.Dropout: self.convert_dropout,
            NodeKind.Flatten: self.convert_flatten,
            NodeKind.HardTanh_: self.convert_hardtanh,
            NodeKind.HardTanh: self.convert_hardtanh,
            NodeKind.Matmul: self.convert_matmul,
            NodeKind.MaxPool2D: self.convert_pool,
            NodeKind.Relu: self.convert_relu,
            NodeKind.Relu_: self.convert_relu,
            NodeKind.Reshape: self.convert_reshape,
            NodeKind.T: self.convert_t,
        }
        self._loaded_model = torch.jit.load(src_model_file)
        self._loaded_model.eval()
        self._graph, self._params_dict = self.model_to_graph()
        self._output_node_name = list(self._graph.outputs())[0].debugName()
        self._output_value_type = list(self._graph.outputs())[0].type()
        mace_check(
            isinstance(self._output_value_type,
                       (ValueType.TensorType, ValueType.ListType,
                        ValueType.TupleType)),
            'return type {} not supported'.format(self._output_value_type))
        self._node_map = {}
        self.init_output_shape_cache()
Example No. 6
def encrypt(model_name, model_file, params_file, device, output,
            is_obfuscate=False, gencode_model=False, gencode_params=False):
    model_checksum = util.file_checksum(model_file)
    params_checksum = util.file_checksum(params_file)

    with open(model_file, "rb") as model_file:
        with open(params_file, "rb") as params_file:
            model = mace_pb2.NetDef()
            model.ParseFromString(model_file.read())
            params = bytearray(params_file.read())

            if is_obfuscate:
                obfuscate_name(model)
            save_model_to_file(model_name, model, params, output)
            if gencode_model:
                save_model_to_code(model_name, model, params, model_checksum,
                                   params_checksum, device, output + "/code/",
                                   gencode_params)
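A hedged call sketch for encrypt; every path and value below is hypothetical, and the expected type of the device argument depends on the surrounding tooling:

encrypt(model_name="mobilenet_v1",
        model_file="build/mobilenet_v1/model/mobilenet_v1.pb",
        params_file="build/mobilenet_v1/model/mobilenet_v1.data",
        device="cpu",                 # assumption: a plain device identifier
        output="build/mobilenet_v1/model",
        is_obfuscate=True,            # scramble op/tensor names in the saved model
        gencode_model=True,           # also emit the model as generated code
        gencode_params=True)          # embed the weights in the generated code
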
Example No. 7
    def __init__(self, option, src_model_file):
        self._op_converters = {
            keras.layers.InputLayer: self.convert_input_layer,
            keras.layers.Flatten: self.convert_flatten,
            keras.layers.Dense: self.convert_dense,
            keras.layers.Conv2D: self.convert_conv2d,
            keras.layers.MaxPooling2D: self.convert_max_pooling2d,
            keras.layers.AveragePooling2D: self.convert_average_pooling2d,
            keras.layers.Dropout: self.convert_dropout,
            keras.layers.DepthwiseConv2D: self.convert_depthwise_conv2d,
            keras.layers.Softmax: self.convert_softmax,
            keras.layers.BatchNormalization: self.convert_batch_normalization,
            keras.layers.SeparableConv2D: self.convert_separable_conv2d,
            keras.layers.UpSampling2D: self.convert_upsampling2d,
            keras.layers.Activation: self.convert_activation,
            keras.layers.ReLU: self.convert_relu,
            keras.layers.Reshape: self.convert_reshape,
            keras.layers.Concatenate: self.convert_concatenate,
            keras.layers.GlobalAveragePooling2D:
            self.convert_global_average_pooling2d,
            keras.layers.Add: self.convert_add,
            keras.layers.Subtract: self.convert_substract,
            keras.layers.Multiply: self.convert_multiply,
            QuantizeLayer: self.convert_quantize_layer,
            QuantizeWrapper: self.convert_quantize_wrapper,
            # keras.Sequential: self.convert_sequential,
        }

        self._option = option
        self._converter_info = dict()
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.HWIO)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NHWC)

        mace_check(
            len(option.input_nodes) == 1, "Only 1 input node is supported")
        self._batch = list(option.input_nodes.items())[0][1].shape[0]

        with tfmot.quantization.keras.quantize_scope():
            self._keras_model = keras.models.load_model(src_model_file,
                                                        compile=False)
            self._keras_model.summary()
Example No. 8
    def __init__(self, option, src_model_file):
        self._op_converters = {
            MGEOpType.AxisAddRemove.name: self.convert_axisaddrm,
            MGEOpType.BatchNormForward.name: self.convert_batchnorm,
            MGEOpType.Concat.name: self.convert_concat,
            MGEOpType.ConvolutionForward.name: self.convert_conv2d,
            MGEOpType.ConvolutionBackwardData.name: self.convert_deconv2d,
            MGEOpType.Dimshuffle.name: self.convert_dimshuffle,
            MGEOpType.Elemwise.name: self.convert_elemwise,
            MGEOpType.GetVarShape.name: self.convert_shape,
            MGEOpType.Host2DeviceCopy.name: self.convert_nop,
            MGEOpType.Identity.name: self.convert_identity,
            MGEOpType.MarkNoBroadcastElemwise.name: self.convert_identity,
            MGEOpType.MatrixMul.name: self.convert_matmul,
            MGEOpType.PoolingForward.name: self.convert_pooling,
            MGEOpType.Reduce.name: self.convert_reduce,
            MGEOpType.Reshape.name: self.convert_reshape,
            MGEOpType.SharedDeviceTensor.name: self.convert_nop,
            MGEOpType.Subtensor.name: self.convert_subtensor,
        }
        self._option = option
        self._converter_info = dict()
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.OIHW)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NCHW)

        cg, _, outputs = mgb.load_comp_graph_from_file(src_model_file)
        map_oprs, _, var2oprs, *_ = mgb.cgtools.graph_traversal(outputs)
        # prune the second input of reshape, because it introduces
        # several ops and may increase the overhead
        operators = mgb.cgtools.get_oprs_seq(outputs, prune_reshape=True)

        self._mge_cg = cg
        self._mge_operators = operators
        self._mge_map_oprs = map_oprs
        self._mge_var2oprs = var2oprs

        self._skip_tensors = set()
        self._bn_statistis_tensors = {}
Example No. 9
    def __init__(self, option, src_model_file):
        # Keep in lexicographical order
        self._op_converters = {
            TFOpType.Abs.name: self.convert_elementwise,
            TFOpType.Add.name: self.convert_add,
            TFOpType.AddV2.name: self.convert_add,
            TFOpType.ArgMax.name: self.convert_argmax,
            TFOpType.AvgPool.name: self.convert_pooling,
            TFOpType.BatchMatMul.name: self.convert_matmul,
            TFOpType.BatchMatMulV2.name: self.convert_matmul,
            TFOpType.BatchToSpaceND.name: self.convert_space_batch,
            TFOpType.BiasAdd.name: self.convert_biasadd,
            TFOpType.Cast.name: self.convert_cast,
            TFOpType.ConcatV2.name: self.convert_concat,
            TFOpType.Const.name: self.convert_nop,
            TFOpType.Conv2D.name: self.convert_conv2d,
            TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
            TFOpType.Cumsum.name: self.convert_cumsum,
            TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
            TFOpType.DepthToSpace.name: self.convert_space_depth,
            TFOpType.Div.name: self.convert_elementwise,
            TFOpType.Elu.name: self.convert_activation,
            TFOpType.Equal.name: self.convert_elementwise,
            TFOpType.ExpandDims.name: self.convert_expand_dims,
            TFOpType.ExtractImagePatches.name:
            self.convert_extract_image_patches,
            TFOpType.FakeQuantWithMinMaxVars.name: self.convert_fake_quantize,
            TFOpType.FakeQuantWithMinMaxArgs.name: self.convert_fake_quantize,
            TFOpType.Fill.name: self.convert_fill,
            TFOpType.FloorDiv.name: self.convert_elementwise,
            TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
            TFOpType.FusedBatchNormV2.name: self.convert_fused_batchnorm,
            TFOpType.FusedBatchNormV3.name: self.convert_fused_batchnorm,
            TFOpType.Gather.name: self.convert_gather,
            TFOpType.GatherV2.name: self.convert_gather,
            TFOpType.Identity.name: self.convert_identity,
            TFOpType.LeakyRelu.name: self.convert_activation,
            TFOpType.MatMul.name: self.convert_matmul,
            TFOpType.Max.name: self.convert_reduce,
            TFOpType.Maximum.name: self.convert_elementwise,
            TFOpType.MaxPool.name: self.convert_pooling,
            TFOpType.Mean.name: self.convert_reduce,
            TFOpType.Min.name: self.convert_reduce,
            TFOpType.Minimum.name: self.convert_elementwise,
            TFOpType.MirrorPad.name: self.convert_pad,
            TFOpType.Mul.name: self.convert_elementwise,
            TFOpType.Neg.name: self.convert_elementwise,
            TFOpType.NotEqual.name: self.convert_elementwise,
            TFOpType.OneHot.name: self.convert_one_hot,
            TFOpType.Pack.name: self.convert_stack,
            TFOpType.Pad.name: self.convert_pad,
            TFOpType.PadV2.name: self.convert_pad,
            TFOpType.Placeholder.name: self.convert_nop,
            TFOpType.Pow.name: self.convert_elementwise,
            TFOpType.Prod.name: self.convert_reduce,
            TFOpType.Sub.name: self.convert_elementwise,
            TFOpType.RealDiv.name: self.convert_elementwise,
            TFOpType.SquaredDifference.name: self.convert_elementwise,
            TFOpType.Square.name: self.convert_elementwise,
            TFOpType.Rsqrt.name: self.convert_elementwise,
            TFOpType.Relu.name: self.convert_activation,
            TFOpType.Relu6.name: self.convert_activation,
            TFOpType.Tanh.name: self.convert_activation,
            TFOpType.Reshape.name: self.convert_reshape,
            TFOpType.ResizeBicubic.name: self.convert_resize_bicubic,
            TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
            TFOpType.ResizeNearestNeighbor.name:
            self.convert_resize_nearest_neighbor,
            TFOpType.ReverseV2.name: self.convert_reverse,
            TFOpType.Select.name: self.convert_select,
            TFOpType.Shape.name: self.convert_shape,
            TFOpType.Sigmoid.name: self.convert_activation,
            TFOpType.Sign.name: self.convert_elementwise,
            TFOpType.Slice.name: self.convert_slice,
            TFOpType.Softmax.name: self.convert_softmax,
            TFOpType.SpaceToBatchND.name: self.convert_space_batch,
            TFOpType.SpaceToDepth.name: self.convert_space_depth,
            TFOpType.Split.name: self.convert_split,
            TFOpType.SplitV.name: self.convert_splitv,
            TFOpType.Sqrt.name: self.convert_elementwise,
            TFOpType.Squeeze.name: self.convert_squeeze,
            TFOpType.Stack.name: self.convert_stack,
            TFOpType.StridedSlice.name: self.convert_stridedslice,
            TFOpType.Sum.name: self.convert_reduce,
            TFOpType.Tile.name: self.convert_tile,
            TFOpType.Transpose.name: self.convert_transpose,
            TFOpType.Unpack.name: self.convert_unstack,
            TFOpType.Unstack.name: self.convert_unstack,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.HWIO)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NHWC)
        ConverterUtil.set_framework_type(self._mace_net_def,
                                         FrameworkType.TENSORFLOW.value)

        # import tensorflow graph
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())

        self._placeholders = {}
        self._skip_tensor = set()
        self._output_shape = {}

        print("Run transform_graph: %s" % TFTransformGraphOptions)
        try:
            print("output keys: ", option.output_nodes.keys())
            transformed_graph_def = TransformGraph(tf_graph_def,
                                                   option.input_nodes.keys(),
                                                   option.output_nodes.keys(),
                                                   TFTransformGraphOptions)
        except Exception as ex:
            print("Failed to transform graph using tf tool: %s" % ex)
            transformed_graph_def = tf_graph_def

        # To check the optimized model, uncomment the following code.
        # tf.io.write_graph(
        #     transformed_graph_def,
        #     ".",
        #     os.path.basename(src_model_file)[:-3] + "_opt.pb",
        #     as_text=False
        # )

        self.add_shape_info(transformed_graph_def)

        # reset default graph to clear earlier import
        tf.reset_default_graph()
        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph
                self.update_output_shapes(session)

        # the graph has been polluted with 'shape' ops, so reset it and
        # import it again
        tf.reset_default_graph()
        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph
Example No. 10
def convert(model_file, output_dir, layers):
    mace_check(os.path.isfile(model_file),
               "Input graph file '" + model_file + "' does not exist!")
    mace_check(os.path.isdir(output_dir),
               "Output directory '" + output_dir + "' does not exist!")
    net_def = mace_pb2.NetDef()
    with open(model_file, "rb") as f:
        net_def.ParseFromString(f.read())

    is_quantize = ConverterUtil.get_arg(net_def,
                                        MaceKeyword.mace_quantize_flag_arg_str)
    is_quantize = False if is_quantize is None else is_quantize.i == 1
    is_hexagon = False
    index = 0
    end_index = len(net_def.op)
    if is_quantize:
        while index < end_index:
            # skip the leading quantize op
            if net_def.op[index].type == MaceOp.Quantize.name or \
                    net_def.op[index].type == \
                    HexagonOp.QuantizeINPUT_f_to_8.name or \
                    net_def.op[index].type == HexagonOp.INPUT.name:
                index += 1
            # skip the trailing dequantize op
            elif net_def.op[end_index - 1].type == MaceOp.Dequantize.name or \
                    net_def.op[end_index - 1].type == \
                    HexagonOp.DequantizeOUTPUT_8tof.name or \
                    net_def.op[end_index - 1].type == HexagonOp.OUTPUT.name:
                end_index -= 1
            else:
                break
        mace_check(
            0 < index < end_index < len(net_def.op),
            "Wrong number of op quantize(%d) or dequantize(%d)." %
            (index, len(net_def.op) - end_index))
        if net_def.op[-1].type == HexagonOp.DequantizeOUTPUT_8tof.name or \
                net_def.op[-1].type == HexagonOp.OUTPUT.name:
            is_hexagon = True

    index, end_index = handle_index(index, end_index, layers)

    data_format = net_def.output_info[0].data_format
    output_configs = {"subgraphs": []}
    while index < end_index:
        # skip BatchToSpaceND and the op right before it, due to the changed graph
        if net_def.op[index].type == MaceOp.BatchToSpaceND.name or \
                net_def.op[index].type == HexagonOp.BatchToSpaceND_8.name or \
                (index + 1 < end_index and
                 (net_def.op[index + 1].type == MaceOp.BatchToSpaceND.name or
                  net_def.op[index + 1].type == HexagonOp.BatchToSpaceND_8.name)):  # noqa
            index += 1
            continue
        net = copy.deepcopy(net_def)
        if is_hexagon:
            # reuse the dequantize op and its min/max tensors' node_id
            del net.op[index + 1:-1]
        else:
            del net.op[index + 1:]
        del net.output_info[:]
        op = net.op[index]
        index += 1

        output_tensors = []
        output_shapes = []
        op_name = op.name
        if str(op.name).startswith(MaceKeyword.mace_output_node_name):
            continue
        if is_quantize:
            op.name = MaceKeyword.mace_output_node_name + '_' + op.name
        if is_hexagon:
            if len(op.output) != 1:
                print("Skip %s(%s)" % (op.name, op.type))
                continue
        for i in range(len(op.output)):
            output_tensors.append(str(op.output[i]))
            output_shapes.append(",".join(
                [str(dim) for dim in op.output_shape[i].dims]))
            # modify output info
            output_info = net.output_info.add()
            output_info.name = op.output[i]
            output_info.data_format = data_format
            output_info.dims.extend(op.output_shape[i].dims)
            output_info.data_type = mace_pb2.DT_FLOAT
            if is_quantize:
                output_info.scale = op.quantize_info[0].scale
                output_info.zero_point = op.quantize_info[0].zero_point
            # modify output op
            if is_quantize:
                output_name = op.output[i]
                new_output_name = \
                    MaceKeyword.mace_output_node_name + '_' + op.output[i]
                op.output[i] = new_output_name
                if not is_hexagon:
                    dequantize_op = net.op.add()
                    dequantize_op.name = normalize_op_name(output_name)
                    dequantize_op.type = MaceOp.Dequantize.name
                    dequantize_op.input.append(new_output_name)
                    dequantize_op.output.append(output_name)
                    output_shape = dequantize_op.output_shape.add()
                    output_shape.dims.extend(op.output_shape[i].dims)
                    dequantize_op.output_type.append(mace_pb2.DT_FLOAT)
                    ConverterUtil.add_data_type_arg(dequantize_op,
                                                    mace_pb2.DT_UINT8)
                else:
                    dequantize_op = net.op[-1]
                    dequantize_op.name = normalize_op_name(output_name)
                    del dequantize_op.input[:]
                    del dequantize_op.output[:]
                    dequantize_op.input.append(new_output_name)
                    dequantize_op.node_input[0].node_id = op.node_id
                    dequantize_op.output.append(output_name)
                    if dequantize_op.type == \
                            HexagonOp.DequantizeOUTPUT_8tof.name:
                        input_min = new_output_name[:-1] + '1'
                        input_max = new_output_name[:-1] + '2'
                        dequantize_op.input.extend([input_min, input_max])
                        dequantize_op.node_input[1].node_id = op.node_id
                        dequantize_op.node_input[2].node_id = op.node_id
                        del dequantize_op.node_input[3:]
                    else:
                        del dequantize_op.node_input[1:]

        model_path = save_model_to_proto(net, normalize_op_name(op_name),
                                         output_dir)
        output_config = {
            "model_file_path": str(model_path),
            "output_tensors": output_tensors,
            "output_shapes": output_shapes,
            "output_data_formats": [DataFormat.NHWC.name]
        }
        output_configs["subgraphs"].append(output_config)

    output_configs_path = output_dir + "outputs.yml"
    with open(output_configs_path, "w") as f:
        yaml.dump(output_configs, f, default_flow_style=False)
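A usage sketch for convert; the model file, output directory, and layers value below are hypothetical placeholders, and since outputs.yml is produced by plain string concatenation, output_dir should end with a slash:

import os
import yaml

output_dir = "/tmp/layers_validate/"
os.makedirs(output_dir, exist_ok=True)   # convert() checks that the directory exists
convert("mobilenet_v1.pb", output_dir, layers="-1")  # layers format is whatever handle_index expects
with open(output_dir + "outputs.yml") as f:
    configs = yaml.safe_load(f)
for subgraph in configs["subgraphs"]:
    print(subgraph["model_file_path"], subgraph["output_tensors"])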