Example #1
    def __init__(self, option, src_model_file):
        self._op_converters = {
            OnnxOpType.Abs.name: self.convert_eltwise,
            OnnxOpType.Add.name: self.convert_eltwise,
            OnnxOpType.ArgMax.name: self.convert_argmax,
            OnnxOpType.ArgMin.name: self.convert_argmax,
            OnnxOpType.AveragePool.name: self.convert_pooling,
            OnnxOpType.BatchNormalization.name: self.convert_fused_batchnorm,
            OnnxOpType.Cast.name: self.convert_cast,
            OnnxOpType.Concat.name: self.convert_concat,
            OnnxOpType.Conv.name: self.convert_conv2d,
            OnnxOpType.ConvTranspose.name: self.convert_deconv,
            OnnxOpType.DepthToSpace.name: self.convert_depth_space,
            OnnxOpType.Dropout.name: self.convert_identity,
            OnnxOpType.Div.name: self.convert_eltwise,
            OnnxOpType.Equal.name: self.convert_eltwise,
            OnnxOpType.Gather.name: self.convert_gather,
            OnnxOpType.Gemm.name: self.convert_gemm,
            OnnxOpType.GlobalAveragePool.name: self.convert_reduce,
            OnnxOpType.GlobalMaxPool.name: self.convert_reduce,
            OnnxOpType.Identity.name: self.convert_identity,
            OnnxOpType.ImageScaler.name: self.convert_imagescaler,
            OnnxOpType.LeakyRelu.name: self.convert_activation,
            OnnxOpType.Max.name: self.convert_eltwise,
            OnnxOpType.MaxPool.name: self.convert_pooling,
            OnnxOpType.MatMul.name: self.convert_matmul,
            OnnxOpType.Min.name: self.convert_eltwise,
            OnnxOpType.Mul.name: self.convert_eltwise,
            OnnxOpType.Neg.name: self.convert_eltwise,
            OnnxOpType.Pad.name: self.convert_pad,
            OnnxOpType.Pow.name: self.convert_eltwise,
            OnnxOpType.PRelu.name: self.convert_activation,
            OnnxOpType.Relu.name: self.convert_activation,
            OnnxOpType.Reshape.name: self.convert_reshape,
            OnnxOpType.Reciprocal.name: self.convert_eltwise,
            OnnxOpType.Sigmoid.name: self.convert_activation,
            OnnxOpType.Softmax.name: self.convert_softmax,
            OnnxOpType.SpaceToDepth.name: self.convert_depth_space,
            OnnxOpType.Split.name: self.convert_split,
            OnnxOpType.Sqrt.name: self.convert_eltwise,
            OnnxOpType.Squeeze.name: self.convert_squeeze,
            OnnxOpType.Sub.name: self.convert_eltwise,
            OnnxOpType.Sum.name: self.convert_eltwise,
            OnnxOpType.Tanh.name: self.convert_activation,
            OnnxOpType.Transpose.name: self.convert_transpose,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, FilterFormat.OIHW)
        onnx_model = onnx.load(src_model_file)

        polished_model = onnx.utils.polish_model(onnx_model)

        print "onnx model IR version: ", onnx_model.ir_version
        print "onnx model opset import: ", onnx_model.opset_import

        self._onnx_model = shape_inference.infer_shapes(polished_model)
        self._graph_shapes_dict = {}
        self._consts = {}
        self._replace_tensors = {}
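
The table above only registers converters; a minimal sketch of the loop that would consult it while walking the shape-inferred graph (the convert_ops name and the error handling are assumptions, not part of this snippet):

    def convert_ops(self):
        # Dispatch every ONNX node to the converter registered for its type.
        # Sketch only: the real method also tracks shapes and constants.
        for node in self._onnx_model.graph.node:
            if node.op_type not in self._op_converters:
                raise Exception('Unsupported ONNX op type: %s' % node.op_type)
            self._op_converters[node.op_type](node)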
Example #2
File: graph_util.py  Project: TTWuser/ttw
def sort_mace_graph(graph_def, output_name):
    nodes_map = {}
    ordered_nodes_map = OrderedDict()
    for node in graph_def.op:
        nodes_map[node.name] = node
    sort_mace_node(nodes_map[output_name], nodes_map, ordered_nodes_map)
    sorted_graph = mace_pb2.NetDef()
    sorted_graph.tensors.extend(graph_def.tensors)
    sorted_graph.op.extend([node for node in ordered_nodes_map.values()])
    return sorted_graph
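
sort_mace_node is called above but not shown; a plausible sketch is a post-order DFS from the output node, so every op's producers are emitted before the op itself (the ':port' suffix handling is an assumption about MACE tensor naming):

def sort_mace_node(node, nodes_map, ordered_nodes_map):
    # Recursively visit producers first, then record this node, yielding a
    # topological ordering keyed by op name.
    if node.name in ordered_nodes_map:
        return
    for input_tensor in node.input:
        producer = input_tensor.split(':')[0]
        if producer in nodes_map and producer not in ordered_nodes_map:
            sort_mace_node(nodes_map[producer], nodes_map, ordered_nodes_map)
    ordered_nodes_map[node.name] = node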
Example #3
    def __init__(self, option, src_model_file, src_weight_file):
        self._op_converters = {
            'Input': self.convert_nop,
            'Convolution': self.convert_conv2d,
            'Deconvolution': self.convert_deconv2d,
            'Eltwise': self.convert_elementwise,
            'Add': self.convert_add,
            'ReLU': self.convert_activation,
            'TanH': self.convert_activation,
            'Sigmoid': self.convert_activation,
            'PReLU': self.convert_activation,
            'Pooling': self.convert_pooling,
            'Concat': self.convert_concat,
            'Slice': self.convert_slice,
            'Softmax': self.convert_softmax,
            'InnerProduct': self.convert_fully_connected,
            'Interp': self.convert_interp,
            'BatchNorm': self.convert_folded_batchnorm,
            'Crop': self.convert_crop,
            'Scale': self.convert_scale,
            'ShuffleChannel': self.convert_channel_shuffle,
            'Permute': self.convert_permute,
            'Flatten': self.convert_flatten,
            'PriorBox': self.convert_prior_box,
            'Reshape': self.convert_reshape,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.OIHW)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NCHW)
        self._caffe_net = CaffeNet()
        self._caffe_layers = caffe_pb2.NetParameter()
        caffe_weights = caffe_pb2.NetParameter()

        # parse prototxt
        with open(src_model_file, 'r') as f:
            google.protobuf.text_format.Merge(str(f.read()),
                                              self._caffe_layers)
            self.filter_test_layers(self._caffe_layers)
            for layer in self._caffe_layers.layer:
                self._caffe_net.add_layer(layer)

        # parse model weight
        with open(src_weight_file, 'rb') as f:
            caffe_weights.ParseFromString(f.read())
            self.filter_test_layers(caffe_weights)
            for weight in caffe_weights.layer:
                self._caffe_net.add_blob(weight)

        self._skip_ops = []
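
filter_test_layers is applied to both the prototxt layers and the weights but is not shown here; one plausible sketch removes layers whose include rules restrict them to the Caffe TRAIN phase (the real helper's exact criteria are an assumption):

    def filter_test_layers(self, net_param):
        # Keep only layers usable at inference time; training-only layers
        # (data, loss, ...) are commonly tagged with include { phase: TRAIN }.
        kept = [layer for layer in net_param.layer
                if not any(rule.HasField('phase') and
                           rule.phase == caffe_pb2.TRAIN
                           for rule in layer.include)]
        del net_param.layer[:]
        net_param.layer.extend(kept)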
Example #4
def convert_to_mace_pb(model_file, input_node, output_node, dsp_mode):
    """
    nnlib does not have batch norm, so use tensorflow optimizer to fold
     batch norm with convolution. The fold optimization reorders ops, so
     we sort ops first by topology.
  """
    input_graph_def = tf.GraphDef()
    with gfile.Open(model_file, "rb") as f:
        data = f.read()
        input_graph_def.ParseFromString(data)

    input_graph_def = graph_util.sort_tf_graph(input_graph_def)
    net_def = mace_pb2.NetDef()

    with tf.Session() as session:
        with session.graph.as_default() as graph:
            tf.import_graph_def(input_graph_def, name="")
            ops = graph.get_operations()
            dsp_ops = DspOps()
            resolved_ops = set()
            # convert const node
            unresolved_ops = [op for op in ops if op.type == 'Const']
            while len(unresolved_ops) > 0:
                convert_ops(unresolved_ops, resolved_ops, net_def, output_node,
                            dsp_ops)

            # convert op node
            unresolved_ops = [op for op in ops if op.type != 'Const']
            while len(unresolved_ops) > 0:
                convert_ops(unresolved_ops, resolved_ops, net_def, output_node,
                            dsp_ops)

            add_output_node(net_def, output_node)
            net_def = reverse_batch_to_space_and_biasadd(net_def)
            net_def = fuse_quantize(net_def, input_node, output_node)

            sorted_net_def = graph_util.sort_mace_graph(net_def, '__output__')
            net_def_with_node_id = add_node_id(sorted_net_def)

            dtype = mace_pb2.DT_FLOAT
            final_net_def = add_input_output_info(net_def_with_node_id,
                                                  input_node, output_node,
                                                  graph, dtype)

            arg = final_net_def.arg.add()
            arg.name = 'dsp_mode'
            arg.i = dsp_mode

    return final_net_def
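
A hedged usage sketch for convert_to_mace_pb; the file names and dsp_mode value below are illustrative only:

if __name__ == '__main__':
    # Convert a frozen TensorFlow graph for the DSP runtime and serialize
    # the resulting MACE NetDef (hypothetical paths).
    net_def = convert_to_mace_pb('frozen_model.pb', 'input', 'output',
                                 dsp_mode=0)
    with open('mace_dsp_model.pb', 'wb') as f:
        f.write(net_def.SerializeToString())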
Example #5
def fuse_quantize(net_def, input_node, output_node):
    tensor_map = {}
    for tensor in net_def.tensors:
        tensor_map[tensor.name] = tensor
    op_map = {}
    for op in net_def.op:
        op_map[op.name] = op
    consumers = {}
    for op in net_def.op:
        for ipt in op.input:
            if ipt not in consumers:
                consumers[ipt] = []
            consumers[ipt].append(op)

    skip_ops = set()
    new_ops = []
    skip_tensors = set()

    # INPUT->Flatten->Minf, Maxf->Quantize
    for op in net_def.op:
        if op.type == 'INPUT':
            input_op = op
            flatten_op = None
            quantize_op = None
            for o in consumers[get_tensor_name_from_op(input_op.name, 0)]:
                if o.type == 'Flatten':
                    flatten_op = o
                elif o.type == 'Quantize':
                    quantize_op = o
            if quantize_op is not None:
                minf_op, maxf_op = consumers[get_tensor_name_from_op(
                    flatten_op.name, 0)]
                skip_ops = skip_ops.union(
                    [flatten_op.name, minf_op.name, maxf_op.name])
                skip_tensors = skip_tensors.union([
                    minf_op.input[0], maxf_op.input[0], quantize_op.input[1],
                    quantize_op.input[2]
                ])
                quantize_op.type = 'AutoQuantize'
                del quantize_op.input[1:]

    new_net_def = mace_pb2.NetDef()
    new_net_def.tensors.extend([
        tensor for tensor in net_def.tensors if tensor.name not in skip_tensors
    ])
    new_net_def.op.extend([op for op in net_def.op if op.name not in skip_ops])
    new_net_def.op.extend(new_ops)
    return new_net_def
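
get_tensor_name_from_op is used throughout this example but not shown; judging by how node outputs are addressed elsewhere, it is presumably just op name plus output port (a sketch, not the verified helper):

def get_tensor_name_from_op(op_name, port):
    # Tensor names follow the "<op_name>:<output_index>" convention.
    return op_name + ':' + str(port)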
Example #6
    def __init__(self, option, src_model_file):
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()

        # import tensorflow graph
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())

        self._placeholders = {}
        self.add_shape_info(tf_graph_def)

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(tf_graph_def, name='')
                self._tf_graph = graph
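
add_shape_info is called but not defined in this snippet; a plausible sketch overwrites each input placeholder's shape attribute from the converter options and records a dummy feed value (the name/shape fields on option.input_nodes are assumptions):

    def add_shape_info(self, tf_graph_def):
        # Sketch only; assumes numpy and the TF shape proto are available.
        import numpy as np
        from tensorflow.core.framework import tensor_shape_pb2
        for node in tf_graph_def.node:
            for input_node in self._option.input_nodes.values():
                if node.name == input_node.name or \
                        node.name + ':0' == input_node.name:
                    # Replace the placeholder's shape attr with the
                    # user-specified input shape.
                    del node.attr['shape'].shape.dim[:]
                    node.attr['shape'].shape.dim.extend([
                        tensor_shape_pb2.TensorShapeProto.Dim(size=dim)
                        for dim in input_node.shape])
                    self._placeholders[node.name + ':0'] = \
                        np.zeros(shape=input_node.shape, dtype=float)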
Example #7
def main(unused_args):
    mace_check(os.path.isfile(FLAGS.model_file),
               "Input graph file '" + FLAGS.model_file + "' does not exist!")
    mace_check(os.path.isdir(FLAGS.output_dir),
               "Output directory '" + FLAGS.output_dir + "' does not exist!")
    net_def = mace_pb2.NetDef()
    with open(FLAGS.model_file, "rb") as f:
        net_def.ParseFromString(f.read())

    quantize_flag = ConverterUtil.get_arg(
        net_def, MaceKeyword.mace_quantize_flag_arg_str)
    quantize_flag = False if quantize_flag is None else quantize_flag.i == 1
    hexagon_flag = False
    index = 0
    end_index = len(net_def.op)
    if quantize_flag:
        while index < end_index:
            # omit op quantize
            if net_def.op[index].type == MaceOp.Quantize.name or \
                    net_def.op[index].type == \
                    HexagonOp.QuantizeINPUT_f_to_8.name:
                index += 1
            # omit op dequantize
            elif net_def.op[end_index - 1].type == MaceOp.Dequantize.name or \
                    net_def.op[end_index - 1].type == \
                    HexagonOp.DequantizeOUTPUT_8tof.name:
                end_index -= 1
            else:
                break
        mace_check(0 < index < end_index < len(net_def.op),
                   "Wrong number of op quantize(%d) or dequantize(%d)." %
                   (index, len(net_def.op) - end_index))
        if net_def.op[-1].type == HexagonOp.DequantizeOUTPUT_8tof.name:
            hexagon_flag = True
    # omit original output
    end_index -= 1

    data_format = net_def.output_info[0].data_format
    output_configs = {"subgraphs": []}
    while index < end_index:
        # omit BatchToSpaceND and op before that due to changed graph
        if net_def.op[index].type == MaceOp.BatchToSpaceND.name or \
                net_def.op[index].type == HexagonOp.BatchToSpaceND_8.name or \
                (index + 1 < end_index and
                 (net_def.op[index + 1].type == MaceOp.BatchToSpaceND.name or
                  net_def.op[index + 1].type == HexagonOp.BatchToSpaceND_8.name)):  # noqa
            index += 1
            continue
        net = copy.deepcopy(net_def)
        if hexagon_flag:
            # reuse the dequantize op and its min/max tensors' node_id
            del net.op[index+1:end_index+1]
        else:
            del net.op[index+1:]
        del net.output_info[:]
        op = net.op[index]
        index += 1

        output_tensors = []
        output_shapes = []
        op_name = op.name
        if quantize_flag:
            op.name = MaceKeyword.mace_output_node_name + '_' + op.name
        if hexagon_flag:
            mace_check(len(op.output) == 1,
                       "Only supports number of outputs of Hexagon op be 1.")
        for i in range(len(op.output)):
            output_tensors.append(str(op.output[i]))
            output_shapes.append(
                ",".join([str(dim) for dim in op.output_shape[i].dims]))
            # modify output info
            output_info = net.output_info.add()
            output_info.name = op.output[i]
            output_info.data_format = data_format
            output_info.dims.extend(op.output_shape[i].dims)
            output_info.data_type = mace_pb2.DT_FLOAT
            # modify output op
            if quantize_flag:
                output_name = op.output[i]
                new_output_name = \
                    MaceKeyword.mace_output_node_name + '_' + op.output[i]
                op.output[i] = new_output_name
                if not hexagon_flag:
                    dequantize_op = net.op.add()
                    dequantize_op.name = normalize_op_name(output_name)
                    dequantize_op.type = MaceOp.Dequantize.name
                    dequantize_op.input.append(new_output_name)
                    dequantize_op.output.append(output_name)
                    output_shape = dequantize_op.output_shape.add()
                    output_shape.dims.extend(op.output_shape[i].dims)
                    dequantize_op.output_type.append(mace_pb2.DT_FLOAT)
                    ConverterUtil.add_data_type_arg(dequantize_op,
                                                    mace_pb2.DT_UINT8)
                else:
                    dequantize_op = net.op[-1]
                    dequantize_op.name = normalize_op_name(output_name)
                    del dequantize_op.input[:]
                    del dequantize_op.output[:]
                    dequantize_op.input.append(new_output_name)
                    dequantize_op.output.append(output_name)
                    input_min = new_output_name[:-1] + '1'
                    input_max = new_output_name[:-1] + '2'
                    dequantize_op.input.extend([input_min, input_max])
                    dequantize_op.node_input[0].node_id = op.node_id
                    dequantize_op.node_input[1].node_id = op.node_id
                    dequantize_op.node_input[2].node_id = op.node_id

        model_path = save_model_to_proto(net, normalize_op_name(op_name),
                                         FLAGS.output_dir)
        output_config = {"model_file_path": str(model_path),
                         "output_tensors": output_tensors,
                         "output_shapes": output_shapes}
        output_configs["subgraphs"].append(output_config)

    output_configs_path = FLAGS.output_dir + "outputs.yml"
    with open(output_configs_path, "w") as f:
        yaml.dump(output_configs, f, default_flow_style=False)
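
normalize_op_name and save_model_to_proto are used above without being shown; plausible sketches, assuming names are sanitised for use as file names and each subgraph is written as a binary protobuf:

def normalize_op_name(name):
    # Make an op name safe to use as a file name.
    return name.replace('/', '_').replace(':', '_')


def save_model_to_proto(net_def, name, output_dir):
    # Serialize one subgraph NetDef to <output_dir>/<name>.pb and return
    # the path recorded in outputs.yml.
    proto_path = os.path.join(output_dir, name + '.pb')
    with open(proto_path, 'wb') as f:
        f.write(net_def.SerializeToString())
    return proto_path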
Example #8
    def __init__(self, option, src_model_file):
        self._op_converters = {
            TFOpType.Conv2D.name: self.convert_conv2d,
            TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
            TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
            TFOpType.BiasAdd.name: self.convert_biasadd,
            TFOpType.Add.name: self.convert_add,
            TFOpType.Sub.name: self.convert_elementwise,
            TFOpType.Mul.name: self.convert_elementwise,
            TFOpType.Div.name: self.convert_elementwise,
            TFOpType.Min.name: self.convert_elementwise,
            TFOpType.Minimum.name: self.convert_elementwise,
            TFOpType.Max.name: self.convert_elementwise,
            TFOpType.Maximum.name: self.convert_elementwise,
            TFOpType.Neg.name: self.convert_elementwise,
            TFOpType.Abs.name: self.convert_elementwise,
            TFOpType.Pow.name: self.convert_elementwise,
            TFOpType.RealDiv.name: self.convert_elementwise,
            TFOpType.SquaredDifference.name: self.convert_elementwise,
            TFOpType.Square.name: self.convert_elementwise,
            TFOpType.Rsqrt.name: self.convert_elementwise,
            TFOpType.Equal.name: self.convert_elementwise,
            TFOpType.Relu.name: self.convert_activation,
            TFOpType.LeakyRelu.name: self.convert_activation,
            TFOpType.Relu6.name: self.convert_activation,
            TFOpType.Tanh.name: self.convert_activation,
            TFOpType.Sigmoid.name: self.convert_activation,
            TFOpType.Fill.name: self.convert_fill,
            TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
            TFOpType.AvgPool.name: self.convert_pooling,
            TFOpType.MaxPool.name: self.convert_pooling,
            TFOpType.MatMul.name: self.convert_matmul,
            TFOpType.BatchMatMul.name: self.convert_matmul,
            TFOpType.Identity.name: self.convert_identity,
            TFOpType.Reshape.name: self.convert_reshape,
            TFOpType.Shape.name: self.convert_shape,
            TFOpType.ExpandDims.name: self.convert_expand_dims,
            TFOpType.Squeeze.name: self.convert_squeeze,
            TFOpType.Transpose.name: self.convert_transpose,
            TFOpType.Softmax.name: self.convert_softmax,
            TFOpType.ResizeBicubic.name: self.convert_resize_bicubic,
            TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
            TFOpType.ResizeNearestNeighbor.name:
            self.convert_resize_nearest_neighbor,  # noqa
            TFOpType.Placeholder.name: self.convert_nop,
            TFOpType.SpaceToBatchND.name: self.convert_space_batch,
            TFOpType.BatchToSpaceND.name: self.convert_space_batch,
            TFOpType.DepthToSpace.name: self.convert_space_depth,
            TFOpType.SpaceToDepth.name: self.convert_space_depth,
            TFOpType.Pad.name: self.convert_pad,
            TFOpType.ConcatV2.name: self.convert_concat,
            TFOpType.Mean.name: self.convert_mean,
            TFOpType.Const.name: self.convert_nop,
            TFOpType.Gather.name: self.convert_gather,
            TFOpType.StridedSlice.name: self.convert_stridedslice,
            TFOpType.Slice.name: self.convert_slice,
            TFOpType.ReverseV2.name: self.convert_reverse,
            TFOpType.Pack.name: self.convert_stack,
            TFOpType.Stack.name: self.convert_stack,
            TFOpType.Unpack.name: self.convert_unstack,
            TFOpType.Unstack.name: self.convert_unstack,
            TFOpType.Cast.name: self.convert_cast,
            TFOpType.ArgMax.name: self.convert_argmax,
            TFOpType.Split.name: self.convert_split,
            TFOpType.FakeQuantWithMinMaxVars.name: self.convert_fake_quantize,
            TFOpType.FloorDiv.name: self.convert_elementwise,
            TFOpType.Sqrt.name: self.convert_elementwise,
            TFOpType.MirrorPad.name: self.convert_pad,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, FilterFormat.HWIO)

        # import tensorflow graph
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())

        self._placeholders = {}
        self.add_shape_info(tf_graph_def)

        print("Run transform_graph: %s" %
              TFTransformGraphOptions[option.device])
        try:
            print("output keys: ", option.output_nodes.keys())
            transformed_graph_def = TransformGraph(
                tf_graph_def, option.input_nodes.keys(),
                option.output_nodes.keys(),
                TFTransformGraphOptions[option.device])
        except Exception as ex:
            print("Failed to transform graph using tf tool: %s" % ex)
            transformed_graph_def = tf_graph_def

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph

        self._skip_tensor = set()
        self._output_shape_list = []
        self._output_shape_op_list = []
Example #9
    def __init__(self, option, src_model_file):
        self._op_converters = {
            TFOpType.Conv2D.name: self.convert_conv2d,
            TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
            TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
            TFOpType.BiasAdd.name: self.convert_biasadd,
            TFOpType.Add.name: self.convert_add,
            TFOpType.Sub.name: self.convert_elementwise,
            TFOpType.Mul.name: self.convert_elementwise,
            TFOpType.Div.name: self.convert_elementwise,
            TFOpType.Minimum.name: self.convert_elementwise,
            TFOpType.Maximum.name: self.convert_elementwise,
            TFOpType.Neg.name: self.convert_elementwise,
            TFOpType.Abs.name: self.convert_elementwise,
            TFOpType.Pow.name: self.convert_elementwise,
            TFOpType.RealDiv.name: self.convert_elementwise,
            TFOpType.SquaredDifference.name: self.convert_elementwise,
            TFOpType.Square.name: self.convert_elementwise,
            TFOpType.Rsqrt.name: self.convert_elementwise,
            TFOpType.Equal.name: self.convert_elementwise,
            TFOpType.Min.name: self.convert_reduce,
            TFOpType.Max.name: self.convert_reduce,
            TFOpType.Mean.name: self.convert_reduce,
            TFOpType.Prod.name: self.convert_reduce,
            TFOpType.Relu.name: self.convert_activation,
            TFOpType.LeakyRelu.name: self.convert_activation,
            TFOpType.Relu6.name: self.convert_activation,
            TFOpType.Tanh.name: self.convert_activation,
            TFOpType.Sigmoid.name: self.convert_activation,
            TFOpType.Fill.name: self.convert_fill,
            TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
            TFOpType.AvgPool.name: self.convert_pooling,
            TFOpType.MaxPool.name: self.convert_pooling,
            TFOpType.MatMul.name: self.convert_matmul,
            TFOpType.BatchMatMul.name: self.convert_matmul,
            TFOpType.Identity.name: self.convert_identity,
            TFOpType.Reshape.name: self.convert_reshape,
            TFOpType.Shape.name: self.convert_shape,
            TFOpType.ExpandDims.name: self.convert_expand_dims,
            TFOpType.Squeeze.name: self.convert_squeeze,
            TFOpType.Transpose.name: self.convert_transpose,
            TFOpType.Softmax.name: self.convert_softmax,
            TFOpType.ResizeBicubic.name: self.convert_resize_bicubic,
            TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
            TFOpType.ResizeNearestNeighbor.name:
            self.convert_resize_nearest_neighbor,  # noqa
            TFOpType.Placeholder.name: self.convert_nop,
            TFOpType.SpaceToBatchND.name: self.convert_space_batch,
            TFOpType.BatchToSpaceND.name: self.convert_space_batch,
            TFOpType.DepthToSpace.name: self.convert_space_depth,
            TFOpType.SpaceToDepth.name: self.convert_space_depth,
            TFOpType.Pad.name: self.convert_pad,
            TFOpType.ConcatV2.name: self.convert_concat,
            TFOpType.Const.name: self.convert_nop,
            TFOpType.Gather.name: self.convert_gather,
            TFOpType.GatherV2.name: self.convert_gather,
            TFOpType.StridedSlice.name: self.convert_stridedslice,
            TFOpType.Slice.name: self.convert_slice,
            TFOpType.ReverseV2.name: self.convert_reverse,
            TFOpType.Pack.name: self.convert_stack,
            TFOpType.Stack.name: self.convert_stack,
            TFOpType.Unpack.name: self.convert_unstack,
            TFOpType.Unstack.name: self.convert_unstack,
            TFOpType.Cast.name: self.convert_cast,
            TFOpType.ArgMax.name: self.convert_argmax,
            TFOpType.Split.name: self.convert_split,
            TFOpType.FakeQuantWithMinMaxVars.name: self.convert_fake_quantize,
            TFOpType.FakeQuantWithMinMaxArgs.name: self.convert_fake_quantize,
            TFOpType.FloorDiv.name: self.convert_elementwise,
            TFOpType.Sqrt.name: self.convert_elementwise,
            TFOpType.MirrorPad.name: self.convert_pad,
            TFOpType.Cumsum.name: self.convert_cumsum,
            TFOpType.OneHot.name: self.convert_one_hot,
            TFOpType.Sum.name: self.convert_reduce,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.HWIO)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NHWC)

        # import tensorflow graph
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())

        self._placeholders = {}
        self._skip_tensor = set()
        self._output_shape = {}

        print("Run transform_graph: %s" % TFTransformGraphOptions)
        try:
            print("output keys: ", option.output_nodes.keys())
            transformed_graph_def = TransformGraph(tf_graph_def,
                                                   option.input_nodes.keys(),
                                                   option.output_nodes.keys(),
                                                   TFTransformGraphOptions)
        except Exception as ex:
            print("Failed to transform graph using tf tool: %s" % ex)
            transformed_graph_def = tf_graph_def

        # To check optimized model, uncomment following code.
        # tf.io.write_graph(
        #     transformed_graph_def,
        #     ".",
        #     os.path.basename(src_model_file)[:-3] + "_opt.pb",
        #     as_text=False
        # )

        self.add_shape_info(transformed_graph_def)

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph
                self.update_output_shapes(session)

        # we have polluted graph with 'shape' ops, so reset it and reload it
        # again
        tf.reset_default_graph()

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph
Example #10
    def __init__(self, option, src_model_file):
        self._op_converters = {
            TFOpType.Conv2D.name: self.convert_conv2d,
            TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
            TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
            TFOpType.BiasAdd.name: self.convert_biasadd,
            TFOpType.Add.name: self.convert_add,
            TFOpType.Sub.name: self.convert_elementwise,
            TFOpType.Mul.name: self.convert_elementwise,
            TFOpType.Div.name: self.convert_elementwise,
            TFOpType.Min.name: self.convert_elementwise,
            TFOpType.Max.name: self.convert_elementwise,
            TFOpType.Neg.name: self.convert_elementwise,
            TFOpType.Abs.name: self.convert_elementwise,
            TFOpType.Pow.name: self.convert_elementwise,
            TFOpType.RealDiv.name: self.convert_elementwise,
            TFOpType.SquaredDifference.name: self.convert_elementwise,
            TFOpType.Square.name: self.convert_elementwise,
            TFOpType.Rsqrt.name: self.convert_elementwise,
            TFOpType.Equal.name: self.convert_elementwise,
            TFOpType.Relu.name: self.convert_activation,
            TFOpType.Relu6.name: self.convert_activation,
            TFOpType.Tanh.name: self.convert_activation,
            TFOpType.Sigmoid.name: self.convert_activation,
            TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
            TFOpType.AvgPool.name: self.convert_pooling,
            TFOpType.MaxPool.name: self.convert_pooling,
            TFOpType.MatMul.name: self.convert_matmul,
            TFOpType.BatchMatMul.name: self.convert_matmul,
            TFOpType.Identity.name: self.convert_identity,
            TFOpType.Reshape.name: self.convert_reshape,
            TFOpType.Shape.name: self.convert_shape,
            TFOpType.Squeeze.name: self.convert_squeeze,
            TFOpType.Transpose.name: self.convert_transpose,
            TFOpType.Softmax.name: self.convert_softmax,
            TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
            TFOpType.Placeholder.name: self.convert_nop,
            TFOpType.SpaceToBatchND.name: self.convert_space_batch,
            TFOpType.BatchToSpaceND.name: self.convert_space_batch,
            TFOpType.DepthToSpace.name: self.convert_space_depth,
            TFOpType.SpaceToDepth.name: self.convert_space_depth,
            TFOpType.Pad.name: self.convert_pad,
            TFOpType.ConcatV2.name: self.convert_concat,
            TFOpType.Mean.name: self.convert_mean,
            TFOpType.Const.name: self.convert_nop,
            TFOpType.Gather.name: self.convert_gather,
            TFOpType.StridedSlice.name: self.convert_stridedslice,
            TFOpType.Slice.name: self.convert_slice,
            TFOpType.Pack.name: self.convert_stack,
            TFOpType.Stack.name: self.convert_stack,
            TFOpType.Cast.name: self.convert_cast,
            TFOpType.ArgMax.name: self.convert_argmax,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, FilterFormat.HWIO)

        # import tensorflow graph
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())

        self._placeholders = {}
        self.add_shape_info(tf_graph_def)

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(tf_graph_def, name='')
                self._tf_graph = graph

        self._skip_tensor = set()
Example #11
def reverse_batch_to_space_and_biasadd(net_def):
    tensor_map = {}
    for tensor in net_def.tensors:
        tensor_map[tensor.name] = tensor
    op_map = {}
    for op in net_def.op:
        op_map[op.name] = op
    consumers = {}
    for op in net_def.op:
        for ipt in op.input:
            if ipt not in consumers:
                consumers[ipt] = []
            consumers[ipt].append(op)

    new_ops = []
    skip_ops = set()
    visited_ops = set()

    for op in net_def.op:
        if op.name in visited_ops:
            continue
        # pattern: QConv -> RR -> R -> QB2S -> QBiasAdd -> RR -> R
        success = False
        if op.type == 'Requantize_32to8':
            biasadd_requantize_op = op
            biasadd_op = get_node_from_map(op_map,
                                           biasadd_requantize_op.input[0])
            if biasadd_op.type == 'QuantizedBiasAdd_8p8to32':
                b2s_op = get_node_from_map(op_map, biasadd_op.input[0])
                if b2s_op.type == 'QuantizedBatchToSpaceND_8':
                    conv_requantize_op = get_node_from_map(
                        op_map, b2s_op.input[0])
                    conv_op = get_node_from_map(op_map,
                                                conv_requantize_op.input[0])
                    if conv_op.type == 'QuantizedConv2d_8x8to32':
                        new_biasadd_op = mace_pb2.OperatorDef()
                        new_biasadd_op.CopyFrom(biasadd_op)
                        new_biasadd_op.input[0] = get_tensor_name_from_op(
                            conv_requantize_op.name, 0)
                        new_biasadd_op.input[2] = get_tensor_name_from_op(
                            conv_requantize_op.name, 1)
                        new_biasadd_op.input[3] = get_tensor_name_from_op(
                            conv_requantize_op.name, 2)
                        new_biasadd_op.out_max_byte_size[
                            0] = conv_requantize_op.out_max_byte_size[0] * 4

                        new_biasadd_requantize_op = mace_pb2.OperatorDef()
                        new_biasadd_requantize_op.CopyFrom(
                            biasadd_requantize_op)
                        new_biasadd_requantize_op.out_max_byte_size[
                            0] = new_biasadd_op.out_max_byte_size[0] // 4

                        new_b2s_op = mace_pb2.OperatorDef()
                        new_b2s_op.CopyFrom(b2s_op)
                        new_b2s_op.input[0] = get_tensor_name_from_op(
                            biasadd_requantize_op.name, 0)
                        new_b2s_op.input[3] = get_tensor_name_from_op(
                            biasadd_requantize_op.name, 1)
                        new_b2s_op.input[4] = get_tensor_name_from_op(
                            biasadd_requantize_op.name, 2)

                        new_ops.extend([
                            new_biasadd_op, new_biasadd_requantize_op,
                            new_b2s_op
                        ])
                        skip_ops = skip_ops.union([
                            biasadd_op.name, biasadd_requantize_op.name,
                            b2s_op.name
                        ])
                        visited_ops.add(op.name)

                        follow_ops = consumers[get_tensor_name_from_op(
                            biasadd_requantize_op.name, 0)]
                        for follow_op in follow_ops:
                            new_follow_op = mace_pb2.OperatorDef()
                            new_follow_op.CopyFrom(follow_op)
                            for i in range(len(follow_op.input)):
                                for k in range(3):
                                    if new_follow_op.input[
                                            i] == get_tensor_name_from_op(
                                                biasadd_requantize_op.name, k):
                                        new_follow_op.input[
                                            i] = get_tensor_name_from_op(
                                                b2s_op.name, k)
                            new_ops.append(new_follow_op)
                            skip_ops.add(follow_op.name)
                            visited_ops.add(follow_op.name)

        visited_ops.add(op.name)

    new_net_def = mace_pb2.NetDef()
    new_net_def.tensors.extend(tensor_map.values())
    new_net_def.op.extend([op for op in net_def.op if op.name not in skip_ops])
    new_net_def.op.extend(new_ops)

    return new_net_def
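
get_node_from_map is used repeatedly above; it presumably resolves a tensor name back to the op that produced it (a sketch, assuming the same ':port' naming convention as the other helpers):

def get_node_from_map(op_map, tensor_name):
    # Strip the output-port suffix so 'conv:0' resolves to the op 'conv'.
    return op_map[tensor_name.split(':')[0]]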
Example #12
    def __init__(self, option, src_model_file):
        self._op_converters = {
            OnnxOpType.Abs.name: self.convert_eltwise,
            OnnxOpType.Add.name: self.convert_eltwise,
            OnnxOpType.Affine.name: self.convert_affine,
            OnnxOpType.Append.name: self.convert_concat,
            OnnxOpType.ArgMax.name: self.convert_argmax,
            OnnxOpType.ArgMin.name: self.convert_argmax,
            OnnxOpType.AveragePool.name: self.convert_pooling,
            OnnxOpType.BatchNormalization.name: self.convert_fused_batchnorm,
            OnnxOpType.Cast.name: self.convert_cast,
            OnnxOpType.Concat.name: self.convert_concat,
            OnnxOpType.Conv.name: self.convert_conv2d,
            OnnxOpType.ConvTranspose.name: self.convert_deconv,
            OnnxOpType.DepthToSpace.name: self.convert_depth_space,
            OnnxOpType.Dropout.name: self.convert_identity,
            OnnxOpType.DimRange.name: self.convert_dim_range,
            OnnxOpType.Div.name: self.convert_eltwise,
            OnnxOpType.Equal.name: self.convert_eltwise,
            OnnxOpType.Gather.name: self.convert_gather,
            OnnxOpType.Gemm.name: self.convert_gemm,
            OnnxOpType.GlobalAveragePool.name: self.convert_reduce,
            OnnxOpType.GlobalMaxPool.name: self.convert_reduce,
            OnnxOpType.Identity.name: self.convert_identity,
            OnnxOpType.IfDefined.name: self.convert_identity,
            OnnxOpType.ImageScaler.name: self.convert_imagescaler,
            OnnxOpType.LeakyRelu.name: self.convert_activation,
            # OnnxOpType.LogSoftmax.name: self.convert_softmax,
            OnnxOpType.LSTM.name: self.convert_lstm,
            # OnnxOpType.LstmNonlinear.name: self.convert_lstm_nonlinear,
            OnnxOpType.Max.name: self.convert_eltwise,
            OnnxOpType.MaxPool.name: self.convert_pooling,
            OnnxOpType.MatMul.name: self.convert_matmul,
            OnnxOpType.Min.name: self.convert_eltwise,
            OnnxOpType.Mul.name: self.convert_eltwise,
            OnnxOpType.Neg.name: self.convert_eltwise,
            OnnxOpType.Normalize.name: self.convert_normalize,
            OnnxOpType.Offset.name: self.convert_timeoffset,
            OnnxOpType.Padding.name: self.convert_identity,
            OnnxOpType.PNorm.name: self.convert_pnorm,
            OnnxOpType.Pow.name: self.convert_eltwise,
            OnnxOpType.PRelu.name: self.convert_activation,
            OnnxOpType.Relu.name: self.convert_activation,
            OnnxOpType.Reshape.name: self.convert_reshape,
            OnnxOpType.Reciprocal.name: self.convert_eltwise,
            OnnxOpType.Scale.name: self.convert_eltwise,
            OnnxOpType.Sigmoid.name: self.convert_activation,
            OnnxOpType.Slice.name: self.convert_slice,
            OnnxOpType.Softmax.name: self.convert_softmax,
            OnnxOpType.SpaceToDepth.name: self.convert_depth_space,
            OnnxOpType.Splice.name: self.convert_splice,
            OnnxOpType.Split.name: self.convert_split,
            OnnxOpType.Sqrt.name: self.convert_eltwise,
            OnnxOpType.Squeeze.name: self.convert_squeeze,
            OnnxOpType.Sub.name: self.convert_eltwise,
            OnnxOpType.Sum.name: self.convert_eltwise,
            OnnxOpType.SumGroup.name: self.convert_sum_group,
            OnnxOpType.Tanh.name: self.convert_activation,
            OnnxOpType.TargetRMSNorm.name: self.convert_target_rms_norm,
            OnnxOpType.Transpose.name: self.convert_transpose,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        self._data_format = DataFormat.NCHW
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.OIHW)
        onnx_model = onnx.load(src_model_file)

        ir_version = onnx_model.ir_version
        opset_imp = onnx_model.opset_import

        polish_available = True
        print("onnx model IR version: ", ir_version)
        for imp in opset_imp:
            domain = imp.domain
            version = imp.version
            print("constains ops domain: ", domain, "version:", version)
            if 'kaldi2onnx' in domain:
                polish_available = False
                self._data_format = DataFormat.DF_NONE
        if polish_available:
            onnx_model = onnx.utils.polish_model(onnx_model)

        self._onnx_model = onnx_model
        self._graph_shapes_dict = {}
        self._consts = {}
        self._replace_tensors = {}