Example #1
    def convert_general_op(self, tf_op):
        op = self._mace_net_def.op.add()
        op.name = tf_op.name
        op.type = tf_op.type
        op.input.extend([tf_input.name for tf_input in tf_op.inputs])
        op.output.extend([tf_output.name for tf_output in tf_op.outputs])
        for tf_output in tf_op.outputs:
            output_shape = op.output_shape.add()
            output_shape.dims.extend(self.infer_tensor_shape(tf_output))

        data_type_arg = op.arg.add()
        data_type_arg.name = 'T'
        try:
            dtype = tf_op.get_attr('T')
            if dtype == tf.int32:
                data_type_arg.i = mace_pb2.DT_INT32
            elif dtype == tf.float32:
                data_type_arg.i = self._option.data_type
            else:
                mace_check(False, "data type %s not supported" % dtype)
        except ValueError:
            data_type_arg.i = self._option.data_type

        ConverterUtil.add_data_format_arg(op, DataFormat.NHWC)

        return op
Example #2
    def __init__(self, option, src_model_file):
        self._op_converters = {
            OnnxOpType.Abs.name: self.convert_eltwise,
            OnnxOpType.Add.name: self.convert_eltwise,
            OnnxOpType.ArgMax.name: self.convert_argmax,
            OnnxOpType.ArgMin.name: self.convert_argmax,
            OnnxOpType.AveragePool.name: self.convert_pooling,
            OnnxOpType.BatchNormalization.name: self.convert_fused_batchnorm,
            OnnxOpType.Cast.name: self.convert_cast,
            OnnxOpType.Concat.name: self.convert_concat,
            OnnxOpType.Conv.name: self.convert_conv2d,
            OnnxOpType.ConvTranspose.name: self.convert_deconv,
            OnnxOpType.DepthToSpace.name: self.convert_depth_space,
            OnnxOpType.Dropout.name: self.convert_identity,
            OnnxOpType.Div.name: self.convert_eltwise,
            OnnxOpType.Equal.name: self.convert_eltwise,
            OnnxOpType.Gather.name: self.convert_gather,
            OnnxOpType.Gemm.name: self.convert_gemm,
            OnnxOpType.GlobalAveragePool.name: self.convert_reduce,
            OnnxOpType.GlobalMaxPool.name: self.convert_reduce,
            OnnxOpType.Identity.name: self.convert_identity,
            OnnxOpType.ImageScaler.name: self.convert_imagescaler,
            OnnxOpType.LeakyRelu.name: self.convert_activation,
            OnnxOpType.Max.name: self.convert_eltwise,
            OnnxOpType.MaxPool.name: self.convert_pooling,
            OnnxOpType.MatMul.name: self.convert_matmul,
            OnnxOpType.Min.name: self.convert_eltwise,
            OnnxOpType.Mul.name: self.convert_eltwise,
            OnnxOpType.Neg.name: self.convert_eltwise,
            OnnxOpType.Pad.name: self.convert_pad,
            OnnxOpType.Pow.name: self.convert_eltwise,
            OnnxOpType.PRelu.name: self.convert_activation,
            OnnxOpType.Relu.name: self.convert_activation,
            OnnxOpType.Reshape.name: self.convert_reshape,
            OnnxOpType.Reciprocal.name: self.convert_eltwise,
            OnnxOpType.Sigmoid.name: self.convert_activation,
            OnnxOpType.Softmax.name: self.convert_softmax,
            OnnxOpType.SpaceToDepth.name: self.convert_depth_space,
            OnnxOpType.Split.name: self.convert_split,
            OnnxOpType.Sqrt.name: self.convert_eltwise,
            OnnxOpType.Squeeze.name: self.convert_squeeze,
            OnnxOpType.Sub.name: self.convert_eltwise,
            OnnxOpType.Sum.name: self.convert_eltwise,
            OnnxOpType.Tanh.name: self.convert_activation,
            OnnxOpType.Transpose.name: self.convert_transpose,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, FilterFormat.OIHW)
        onnx_model = onnx.load(src_model_file)

        polished_model = onnx.utils.polish_model(onnx_model)

        print "onnx model IR version: ", onnx_model.ir_version
        print "onnx model opset import: ", onnx_model.opset_import

        self._onnx_model = shape_inference.infer_shapes(polished_model)
        self._graph_shapes_dict = {}
        self._consts = {}
        self._replace_tensors = {}
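The dictionary built above is a dispatch table mapping each ONNX op type to a bound converter method. A minimal sketch of how such a table is typically consumed (the Node class and driver loop below are illustrative assumptions, not MACE's actual run loop):

class Node(object):
    def __init__(self, name, op_type):
        self.name = name
        self.op_type = op_type

def convert_ops(nodes, op_converters):
    for node in nodes:
        if node.op_type not in op_converters:
            raise ValueError('op %s of type %s is not supported'
                             % (node.name, node.op_type))
        # each entry is a bound method, so this calls e.g.
        # self.convert_eltwise(node)
        op_converters[node.op_type](node)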
Example #3
    def convert_general_op(self, tf_op):
        op = self._mace_net_def.op.add()
        op.name = tf_op.name
        op.type = tf_op.type
        op.input.extend([tf_input.name for tf_input in tf_op.inputs])
        op.output.extend([tf_output.name for tf_output in tf_op.outputs])
        for tf_output in tf_op.outputs:
            output_shape = op.output_shape.add()
            self.infer_tensor_shape(output_shape, tf_output)

        data_type_arg = op.arg.add()
        data_type_arg.name = 'T'
        try:
            dtype = tf_op.get_attr('T')
            if dtype == tf.int32:
                data_type_arg.i = mace_pb2.DT_INT32
            elif dtype == tf.float32:
                data_type_arg.i = self._option.data_type
            else:
                mace_check(False, "data type %s not supported" % dtype)
        except ValueError:
            try:
                dtype = tf_op.get_attr('SrcT')
                if dtype == tf.int32 or dtype == tf.bool:
                    data_type_arg.i = mace_pb2.DT_INT32
                elif dtype == tf.float32:
                    data_type_arg.i = self._option.data_type
                else:
                    mace_check(False, "data type %s not supported" % dtype)
            except ValueError:
                data_type_arg.i = self._option.data_type

        ConverterUtil.add_data_format_arg(op, DataFormat.NHWC)

        return op
Example #4
    def convert_general_op(self, node, with_shape=True):
        op = self._mace_net_def.op.add()
        op.name = node.name

        for input in node.inputs:
            if input in self._replace_tensors:
                input = self._replace_tensors[input]
            op.input.append(input)
        for output in node.outputs:
            op.output.append(output)
            if with_shape:
                if output in self._graph_shapes_dict:
                    output_shape = op.output_shape.add()
                    shape_info = self._graph_shapes_dict[output]
                    output_shape.dims.extend(shape_info)

        data_type_arg = op.arg.add()
        data_type_arg.name = 'T'
        data_type_arg.i = self._option.data_type

        framework_type_arg = op.arg.add()
        framework_type_arg.name = MaceKeyword.mace_framework_type_str
        framework_type_arg.i = FrameworkType.ONNX.value

        ConverterUtil.add_data_format_arg(op, self._data_format)
        return op
Example #5
 def infer_shape_fully_connected(self, op):
     input_shape = self._output_shape_cache[op.input[0]]
     weight_shape = self._output_shape_cache[op.input[1]]
     if ConverterUtil.data_format(op) == DataFormat.NCHW:
         output_shape = [input_shape[0], weight_shape[0], 1, 1]
     else:
         mace_check(False, "format %s is not supported"
                    % ConverterUtil.data_format(op))
     self.add_output_shape(op, [output_shape])
Example #6
    def convert_gemm(self, node):
        # only supports fully-connected-style Gemm for now.
        trans_a = node.attrs['transA'] if 'transA' in node.attrs else 0
        trans_b = node.attrs['transB'] if 'transB' in node.attrs else 0
        shape_a = self._graph_shapes_dict[node.inputs[0]]
        shape_b = self._graph_shapes_dict[node.inputs[1]]
        mace_check(trans_a == 0 and trans_b == 1,
                   "Do not support non-default transpose")
        mace_check(len(shape_a) == 4, "Unexpected fc input ndim.")
        mace_check(node.inputs[1] in self._consts, "unexpected fc weight.")
        if len(shape_b) == 4:
            mace_check(
                list(shape_b[2:]) == [1, 1],
                "Only support 4D weight with shape [*, *, 1, 1]")
        elif len(shape_b) == 2:
            tensor_b = self._consts[node.inputs[1]]
            tensor_data = np.array(tensor_b.float_data).reshape(
                shape_b[0], shape_b[1], 1, 1)
            tensor_b.float_data[:] = tensor_data.flat
            tensor_b.dims[:] = tensor_data.shape
        else:
            mace_check(False, "Unexpected fc weigth ndim.")

        op = self._mace_net_def.op.add()
        op.name = node.name
        op.type = MaceOp.FullyConnected.name
        data_type_arg = op.arg.add()
        data_type_arg.name = 'T'
        data_type_arg.i = self._option.data_type

        framework_type_arg = op.arg.add()
        framework_type_arg.name = MaceKeyword.mace_framework_type_str
        framework_type_arg.i = FrameworkType.ONNX.value

        ConverterUtil.add_data_format_arg(op, DataFormat.NCHW)

        for input in node.inputs:
            op.input.append(input)
        for output in node.outputs:
            op.output.append(output)
            output_shape = op.output_shape.add()
            shape_info = self._graph_shapes_dict[output]
            mace_check(
                len(shape_info) in [2, 4],
                "gemm output shape should be 2 or 4 dims.")
            if len(shape_info) == 4:
                mace_check(
                    shape_info[2] == 1 and shape_info[3] == 1,
                    "gemm's 4-dim output shape should be [*, * , 1, 1]")
            else:
                shape_info = [shape_info[0], shape_info[1], 1, 1]
            output_shape.dims.extend(shape_info)

        return op
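The 2-D branch above repacks a [N, K] Gemm weight as a 4-D [N, K, 1, 1] filter so it matches the FullyConnected weight layout; only the dims change, not the data. A small numpy check of that repacking:

import numpy as np

w2d = np.arange(6, dtype=np.float32).reshape(2, 3)  # [N=2, K=3]
w4d = w2d.reshape(2, 3, 1, 1)                       # [N, K, 1, 1]
# the flattened payload is untouched, matching tensor_b.float_data above
assert w4d.flatten().tolist() == w2d.flatten().tolist()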
Example #7
    def infer_shape_conv_pool_shape(self, op):
        input_shape = self._output_shape_cache[op.input[0]]
        output_shape = np.zeros_like(input_shape)
        if op.type == MaceOp.Pooling:
            filter_shape = list(
                ConverterUtil.get_arg(op, MaceKeyword.mace_kernel_str).ints)
            if ConverterUtil.data_format(op) == DataFormat.NCHW:
                filter_shape = [input_shape[1], input_shape[1]] + filter_shape
                if ConverterUtil.get_arg(op,
                                         MaceKeyword.mace_global_pooling_str) \
                        is not None:
                    filter_shape[2] = input_shape[2]
                    filter_shape[3] = input_shape[3]
            else:  # NHWC
                filter_shape = filter_shape + [input_shape[1], input_shape[1]]
                if ConverterUtil.get_arg(op,
                                         MaceKeyword.mace_global_pooling_str) \
                        is not None:
                    filter_shape[0] = input_shape[1]
                    filter_shape[1] = input_shape[2]
        else:
            filter_shape = self._output_shape_cache[op.input[1]]

        paddings = ConverterUtil.get_arg(op,
                                         MaceKeyword.mace_padding_values_str).ints  # noqa
        strides = ConverterUtil.get_arg(op, MaceKeyword.mace_strides_str).ints
        dilations_arg = ConverterUtil.get_arg(op,
                                              MaceKeyword.mace_dilations_str)
        if dilations_arg is not None:
            dilations = dilations_arg.ints
        else:
            dilations = [1, 1]
        if op.type == MaceOp.Pooling:
            round_func = math.ceil
        else:
            round_func = math.floor

        output_shape[0] = input_shape[0]
        if ConverterUtil.data_format(op) == DataFormat.NCHW \
                and ConverterUtil.filter_format(self._net) == FilterFormat.OIHW:  # noqa
            # filter format: OIHW
            if op.type == MaceOp.DepthwiseConv2d.name:
                output_shape[1] = filter_shape[0] * filter_shape[1]
            else:
                output_shape[1] = filter_shape[0]
            output_shape[2] = int(
                round_func((input_shape[2] + paddings[0] - filter_shape[2] -
                            (filter_shape[2] - 1) *
                            (dilations[0] - 1)) / float(strides[0]))) + 1
            output_shape[3] = int(
                round_func((input_shape[3] + paddings[1] - filter_shape[3] -
                            (filter_shape[3] - 1) *
                            (dilations[1] - 1)) / float(strides[1]))) + 1
        else:
            mace_check(False,
                       "Mace can only infer shape for"
                       " NCHW input and OIHW filter")

        self.add_output_shape(op, [output_shape])
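A worked check of the height formula above, assuming paddings[0] holds the total padding along H (top plus bottom): a 3x3 kernel with stride 2, no dilation, and total padding 2 on a 224-high input gives 112.

import math

in_h, k, pad_total, stride, dilation = 224, 3, 2, 2, 1
out_h = int(math.floor((in_h + pad_total - k -
                        (k - 1) * (dilation - 1)) / float(stride))) + 1
assert out_h == 112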
Example #8
    def infer_shape_prior_box(self, op):
        output_shape = [1, 2, 1]
        input_shape = list(self._output_shape_cache[op.input[0]])
        input_w = input_shape[3]
        input_h = input_shape[2]
        min_size = ConverterUtil.get_arg(op, MaceKeyword.mace_min_size_str).floats  # noqa
        max_size = ConverterUtil.get_arg(op, MaceKeyword.mace_max_size_str).floats  # noqa
        aspect_ratio = ConverterUtil.get_arg(op, MaceKeyword.mace_aspect_ratio_str).floats  # noqa
        num_prior = len(aspect_ratio) * len(min_size) + len(max_size)

        output_shape[2] = num_prior * input_h * input_w * 4
        self.add_output_shape(op, [output_shape])
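The count reflects that every feature-map location emits num_prior boxes of 4 coordinates each. For instance, with one min_size, one max_size, and two aspect ratios on a 10x10 feature map:

num_prior = 2 * 1 + 1          # len(aspect_ratio) * len(min_size) + len(max_size)
assert num_prior * 10 * 10 * 4 == 1200   # flattened into output_shape[2]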
Example #9
 def infer_shape_resize_bilinear(self, op):
     input_shape = self._output_shape_cache[op.input[0]]
     size = ConverterUtil.get_arg(
         op, MaceKeyword.mace_resize_size_str).ints
     if ConverterUtil.data_format(op) == DataFormat.NCHW:
         output_shape = [input_shape[0], input_shape[1], size[0], size[1]]
     elif ConverterUtil.data_format(op) == DataFormat.NHWC:
         output_shape = [input_shape[0], size[0], size[1], input_shape[3]]
     else:
         output_shape = []
         mace_check(False, "format %s is not supported"
                    % ConverterUtil.data_format(op))
     self.add_output_shape(op, [output_shape])
Example #10
    def add_tensorflow_padding_value(self):
        for op in self._model.op:
            padding_type = ConverterUtil.get_arg(op,
                                                 MaceKeyword.mace_padding_str)
            if padding_type is None:
                continue

            padding_arg = op.arg.add()
            padding_arg.name = MaceKeyword.mace_padding_values_str
            if padding_type.i == PaddingMode.VALID.value:
                padding_arg.ints.extend([0, 0, 0, 0])
            elif padding_type.i == PaddingMode.SAME.value:
                stride = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_strides_str).ints
                kernel = []
                dilation = [1, 1]
                if op.type == MaceOp.Conv2D.name or \
                   op.type == MaceOp.DepthwiseConv2d.name:
                    if ConverterUtil.get_arg(
                            op, MaceKeyword.mace_dilations_str) is not None:
                        dilation = ConverterUtil.get_arg(
                            op, MaceKeyword.mace_dilations_str).ints
                    for tensor in self._model.tensors:
                        if tensor.name == op.input[1]:
                            kernel = tensor.dims[1:3]
                            break
                else:
                    kernel = ConverterUtil.get_arg(
                        op, MaceKeyword.mace_kernel_str).ints
                in_size = []
                for input_info in self._model.input_info:
                    if input_info.name == op.input[0]:
                        in_size = input_info.dims[1:3]
                        break
                for _op in self._model.op:
                    for out in _op.output:
                        if out == op.input[0]:
                            in_size = _op.output_shape[0].dims[1:3]
                            break
                    if len(in_size) > 0:
                        break
                out_size = op.output_shape[0].dims[1:3]
                h = (out_size[0] - 1) * stride[0] \
                    + ((kernel[0] - 1) * dilation[0] + 1) - in_size[0]
                w = (out_size[1] - 1) * stride[1] \
                    + ((kernel[1] - 1) * dilation[1] + 1) - in_size[1]
                top = int(np.floor(h / 2))
                left = int(np.floor(w / 2))
                bottom = h - top
                right = w - left
                padding_arg.ints.extend([top, right, bottom, left])
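The SAME-padding arithmetic above recovers TensorFlow's rule out = ceil(in / stride) and splits any odd remainder with the extra cell on the bottom/right. A quick check for in=224, stride=2, kernel=3:

import numpy as np

in_size, stride, kernel, dilation, out_size = 224, 2, 3, 1, 112
h = (out_size - 1) * stride + ((kernel - 1) * dilation + 1) - in_size
top = int(np.floor(h / 2))
bottom = h - top
assert (h, top, bottom) == (1, 0, 1)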
Example #11
    def convert_general_op(self, caffe_op):
        op = self._mace_net_def.op.add()
        op.name = caffe_op.name
        op.type = caffe_op.type
        op.input.extend(caffe_op.layer.bottom)
        op.output.extend(caffe_op.layer.top)

        data_type_arg = op.arg.add()
        data_type_arg.name = 'T'
        data_type_arg.i = self._option.data_type

        ConverterUtil.add_data_format_arg(op, DataFormat.NCHW)

        return op
Example #12
    def __init__(self, option, src_model_file, src_weight_file):
        self._op_converters = {
            'Input': self.convert_nop,
            'Convolution': self.convert_conv2d,
            'Deconvolution': self.convert_deconv2d,
            'Eltwise': self.convert_elementwise,
            'Add': self.convert_add,
            'ReLU': self.convert_activation,
            'TanH': self.convert_activation,
            'Sigmoid': self.convert_activation,
            'PReLU': self.convert_activation,
            'Pooling': self.convert_pooling,
            'Concat': self.convert_concat,
            'Slice': self.convert_slice,
            'Softmax': self.convert_softmax,
            'InnerProduct': self.convert_fully_connected,
            'Interp': self.convert_interp,
            'BatchNorm': self.convert_folded_batchnorm,
            'Crop': self.convert_crop,
            'Scale': self.convert_scale,
            'ShuffleChannel': self.convert_channel_shuffle,
            'Permute': self.convert_permute,
            'Flatten': self.convert_flatten,
            'PriorBox': self.convert_prior_box,
            'Reshape': self.convert_reshape,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.OIHW)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NCHW)
        self._caffe_net = CaffeNet()
        self._caffe_layers = caffe_pb2.NetParameter()
        caffe_weights = caffe_pb2.NetParameter()

        # parse prototxt
        with open(src_model_file, 'r') as f:
            google.protobuf.text_format.Merge(str(f.read()),
                                              self._caffe_layers)
            self.filter_test_layers(self._caffe_layers)
            for layer in self._caffe_layers.layer:
                self._caffe_net.add_layer(layer)

        # parse model weight
        with open(src_weight_file, 'rb') as f:
            caffe_weights.ParseFromString(f.read())
            self.filter_test_layers(caffe_weights)
            for weight in caffe_weights.layer:
                self._caffe_net.add_blob(weight)

        self._skip_ops = []
Example #13
    def __init__(self, net_def):
        self.net_def = net_def
        self.idle_mem = set()
        self.op_mem = {}  # op_name->mem_id
        self.mem_block = {}  # mem_id->[size] or mem_id->[x, y]
        self.total_mem_count = 0
        self.input_ref_counter = {}
        self.mem_ref_counter = {}
        ocl_mem_type_arg = ConverterUtil.get_arg(
            net_def, MaceKeyword.mace_opencl_mem_type)
        self.cl_mem_type = ocl_mem_type_arg.i if ocl_mem_type_arg is not None \
            else None

        consumers = {}
        for op in net_def.op:
            if not self.op_need_optimize_memory(op):
                continue
            for ipt in op.input:
                if ipt not in consumers:
                    consumers[ipt] = []
                consumers[ipt].append(op)
        # only count references to ops' output tensors
        for op in net_def.op:
            if not self.op_need_optimize_memory(op):
                continue
            for output in op.output:
                tensor_name = output
                if tensor_name in consumers:
                    self.input_ref_counter[tensor_name] = \
                        len(consumers[tensor_name])
                else:
                    self.input_ref_counter[tensor_name] = 0
Example #14
 def common_check(self):
     for op in self._model.op:
         mace_check(
             len(op.input) >= 1,
             op.name + ': apu does not support op with 0 input')
         mace_check(
             len(op.output) == 1,
             op.name + ': apu only support single output op')
         mace_check(
             len(op.output) == len(op.output_shape),
             op.name + ': length of output and output_shape not'
             ' match')
         mace_check(
             len(op.output_shape[0].dims) <= 4,
             op.name + ': apu only support 1D~4D tensor')
         mace_check(
             len(op.output) == len(op.quantize_info),
             op.name + ': length of output and quantize_info not'
             ' match')
         data_format = ConverterUtil.data_format(op)
         if data_format is not None and len(op.output_shape[0].dims) == 4:
             mace_check((data_format == DataFormat.NHWC)
                        or (data_format == DataFormat.AUTO),
                        op.name + ': apu only support 4D tensor with NHWC'
                        ' or AUTO format but find ' + str(data_format))
         act_mode_arg = ConverterUtil.get_arg(
             op, MaceKeyword.mace_activation_type_str)
         if act_mode_arg is not None:
             mace_check(
                 act_mode_arg.s == b'RELU' or act_mode_arg.s == b'RELUX',
                 op.name + ': apu only support activation RELU and'
                 ' RELUX')
     for tensor in self._model.tensors:
         mace_check(
             len(tensor.dims) <= 4,
             tensor.name + ': apu only support 1D~4D tensor')
     for input_info in self._model.input_info:
         mace_check(
             len(input_info.dims) <= 4,
             input_info.name + ': apu only support 1D~4D tensor')
         mace_check(input_info.data_type == mace_pb2.DT_FLOAT,
                    input_info.name + ': apu only support float input')
         if len(input_info.dims) == 4:
             mace_check(
                 input_info.data_format == DataFormat.NHWC.value,
                 input_info.name + ': apu only support 4D tensor'
                 ' with NHWC format')
Example #15
    def infer_shape_concat(self, op):
        # copy so the cached input shape is not mutated in place
        output_shape = list(self._output_shape_cache[op.input[0]])
        axis = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str).i
        output_shape[axis] = 0  # accumulate the concat axis across all inputs
        for input_node in op.input:
            input_shape = self._output_shape_cache[input_node]
            output_shape[axis] += input_shape[axis]

        self.add_output_shape(op, [output_shape])
Example #16
 def infer_shape_slice(self, op):
     # copy so the cached input shape is not mutated in place
     output_shape = list(self._output_shape_cache[op.input[0]])
     axis = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str).i
     output_shape[axis] //= len(op.output)  # even split keeps dims integral
     output_shapes = []
     for _ in op.output:
         output_shapes.append(list(output_shape))
     self.add_output_shape(op, output_shapes)
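With no explicit slice points, the axis is split evenly across the outputs, which is why plain integer division suffices. For example, a [1, 6, 8, 8] input split along axis 1 into three outputs:

input_shape = [1, 6, 8, 8]
axis, num_outputs = 1, 3
out = list(input_shape)
out[axis] //= num_outputs
assert out == [1, 2, 8, 8]   # one such shape per output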
Example #17
    def convert_scale(self, caffe_op):
        op = self.convert_general_op(caffe_op)
        op.type = MaceOp.Eltwise.name

        scale_op_name = op.name
        op.name = scale_op_name + '_prod'

        type_arg = op.arg.add()
        type_arg.name = MaceKeyword.mace_element_type_str
        type_arg.i = EltwiseType.PROD.value

        scale_tensor_name = scale_op_name + '_scale'
        scale_data = caffe_op.blobs[0]
        self.add_tensor(scale_tensor_name, scale_data.shape,
                        mace_pb2.DT_FLOAT, scale_data)
        op.input.extend([scale_tensor_name])

        if len(caffe_op.blobs) == 2:
            bias_tensor_name = scale_op_name + '_offset'
            bias_data = caffe_op.blobs[1]
            # older versions of Caffe store a 4-dimensional bias, so
            # reshape it to a single dimension
            self.add_tensor(bias_tensor_name, bias_data.reshape(-1).shape,
                            mace_pb2.DT_FLOAT,
                            bias_data)
            op.input.extend([bias_tensor_name])

            biasadd_op = self._mace_net_def.op.add()
            biasadd_op.name = scale_op_name + '_biasadd'
            biasadd_op.type = MaceOp.BiasAdd.name
            biasadd_op.output.extend(op.output)
            op.output[:] = [op.output[0] + '_prod_output']
            biasadd_op.input.extend(op.output)
            biasadd_op.input.extend([op.input[2]])

            biasadd_op.output_shape.extend(op.output_shape)

            del op.input[2]

            data_type_arg = biasadd_op.arg.add()
            data_type_arg.name = 'T'
            data_type_arg.i = self._option.data_type

            ConverterUtil.add_data_format_arg(biasadd_op,
                                              DataFormat.NCHW)
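The rewrite above decomposes Caffe's Scale layer (per-channel y = x * scale + bias) into an Eltwise PROD followed by a BiasAdd. A small numpy illustration of the two-op pipeline, assuming NCHW data with per-channel parameters:

import numpy as np

x = np.random.rand(1, 3, 2, 2).astype(np.float32)            # NCHW input
scale = np.array([1.5, 2.0, 0.5], np.float32).reshape(1, 3, 1, 1)
bias = np.array([0.1, 0.2, 0.3], np.float32).reshape(1, 3, 1, 1)
prod = x * scale          # the '<name>_prod' Eltwise op
y = prod + bias           # the '<name>_biasadd' BiasAdd op
np.testing.assert_allclose(y, x * scale + bias)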
Example #18
 def infer_shape_crop(self, op):
     mace_check(len(op.input) == 2, "crop layer needs two inputs")
     output_shape = self._output_shape_cache[op.input[0]]
     input1_shape = self._output_shape_cache[op.input[1]]
     offsets = ConverterUtil.get_arg(op, MaceKeyword.mace_offset_str).ints
     for i in range(len(offsets)):
         if offsets[i] >= 0:
             output_shape[i] = input1_shape[i]
     self.add_output_shape(op, [output_shape])
Example #19
    def convert_general_op(self, caffe_op):
        op = self._mace_net_def.op.add()
        op.name = caffe_op.name
        op.type = caffe_op.type
        op.input.extend(caffe_op.layer.bottom)
        op.output.extend(caffe_op.layer.top)

        data_type_arg = op.arg.add()
        data_type_arg.name = 'T'
        data_type_arg.i = self._option.data_type

        framework_type_arg = op.arg.add()
        framework_type_arg.name = MaceKeyword.mace_framework_type_str
        framework_type_arg.i = FrameworkType.CAFFE.value

        ConverterUtil.add_data_format_arg(op, DataFormat.NCHW)

        return op
Example #20
 def add_int_tensor_from_arg(self, op, keyword):
     int_value_arg = ConverterUtil.get_arg(op, keyword)
     mace_check(int_value_arg.i is not None,
                op.name + ': ' + keyword + ' value i should not be None')
     int_value_tensor = self._model.tensors.add()
     int_value_tensor.name = op.name + '/' + keyword + ':0'
     int_value_tensor.data_type = mace_pb2.DT_INT32
     int_value_tensor.dims.extend([1])
     int_value_tensor.int32_data.extend([int_value_arg.i])
     op.input.extend([int_value_tensor.name])
Example #21
 def add_int_list_tensor_from_arg(self, op, keyword):
     list_value_arg = ConverterUtil.get_arg(op, keyword)
     mace_check(list_value_arg.ints is not None,
                op.name + ': ' + keyword + ' value ints should not be None')
     list_value_tensor = self._model.tensors.add()
     list_value_tensor.name = op.name + '/' + keyword + ':0'
     list_value_tensor.data_type = mace_pb2.DT_INT32
     list_value_tensor.dims.extend([len(list_value_arg.ints)])
     list_value_tensor.int32_data.extend(list_value_arg.ints)
     op.input.extend([list_value_tensor.name])
Example #22
 def infer_shape_concat(self, op):
     output_shape = list(self._output_shape_cache[op.input[0]])
     axis = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str).i
     if axis < 0:
         axis = len(output_shape) + axis
     output_shape[axis] = 0
     for input_node in op.input:
         input_shape = list(self._output_shape_cache[input_node])
         output_shape[axis] = output_shape[axis] + input_shape[axis]
     self.add_output_shape(op, [output_shape])
Example #23
    def __init__(self, option, src_model_file, src_weight_file):
        self._op_converters = {
            'Input': self.convert_nop,
            'Convolution': self.convert_conv2d,
            'Deconvolution': self.convert_deconv2d,
            'Eltwise': self.convert_elementwise,
            'Add': self.convert_add,
            'ReLU': self.convert_activation,
            'TanH': self.convert_activation,
            'Sigmoid': self.convert_activation,
            'PReLU': self.convert_activation,
            'Pooling': self.convert_pooling,
            'Concat': self.convert_concat,
            'Slice': self.convert_slice,
            'Softmax': self.convert_softmax,
            'InnerProduct': self.convert_fully_connected,
            'BatchNorm': self.convert_folded_batchnorm,
            'Crop': self.convert_crop,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, FilterFormat.OIHW)
        self._caffe_net = CaffeNet()
        self._caffe_layers = caffe_pb2.NetParameter()
        caffe_weights = caffe_pb2.NetParameter()

        # parse prototxt
        with open(src_model_file, 'rb') as f:
            google.protobuf.text_format.Merge(
                str(f.read()), self._caffe_layers)
            self.filter_test_layers(self._caffe_layers)
            for layer in self._caffe_layers.layer:
                self._caffe_net.add_layer(layer)

        # parse model weight
        with open(src_weight_file, 'rb') as f:
            caffe_weights.ParseFromString(f.read())
            self.filter_test_layers(caffe_weights)
            for weight in caffe_weights.layer:
                self._caffe_net.add_blob(weight)

        self._skip_ops = []
Example #24
 def add_size_tensor_from_arg(self, op, keyword):
     size_value_arg = ConverterUtil.get_arg(op, keyword)
     mace_check(
         len(size_value_arg.ints) == 2,
         op.name + ': ' + keyword + ' value does not have size 2')
     size_value_tensor = self._model.tensors.add()
     size_value_tensor.name = op.name + '/' + keyword + ':0'
     size_value_tensor.data_type = mace_pb2.DT_INT32
     size_value_tensor.dims.extend([2])
     size_value_tensor.int32_data.extend(size_value_arg.ints)
     op.input.extend([size_value_tensor.name])
Example #25
 def add_padding_tensor_from_arg(self, op):
     padding_value_arg = ConverterUtil.get_arg(
         op, MaceKeyword.mace_padding_values_str)
     mace_check(
         len(padding_value_arg.ints) == 4,
         op.name + ': padding value does not have size 4')
     padding_value_tensor = self._model.tensors.add()
     padding_value_tensor.name = op.name + '/padding:0'
     padding_value_tensor.data_type = mace_pb2.DT_INT32
     padding_value_tensor.dims.extend([4])
     padding_value_tensor.int32_data.extend(padding_value_arg.ints)
     op.input.extend([padding_value_tensor.name])
Example #26
 def infer_shape_reshape(self, op):
     if ConverterUtil.get_arg(op, MaceKeyword.mace_dim_str) is not None:
         dim = ConverterUtil.get_arg(op, MaceKeyword.mace_dim_str).ints
         output_shape = list(dim)
         product = input_size = 1
         idx = -1
         for i in range(len(self._output_shape_cache[op.input[0]])):
             input_size *= self._output_shape_cache[op.input[0]][i]
         for i in range(len(dim)):
             if dim[i] == 0:
                 output_shape[i] = self._output_shape_cache[op.input[0]][i]
                 product *= self._output_shape_cache[op.input[0]][i]
             elif dim[i] == -1:
                 idx = i
                 output_shape[i] = 1
             else:
                 output_shape[i] = dim[i]
                 product *= dim[i]
         if idx != -1:
             output_shape[idx] = input_size // product
         self.add_output_shape(op, [output_shape])
     else:
         output_shape = []
         axis = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str).i
         end_axis = ConverterUtil.get_arg(
             op, MaceKeyword.mace_end_axis_str).i  # noqa
         end_axis = end_axis if end_axis > 0 else end_axis + len(
             list(self._output_shape_cache[op.input[0]]))
         dim = 1
         for i in range(0, axis):
             output_shape.append(self._output_shape_cache[op.input[0]][i])
         for i in range(axis, end_axis + 1):
             dim *= self._output_shape_cache[op.input[0]][i]
         output_shape.append(-1)
         for i in range(end_axis + 1,
                        len(list(self._output_shape_cache[op.input[0]]))):
             output_shape.append(self._output_shape_cache[op.input[0]][i])
         output_shape[axis] = dim
         self.add_output_shape(op, [output_shape])
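In the dim-based branch, a 0 entry copies the corresponding input dimension and a single -1 entry is inferred from the leftover element count. A standalone sketch of just that handling (the helper name is ours, not MACE's):

def reshape_dims(input_shape, dim):
    input_size = 1
    for d in input_shape:
        input_size *= d
    out, product, idx = list(dim), 1, -1
    for i, d in enumerate(dim):
        if d == 0:                  # 0 copies the input dim
            out[i] = input_shape[i]
            product *= input_shape[i]
        elif d == -1:               # -1 is inferred below
            idx = i
            out[i] = 1
        else:
            out[i] = d
            product *= d
    if idx != -1:
        out[idx] = input_size // product
    return out

assert reshape_dims([2, 3, 4, 5], [0, -1]) == [2, 60]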
Example #27
    def infer_shape_deconv(self, op):
        input_shape = self._output_shape_cache[op.input[0]]
        output_shape = np.zeros_like(input_shape)
        filter_shape = self._output_shape_cache[op.input[1]]

        paddings = ConverterUtil.get_arg(op,
                                         MaceKeyword.mace_padding_values_str).ints  # noqa
        strides = ConverterUtil.get_arg(op, MaceKeyword.mace_strides_str).ints
        dilations_arg = ConverterUtil.get_arg(op,
                                              MaceKeyword.mace_dilations_str)
        if dilations_arg is not None:
            dilations = dilations_arg.ints
        else:
            dilations = [1, 1]
        round_func = math.floor

        output_shape[0] = input_shape[0]
        if ConverterUtil.data_format(op) == DataFormat.NCHW \
                and ConverterUtil.filter_format(self._net) == FilterFormat.OIHW:  # noqa
            # filter format: IOHW
            output_shape[1] = filter_shape[1]
            output_shape[2] = int(
                round_func((input_shape[2] - 1) * strides[0] +
                           (filter_shape[2] - 1) * (dilations[0] - 1) +
                           filter_shape[2] - paddings[0]))
            output_shape[3] = int(
                round_func((input_shape[3] - 1) * strides[1] +
                           (filter_shape[3] - 1) * (dilations[1] - 1) +
                           filter_shape[3] - paddings[1]))
        else:
            mace_check(False,
                       "Mace can only infer shape for"
                       " NCHW input and OIHW filter")
        print("deconv layer %s (%s) input:%s filter:%s output:%s" %
              (op.name, op.type, input_shape, filter_shape, output_shape))

        self.add_output_shape(op, [output_shape])
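A worked check of the deconv height formula above, again assuming paddings[0] is the total padding along H: input 7, stride 2, kernel 3, no dilation, total padding 2.

in_h, stride, k, dilation, pad_total = 7, 2, 3, 1, 2
out_h = (in_h - 1) * stride + (k - 1) * (dilation - 1) + k - pad_total
assert out_h == 13   # the usual (in - 1) * stride - 2 * pad + kernel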
Example #28
 def run(self):
     self.use_uint8_in_out()
     self.add_op_output_type()
     self.ensure_bias_vector()
     self.common_check()
     if ConverterUtil.get_arg(self._model.op[0],
                              MaceKeyword.mace_framework_type_str).i == \
        FrameworkType.TENSORFLOW.value:
         self.add_tensorflow_padding_value()
     const_data_num_arg = self._model.arg.add()
     const_data_num_arg.name = MaceKeyword.mace_const_data_num_arg_str
     const_data_num_arg.i = len(self._model.tensors)
     self.convert_ops()
     self.add_node_id()
     return self._model
Example #29
def main(unused_args):
    mace_check(os.path.isfile(FLAGS.model_file),
               "Input graph file '" + FLAGS.model_file + "' does not exist!")
    mace_check(os.path.isdir(FLAGS.output_dir),
               "Output directory '" + FLAGS.output_dir + "' does not exist!")
    net_def = mace_pb2.NetDef()
    with open(FLAGS.model_file, "rb") as f:
        net_def.ParseFromString(f.read())

    quantize_flag = ConverterUtil.get_arg(
        net_def, MaceKeyword.mace_quantize_flag_arg_str)
    quantize_flag = False if quantize_flag is None else quantize_flag.i == 1
    hexagon_flag = False
    index = 0
    end_index = len(net_def.op)
    if quantize_flag:
        while index < end_index:
            # skip the leading Quantize op
            if net_def.op[index].type == MaceOp.Quantize.name or \
                    net_def.op[index].type == \
                    HexagonOp.QuantizeINPUT_f_to_8.name:
                index += 1
            # skip the trailing Dequantize op
            elif net_def.op[end_index - 1].type == MaceOp.Dequantize.name or \
                    net_def.op[end_index - 1].type == \
                    HexagonOp.DequantizeOUTPUT_8tof.name:
                end_index -= 1
            else:
                break
        mace_check(0 < index < end_index < len(net_def.op),
                   "Wrong number of op quantize(%d) or dequantize(%d)." %
                   (index, len(net_def.op) - end_index))
        if net_def.op[-1].type == HexagonOp.DequantizeOUTPUT_8tof.name:
            hexagon_flag = True
    # omit original output
    end_index -= 1

    data_format = net_def.output_info[0].data_format
    output_configs = {"subgraphs": []}
    while index < end_index:
        # skip BatchToSpaceND and the op before it, since the graph was changed
        if net_def.op[index].type == MaceOp.BatchToSpaceND.name or \
                net_def.op[index].type == HexagonOp.BatchToSpaceND_8.name or \
                (index + 1 < end_index and
                 (net_def.op[index + 1].type == MaceOp.BatchToSpaceND.name or
                  net_def.op[index + 1].type == HexagonOp.BatchToSpaceND_8.name)):  # noqa
            index += 1
            continue
        net = copy.deepcopy(net_def)
        if hexagon_flag:
            # reuse the dequantize op and its min/max tensors' node_ids
            del net.op[index+1:end_index+1]
        else:
            del net.op[index+1:]
        del net.output_info[:]
        op = net.op[index]
        index += 1

        output_tensors = []
        output_shapes = []
        op_name = op.name
        if quantize_flag:
            op.name = MaceKeyword.mace_output_node_name + '_' + op.name
        if hexagon_flag:
            mace_check(len(op.output) == 1,
                       "Only supports number of outputs of Hexagon op be 1.")
        for i in range(len(op.output)):
            output_tensors.append(str(op.output[i]))
            output_shapes.append(
                ",".join([str(dim) for dim in op.output_shape[i].dims]))
            # modify output info
            output_info = net.output_info.add()
            output_info.name = op.output[i]
            output_info.data_format = data_format
            output_info.dims.extend(op.output_shape[i].dims)
            output_info.data_type = mace_pb2.DT_FLOAT
            # modify output op
            if quantize_flag:
                output_name = op.output[i]
                new_output_name = \
                    MaceKeyword.mace_output_node_name + '_' + op.output[i]
                op.output[i] = new_output_name
                if not hexagon_flag:
                    dequantize_op = net.op.add()
                    dequantize_op.name = normalize_op_name(output_name)
                    dequantize_op.type = MaceOp.Dequantize.name
                    dequantize_op.input.append(new_output_name)
                    dequantize_op.output.append(output_name)
                    output_shape = dequantize_op.output_shape.add()
                    output_shape.dims.extend(op.output_shape[i].dims)
                    dequantize_op.output_type.append(mace_pb2.DT_FLOAT)
                    ConverterUtil.add_data_type_arg(dequantize_op,
                                                    mace_pb2.DT_UINT8)
                else:
                    dequantize_op = net.op[-1]
                    dequantize_op.name = normalize_op_name(output_name)
                    del dequantize_op.input[:]
                    del dequantize_op.output[:]
                    dequantize_op.input.append(new_output_name)
                    dequantize_op.output.append(output_name)
                    input_min = new_output_name[:-1] + '1'
                    input_max = new_output_name[:-1] + '2'
                    dequantize_op.input.extend([input_min, input_max])
                    dequantize_op.node_input[0].node_id = op.node_id
                    dequantize_op.node_input[1].node_id = op.node_id
                    dequantize_op.node_input[2].node_id = op.node_id

        model_path = save_model_to_proto(net, normalize_op_name(op_name),
                                         FLAGS.output_dir)
        output_config = {"model_file_path": str(model_path),
                         "output_tensors": output_tensors,
                         "output_shapes": output_shapes}
        output_configs["subgraphs"].append(output_config)

    output_configs_path = FLAGS.output_dir + "outputs.yml"
    with open(output_configs_path, "w") as f:
        yaml.dump(output_configs, f, default_flow_style=False)
Example #30
    def __init__(self, option, src_model_file):
        self._op_converters = {
            TFOpType.Conv2D.name: self.convert_conv2d,
            TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
            TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
            TFOpType.BiasAdd.name: self.convert_biasadd,
            TFOpType.Add.name: self.convert_add,
            TFOpType.Sub.name: self.convert_elementwise,
            TFOpType.Mul.name: self.convert_elementwise,
            TFOpType.Div.name: self.convert_elementwise,
            TFOpType.Min.name: self.convert_elementwise,
            TFOpType.Minimum.name: self.convert_elementwise,
            TFOpType.Max.name: self.convert_elementwise,
            TFOpType.Maximum.name: self.convert_elementwise,
            TFOpType.Neg.name: self.convert_elementwise,
            TFOpType.Abs.name: self.convert_elementwise,
            TFOpType.Pow.name: self.convert_elementwise,
            TFOpType.RealDiv.name: self.convert_elementwise,
            TFOpType.SquaredDifference.name: self.convert_elementwise,
            TFOpType.Square.name: self.convert_elementwise,
            TFOpType.Rsqrt.name: self.convert_elementwise,
            TFOpType.Equal.name: self.convert_elementwise,
            TFOpType.Relu.name: self.convert_activation,
            TFOpType.LeakyRelu.name: self.convert_activation,
            TFOpType.Relu6.name: self.convert_activation,
            TFOpType.Tanh.name: self.convert_activation,
            TFOpType.Sigmoid.name: self.convert_activation,
            TFOpType.Fill.name: self.convert_fill,
            TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
            TFOpType.AvgPool.name: self.convert_pooling,
            TFOpType.MaxPool.name: self.convert_pooling,
            TFOpType.MatMul.name: self.convert_matmul,
            TFOpType.BatchMatMul.name: self.convert_matmul,
            TFOpType.Identity.name: self.convert_identity,
            TFOpType.Reshape.name: self.convert_reshape,
            TFOpType.Shape.name: self.convert_shape,
            TFOpType.ExpandDims.name: self.convert_expand_dims,
            TFOpType.Squeeze.name: self.convert_squeeze,
            TFOpType.Transpose.name: self.convert_transpose,
            TFOpType.Softmax.name: self.convert_softmax,
            TFOpType.ResizeBicubic.name: self.convert_resize_bicubic,
            TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
            TFOpType.ResizeNearestNeighbor.name:
            self.convert_resize_nearest_neighbor,  # noqa
            TFOpType.Placeholder.name: self.convert_nop,
            TFOpType.SpaceToBatchND.name: self.convert_space_batch,
            TFOpType.BatchToSpaceND.name: self.convert_space_batch,
            TFOpType.DepthToSpace.name: self.convert_space_depth,
            TFOpType.SpaceToDepth.name: self.convert_space_depth,
            TFOpType.Pad.name: self.convert_pad,
            TFOpType.ConcatV2.name: self.convert_concat,
            TFOpType.Mean.name: self.convert_mean,
            TFOpType.Const.name: self.convert_nop,
            TFOpType.Gather.name: self.convert_gather,
            TFOpType.StridedSlice.name: self.convert_stridedslice,
            TFOpType.Slice.name: self.convert_slice,
            TFOpType.ReverseV2.name: self.convert_reverse,
            TFOpType.Pack.name: self.convert_stack,
            TFOpType.Stack.name: self.convert_stack,
            TFOpType.Unpack.name: self.convert_unstack,
            TFOpType.Unstack.name: self.convert_unstack,
            TFOpType.Cast.name: self.convert_cast,
            TFOpType.ArgMax.name: self.convert_argmax,
            TFOpType.Split.name: self.convert_split,
            TFOpType.FakeQuantWithMinMaxVars.name: self.convert_fake_quantize,
            TFOpType.FloorDiv.name: self.convert_elementwise,
            TFOpType.Sqrt.name: self.convert_elementwise,
            TFOpType.MirrorPad.name: self.convert_pad,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, FilterFormat.HWIO)

        # import tensorflow graph
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())

        self._placeholders = {}
        self.add_shape_info(tf_graph_def)

        print("Run transform_graph: %s" %
              TFTransformGraphOptions[option.device])
        try:
            print("output keys: ", option.output_nodes.keys())
            transformed_graph_def = TransformGraph(
                tf_graph_def, option.input_nodes.keys(),
                option.output_nodes.keys(),
                TFTransformGraphOptions[option.device])
        except Exception as ex:
            print("Failed to transform graph using tf tool: %s" % ex)
            transformed_graph_def = tf_graph_def

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph

        self._skip_tensor = set()
        self._output_shape_list = []
        self._output_shape_op_list = []
Example #31
    def __init__(self, option, src_model_file):
        self._op_converters = {
            TFOpType.Conv2D.name: self.convert_conv2d,
            TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
            TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
            TFOpType.BiasAdd.name: self.convert_biasadd,
            TFOpType.Add.name: self.convert_add,
            TFOpType.Sub.name: self.convert_elementwise,
            TFOpType.Mul.name: self.convert_elementwise,
            TFOpType.Div.name: self.convert_elementwise,
            TFOpType.Min.name: self.convert_elementwise,
            TFOpType.Max.name: self.convert_elementwise,
            TFOpType.Neg.name: self.convert_elementwise,
            TFOpType.Abs.name: self.convert_elementwise,
            TFOpType.Pow.name: self.convert_elementwise,
            TFOpType.RealDiv.name: self.convert_elementwise,
            TFOpType.SquaredDifference.name: self.convert_elementwise,
            TFOpType.Square.name: self.convert_elementwise,
            TFOpType.Rsqrt.name: self.convert_elementwise,
            TFOpType.Equal.name: self.convert_elementwise,
            TFOpType.Relu.name: self.convert_activation,
            TFOpType.Relu6.name: self.convert_activation,
            TFOpType.Tanh.name: self.convert_activation,
            TFOpType.Sigmoid.name: self.convert_activation,
            TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
            TFOpType.AvgPool.name: self.convert_pooling,
            TFOpType.MaxPool.name: self.convert_pooling,
            TFOpType.MatMul.name: self.convert_matmul,
            TFOpType.BatchMatMul.name: self.convert_matmul,
            TFOpType.Identity.name: self.convert_identity,
            TFOpType.Reshape.name: self.convert_reshape,
            TFOpType.Shape.name: self.convert_shape,
            TFOpType.Squeeze.name: self.convert_squeeze,
            TFOpType.Transpose.name: self.convert_transpose,
            TFOpType.Softmax.name: self.convert_softmax,
            TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
            TFOpType.Placeholder.name: self.convert_nop,
            TFOpType.SpaceToBatchND.name: self.convert_space_batch,
            TFOpType.BatchToSpaceND.name: self.convert_space_batch,
            TFOpType.DepthToSpace.name: self.convert_space_depth,
            TFOpType.SpaceToDepth.name: self.convert_space_depth,
            TFOpType.Pad.name: self.convert_pad,
            TFOpType.ConcatV2.name: self.convert_concat,
            TFOpType.Mean.name: self.convert_mean,
            TFOpType.Const.name: self.convert_nop,
            TFOpType.Gather.name: self.convert_gather,
            TFOpType.StridedSlice.name: self.convert_stridedslice,
            TFOpType.Slice.name: self.convert_slice,
            TFOpType.Pack.name: self.convert_stack,
            TFOpType.Stack.name: self.convert_stack,
            TFOpType.Cast.name: self.convert_cast,
            TFOpType.ArgMax.name: self.convert_argmax,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, FilterFormat.HWIO)

        # import tensorflow graph
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())
        self.add_shape_info(tf_graph_def)

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(tf_graph_def, name='')
                self._tf_graph = graph

        self._skip_tensor = set()
Example #32
    def __init__(self, option, src_model_file):
        self._op_converters = {
            TFOpType.Conv2D.name: self.convert_conv2d,
            TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
            TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
            TFOpType.BiasAdd.name: self.convert_biasadd,
            TFOpType.Add.name: self.convert_add,
            TFOpType.Sub.name: self.convert_elementwise,
            TFOpType.Mul.name: self.convert_elementwise,
            TFOpType.Div.name: self.convert_elementwise,
            TFOpType.Minimum.name: self.convert_elementwise,
            TFOpType.Maximum.name: self.convert_elementwise,
            TFOpType.Neg.name: self.convert_elementwise,
            TFOpType.Abs.name: self.convert_elementwise,
            TFOpType.Pow.name: self.convert_elementwise,
            TFOpType.RealDiv.name: self.convert_elementwise,
            TFOpType.SquaredDifference.name: self.convert_elementwise,
            TFOpType.Square.name: self.convert_elementwise,
            TFOpType.Rsqrt.name: self.convert_elementwise,
            TFOpType.Equal.name: self.convert_elementwise,
            TFOpType.Min.name: self.convert_reduce,
            TFOpType.Max.name: self.convert_reduce,
            TFOpType.Mean.name: self.convert_reduce,
            TFOpType.Prod.name: self.convert_reduce,
            TFOpType.Relu.name: self.convert_activation,
            TFOpType.LeakyRelu.name: self.convert_activation,
            TFOpType.Relu6.name: self.convert_activation,
            TFOpType.Tanh.name: self.convert_activation,
            TFOpType.Sigmoid.name: self.convert_activation,
            TFOpType.Fill.name: self.convert_fill,
            TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
            TFOpType.AvgPool.name: self.convert_pooling,
            TFOpType.MaxPool.name: self.convert_pooling,
            TFOpType.MatMul.name: self.convert_matmul,
            TFOpType.BatchMatMul.name: self.convert_matmul,
            TFOpType.Identity.name: self.convert_identity,
            TFOpType.Reshape.name: self.convert_reshape,
            TFOpType.Shape.name: self.convert_shape,
            TFOpType.ExpandDims.name: self.convert_expand_dims,
            TFOpType.Squeeze.name: self.convert_squeeze,
            TFOpType.Transpose.name: self.convert_transpose,
            TFOpType.Softmax.name: self.convert_softmax,
            TFOpType.ResizeBicubic.name: self.convert_resize_bicubic,
            TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
            TFOpType.ResizeNearestNeighbor.name:
            self.convert_resize_nearest_neighbor,  # noqa
            TFOpType.Placeholder.name: self.convert_nop,
            TFOpType.SpaceToBatchND.name: self.convert_space_batch,
            TFOpType.BatchToSpaceND.name: self.convert_space_batch,
            TFOpType.DepthToSpace.name: self.convert_space_depth,
            TFOpType.SpaceToDepth.name: self.convert_space_depth,
            TFOpType.Pad.name: self.convert_pad,
            TFOpType.ConcatV2.name: self.convert_concat,
            TFOpType.Const.name: self.convert_nop,
            TFOpType.Gather.name: self.convert_gather,
            TFOpType.GatherV2.name: self.convert_gather,
            TFOpType.StridedSlice.name: self.convert_stridedslice,
            TFOpType.Slice.name: self.convert_slice,
            TFOpType.ReverseV2.name: self.convert_reverse,
            TFOpType.Pack.name: self.convert_stack,
            TFOpType.Stack.name: self.convert_stack,
            TFOpType.Unpack.name: self.convert_unstack,
            TFOpType.Unstack.name: self.convert_unstack,
            TFOpType.Cast.name: self.convert_cast,
            TFOpType.ArgMax.name: self.convert_argmax,
            TFOpType.Split.name: self.convert_split,
            TFOpType.FakeQuantWithMinMaxVars.name: self.convert_fake_quantize,
            TFOpType.FakeQuantWithMinMaxArgs.name: self.convert_fake_quantize,
            TFOpType.FloorDiv.name: self.convert_elementwise,
            TFOpType.Sqrt.name: self.convert_elementwise,
            TFOpType.MirrorPad.name: self.convert_pad,
            TFOpType.Cumsum.name: self.convert_cumsum,
            TFOpType.OneHot.name: self.convert_one_hot,
            TFOpType.Sum.name: self.convert_reduce,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.HWIO)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NHWC)

        # import tensorflow graph
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())

        self._placeholders = {}
        self._skip_tensor = set()
        self._output_shape = {}

        print("Run transform_graph: %s" % TFTransformGraphOptions)
        try:
            print("output keys: ", option.output_nodes.keys())
            transformed_graph_def = TransformGraph(tf_graph_def,
                                                   option.input_nodes.keys(),
                                                   option.output_nodes.keys(),
                                                   TFTransformGraphOptions)
        except Exception as ex:
            print("Failed to transform graph using tf tool: %s" % ex)
            transformed_graph_def = tf_graph_def

        # To check the optimized model, uncomment the following code.
        # tf.io.write_graph(
        #     transformed_graph_def,
        #     ".",
        #     os.path.basename(src_model_file)[:-3] + "_opt.pb",
        #     as_text=False
        # )

        self.add_shape_info(transformed_graph_def)

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph
                self.update_output_shapes(session)

        # we have polluted the graph with 'shape' ops, so reset the default
        # graph and reload it
        tf.reset_default_graph()

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph
Example #33
    def __init__(self, option, src_model_file):
        self._op_converters = {
            TFOpType.Conv2D.name: self.convert_conv2d,
            TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
            TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
            TFOpType.BiasAdd.name: self.convert_biasadd,
            TFOpType.Add.name: self.convert_add,
            TFOpType.Sub.name: self.convert_elementwise,
            TFOpType.Mul.name: self.convert_elementwise,
            TFOpType.Div.name: self.convert_elementwise,
            TFOpType.Min.name: self.convert_elementwise,
            TFOpType.Max.name: self.convert_elementwise,
            TFOpType.Neg.name: self.convert_elementwise,
            TFOpType.Abs.name: self.convert_elementwise,
            TFOpType.Pow.name: self.convert_elementwise,
            TFOpType.RealDiv.name: self.convert_elementwise,
            TFOpType.SquaredDifference.name: self.convert_elementwise,
            TFOpType.Square.name: self.convert_elementwise,
            TFOpType.Rsqrt.name: self.convert_elementwise,
            TFOpType.Equal.name: self.convert_elementwise,
            TFOpType.Relu.name: self.convert_activation,
            TFOpType.Relu6.name: self.convert_activation,
            TFOpType.Tanh.name: self.convert_activation,
            TFOpType.Sigmoid.name: self.convert_activation,
            TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
            TFOpType.AvgPool.name: self.convert_pooling,
            TFOpType.MaxPool.name: self.convert_pooling,
            TFOpType.MatMul.name: self.convert_matmul,
            TFOpType.BatchMatMul.name: self.convert_matmul,
            TFOpType.Identity.name: self.convert_identity,
            TFOpType.Reshape.name: self.convert_reshape,
            TFOpType.Shape.name: self.convert_shape,
            TFOpType.Squeeze.name: self.convert_squeeze,
            TFOpType.Transpose.name: self.convert_transpose,
            TFOpType.Softmax.name: self.convert_softmax,
            TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
            TFOpType.Placeholder.name: self.convert_nop,
            TFOpType.SpaceToBatchND.name: self.convert_space_batch,
            TFOpType.BatchToSpaceND.name: self.convert_space_batch,
            TFOpType.DepthToSpace.name: self.convert_space_depth,
            TFOpType.SpaceToDepth.name: self.convert_space_depth,
            TFOpType.Pad.name: self.convert_pad,
            TFOpType.ConcatV2.name: self.convert_concat,
            TFOpType.Mean.name: self.convert_mean,
            TFOpType.Const.name: self.convert_nop,
            TFOpType.Gather.name: self.convert_gather,
            TFOpType.StridedSlice.name: self.convert_stridedslice,
            TFOpType.Slice.name: self.convert_slice,
            TFOpType.Pack.name: self.convert_stack,
            TFOpType.Stack.name: self.convert_stack,
            TFOpType.Cast.name: self.convert_cast,
            TFOpType.ArgMax.name: self.convert_argmax,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, FilterFormat.HWIO)

        # import tensorflow graph
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())

        self._placeholders = {}
        self.add_shape_info(tf_graph_def)

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(tf_graph_def, name='')
                self._tf_graph = graph

        self._skip_tensor = set()