Example #1
    def convert_reduce(self, op):
        self.add_min_max_const_node(op, op.input[0])
        reduce_type_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_reduce_type_str)
        mace_check(reduce_type_arg.i == ReduceType.MEAN.value,
                   "Hexagon Reduce only supports Mean now.")
        keep_dims_arg = ConverterUtil.get_arg(op,
                                              MaceKeyword.mace_keepdims_str)
        mace_check(keep_dims_arg.i == 1,
                   "Hexagon Reduce Mean only supports keep dims now.")
        axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
        mace_check(1 <= len(axis_arg.ints) <= 2,
                   "Hexagon Reduce Mean only supports spatial now.")
        for i in axis_arg.ints:
            mace_check(1 <= i <= 2,
                       "Hexagon Reduce Mean only supports spatial now")
        producer_op_name, _ = get_op_and_port_from_tensor(op.input[0])
        input_dims = None
        for producer_op in self._model.op:
            if producer_op.name == producer_op_name:
                input_dims = producer_op.output_shape[0].dims
                break
        mace_check(input_dims is not None, "Missing input shape.")
        if len(axis_arg.ints) == 1:
            dim1, dim2 = (input_dims[1], 1) \
                if axis_arg.ints[0] == 1 else (1, input_dims[2])
        else:
            dim1, dim2 = input_dims[1], input_dims[2]
        self.add_arg_const_node(op, '/window:0', [1, dim1, dim2, 1])
        self.add_arg_const_node(op, '/strides:0', [1, dim1, dim2, 1])

        op.type = HexagonOp.QuantizedAvgPool_8.name
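All of these snippets lean on the mace_check helper for validation, but its implementation is not shown on this page. A minimal stand-in, assuming it simply raises when the condition is false (the real helper may also log or use a project-specific error type):

def mace_check(condition, msg):
    # Hypothetical sketch of the assertion helper used throughout the examples.
    if not condition:
        raise Exception(msg)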
Example #2
    def convert_conv2d(self, op):
        channels = op.output_shape[0].dims[3]
        if len(op.input) < 3:
            print('Supernode requires a bias input; adding one.')
            bias_data = np.zeros(channels, dtype=int)
            bias_tensor = self._model.tensors.add()
            bias_tensor.data_type = mace_pb2.DT_INT32
            bias_tensor.dims.extend([channels])
            bias_tensor.int32_data.extend(bias_data)
            bias_tensor.minval = 0
            bias_tensor.maxval = 0
            bias_tensor.name = op.name + "/bias:0"
            bias = bias_tensor.name
            self._consts[bias] = bias_tensor
        else:
            bias = op.input.pop()

        self.add_min_max_const_node(op, op.input[0])
        self.add_min_max_const_node(op, op.input[1])

        strides_arg = ConverterUtil.get_arg(op, 'strides')
        mace_check(strides_arg is not None,
                   "Missing strides of Conv or Depthwise Conv.")
        self.add_arg_const_node(
            op, '/strides:0', [1, strides_arg.ints[0], strides_arg.ints[1], 1])

        op.input.append(bias)
        self.add_min_max_const_node(op, bias)
        self.add_min_max_const_node(op, op.output[0], True, True, False)

        if op.type == MaceOp.DepthwiseConv2d.name:
            op.type = HexagonOp.DepthwiseSupernode_8x8p32to8.name
        else:
            op.type = HexagonOp.Supernode_8x8p32to8.name
Example #3
    def add_min_max_const_node(self,
                               this_op,
                               tensor_name,
                               add_min=True,
                               add_max=True,
                               diff_port=True):
        op, port = get_op_and_port_from_tensor(tensor_name)
        mace_check(port == 0, 'port should be 0 when adding min/max tensors.')
        if tensor_name in self._quantize_activation_info:
            quantize_info = self._quantize_activation_info[tensor_name]
            minval = quantize_info.minval
            maxval = quantize_info.maxval
            is_activation = True
        elif tensor_name in self._consts:
            tensor = self._consts[tensor_name]
            minval = tensor.minval
            maxval = tensor.maxval
            is_activation = False
        else:
            raise Exception('Quantize info not found: ', tensor_name)

        if add_min:
            if is_activation and diff_port:
                min_tensor_name = op + ':1'
            else:
                min_tensor_name = op + '_min:0'
                self.add_const_node(min_tensor_name, minval)
            this_op.input.extend([min_tensor_name])
        if add_max:
            if is_activation and diff_port:
                max_tensor_name = op + ':2'
            else:
                max_tensor_name = op + '_max:0'
                self.add_const_node(max_tensor_name, maxval)
            this_op.input.extend([max_tensor_name])
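Examples #1 and #3 also call get_op_and_port_from_tensor, which is not shown here. A plausible sketch, assuming tensor names follow the 'op_name:port' convention visible above (e.g. the ':1'/':2' min/max ports):

def get_op_and_port_from_tensor(tensor_name):
    # Hypothetical helper: split 'op_name:port' into its parts,
    # defaulting the port to 0 when no port suffix is present.
    if ':' in tensor_name:
        op_name, port = tensor_name.split(':')
        return op_name, int(port)
    return tensor_name, 0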
Example #4
def parse_data_type(data_type_str):
    if data_type_str == "float32":
        return mace_pb2.DT_FLOAT
    elif data_type_str == "int32":
        return mace_pb2.DT_INT32
    else:
        mace_check(False, "data type %s not supported" % data_type_str)
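A quick illustrative call, assuming the two string spellings above are the only accepted ones (this is the same helper Example #30 applies to every entry of input_data_types):

assert parse_data_type("float32") == mace_pb2.DT_FLOAT
assert parse_data_type("int32") == mace_pb2.DT_INT32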
Example #5
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name',
                        type=str,
                        help="the namespace of gernerated code")
    parser.add_argument('--model_file', type=str, help="model file")
    parser.add_argument('--params_file', type=str, help="params file")
    parser.add_argument('--device',
                        type=str,
                        default='cpu',
                        help="cpu/gpu/hexagon/hta/apu")
    parser.add_argument('--config', type=str, help="model config")
    parser.add_argument("--no_obfuscate",
                        action="store_true",
                        help="obfuscate model names")
    parser.add_argument("--gencode_model",
                        action="store_true",
                        help="generate model code")
    parser.add_argument("--gencode_param",
                        action="store_true",
                        help="generate params code")
    parser.add_argument('--output',
                        type=str,
                        default="build",
                        help="output dir")

    flgs, _ = parser.parse_known_args()
    mace_check(flgs.model_name not in CPP_KEYWORDS,
               "model name cannot be a C++ keyword")
    return flgs
Example #6
    def convert_pad(self, tf_op):
        op = self.convert_general_op(tf_op)
        op.type = MaceOp.Pad.name
        del op.input[1:]

        paddings_arg = op.arg.add()
        paddings_arg.name = MaceKeyword.mace_paddings_str
        paddings_value = tf_op.inputs[1].eval().astype(np.int32).flat
        paddings_arg.ints.extend(paddings_value)
        self._skip_tensor.add(tf_op.inputs[1].name)

        pad_type_arg = op.arg.add()
        pad_type_arg.name = MaceKeyword.mace_pad_type_str

        if tf_op.type == TFOpType.Pad or tf_op.type == TFOpType.PadV2:
            if len(tf_op.inputs) == 3:
                constant_value_arg = op.arg.add()
                constant_value_arg.name = MaceKeyword.mace_constant_value_str
                constant_value = tf_op.inputs[2].eval().flat[0]
                tf_dt = tf_op.inputs[2].dtype
                if tf_dt == tf.float32:
                    constant_value_arg.f = constant_value
                elif tf_dt == tf.int32:
                    constant_value_arg.i = constant_value
                else:
                    mace_check(False, "Unsupported data type: %s" % tf_dt.name)
                self._skip_tensor.add(tf_op.inputs[2].name)

            pad_type_arg.i = PadType.CONSTANT.value

        elif tf_op.type == TFOpType.MirrorPad:
            pad_type_arg.i = self.pad_type[tf_op.get_attr('mode')].value
Example #7
    def convert_folded_batchnorm(self, caffe_op):
        op = self.convert_general_op(caffe_op)
        op.type = MaceOp.BatchNorm.name

        scale_op = None
        for consumer in self._caffe_net.get_consumers(caffe_op.layer.top[0]):
            if consumer.type == 'Scale':
                scale_op = consumer
        mace_check(scale_op is not None, "batchnorm is not followed by scale")
        self._skip_ops.append(scale_op)

        epsilon_value = caffe_op.layer.batch_norm_param.eps
        mace_check(caffe_op.blobs[2][0] != 0, "batchnorm scalar is zero")
        mean_value = (1. / caffe_op.blobs[2][0]) * caffe_op.blobs[0]
        var_value = (1. / caffe_op.blobs[2][0]) * caffe_op.blobs[1]
        gamma_value = scale_op.blobs[0]
        beta_value = np.zeros_like(mean_value)
        if len(scale_op.blobs) == 2:
            beta_value = scale_op.blobs[1]

        scale_value = (
            (1.0 / np.vectorize(math.sqrt)(var_value + epsilon_value)) *
            gamma_value).reshape(-1)
        offset_value = ((-mean_value * scale_value) + beta_value).reshape(-1)

        input_names = [op.name + '_scale', op.name + '_offset']
        self.add_tensor(input_names[0],
                        scale_value.reshape(-1).shape, mace_pb2.DT_FLOAT,
                        scale_value)
        self.add_tensor(input_names[1],
                        offset_value.reshape(-1).shape, mace_pb2.DT_FLOAT,
                        offset_value)
        op.input.extend([name for name in input_names])
        op.output[:] = scale_op.layer.top[:]
Example #8
    def convert_ops(self, sess):
        for tf_op in self._tf_graph.get_operations():
            mace_check(
                tf_op.type in self._op_converters,
                "Mace does not support tensorflow op type %s yet" % tf_op.type)
            self._op_converters[tf_op.type](tf_op)

        self.convert_tensors()
Example #9
    def convert_ops(self):
        print("Convert mace graph to hexagon.")
        for op in self._model.op:
            mace_check(
                op.type in self._op_converters,
                "Mace Hexagon does not support op type %s yet" % op.type)
            self.pre_convert(op)
            self._op_converters[op.type](op)
            self.post_convert(op)
Example #10
    def add_size_tensor_from_arg(self, op, keyword):
        size_value_arg = ConverterUtil.get_arg(op, keyword)
        mace_check(len(size_value_arg.ints) == 2,
                   op.name + ': ' + keyword + ' value does not have size 2')
        size_value_tensor = self._model.tensors.add()
        size_value_tensor.name = op.name + '/' + keyword + ':0'
        size_value_tensor.data_type = mace_pb2.DT_INT32
        size_value_tensor.dims.extend([2])
        size_value_tensor.int32_data.extend(size_value_arg.ints)
        op.input.extend([size_value_tensor.name])
Example #11
    def add_int_list_tensor_from_arg(self, op, keyword):
        list_value_arg = ConverterUtil.get_arg(op, keyword)
        mace_check(list_value_arg.ints is not None,
                   op.name + ': ' + keyword + ' value ints should not be None')
        list_value_tensor = self._model.tensors.add()
        list_value_tensor.name = op.name + '/' + keyword + ':0'
        list_value_tensor.data_type = mace_pb2.DT_INT32
        list_value_tensor.dims.extend([len(list_value_arg.ints)])
        list_value_tensor.int32_data.extend(list_value_arg.ints)
        op.input.extend([list_value_tensor.name])
Example #12
    def add_int_tensor_from_arg(self, op, keyword):
        int_value_arg = ConverterUtil.get_arg(op, keyword)
        mace_check(int_value_arg.i is not None,
                   op.name + ': ' + keyword + ' value i should not be None')
        int_value_tensor = self._model.tensors.add()
        int_value_tensor.name = op.name + '/' + keyword + ':0'
        int_value_tensor.data_type = mace_pb2.DT_INT32
        int_value_tensor.dims.extend([1])
        int_value_tensor.int32_data.extend([int_value_arg.i])
        op.input.extend([int_value_tensor.name])
Example #13
    def add_padding_tensor_from_arg(self, op):
        padding_value_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_padding_values_str)
        mace_check(len(padding_value_arg.ints) == 4,
                   op.name + ': padding value does not have size 4')
        padding_value_tensor = self._model.tensors.add()
        padding_value_tensor.name = op.name + '/padding:0'
        padding_value_tensor.data_type = mace_pb2.DT_INT32
        padding_value_tensor.dims.extend([4])
        padding_value_tensor.int32_data.extend(padding_value_arg.ints)
        op.input.extend([padding_value_tensor.name])
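Examples #10 through #13 all follow the same pattern: read an argument, wrap its integer values in an INT32 constant tensor named op.name + '/<keyword>:0', and append that tensor as an extra input. A hypothetical consolidated helper (not part of the original converter) that captures the shared logic:

    def add_int32_const_input(self, op, keyword, values):
        # Illustrative generalization of the four helpers above.
        tensor = self._model.tensors.add()
        tensor.name = op.name + '/' + keyword + ':0'
        tensor.data_type = mace_pb2.DT_INT32
        tensor.dims.extend([len(values)])
        tensor.int32_data.extend(values)
        op.input.extend([tensor.name])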
Example #14
    def convert_cast(self, tf_op):
        op = self.convert_general_op(tf_op)
        op.type = MaceOp.Cast.name

        try:
            dtype = tf_op.get_attr('DstT')
            if dtype == tf.int32:
                op.output_type.extend([mace_pb2.DT_INT32])
            elif dtype == tf.float32:
                op.output_type.extend([mace_pb2.DT_FLOAT])
            else:
                mace_check(False, "data type %s not supported" % dtype)
        except ValueError:
            op.output_type.extend([mace_pb2.DT_FLOAT])
Example #15
    def convert_ops(self):
        layer_names = set()
        for layer in self._caffe_layers.layer:
            caffe_op = self._caffe_net.get_op(layer.name)
            if caffe_op not in self._skip_ops:
                mace_check(
                    layer.name not in layer_names,
                    "There is a duplicate layer name '%s' in your model" %
                    layer.name)
                mace_check(
                    layer.type in self._op_converters,
                    "Mace does not support caffe op type %s yet" % layer.type)
                layer_names.add(layer.name)
                self._op_converters[layer.type](caffe_op)
Example #16
    def convert_slice(self, caffe_op):
        op = self.convert_general_op(caffe_op)
        op.type = MaceOp.Split.name

        if caffe_op.layer.HasField('slice_param'):
            param = caffe_op.layer.slice_param
            mace_check(
                not param.HasField('axis') or param.axis == 1
                or param.axis == -3,
                "Mace do not support slice with axis %d" % param.axis)
            mace_check(
                len(param.slice_point) == 0,
                "Mace do not support slice with slice_point")
        axis_arg = op.arg.add()
        axis_arg.name = MaceKeyword.mace_axis_str
        axis_arg.i = 1
Example #17
    def convert_interp(self, caffe_op):
        op = self.convert_general_op(caffe_op)
        param = caffe_op.layer.interp_param
        mace_check(
            param.HasField("height") and param.HasField("width"),
            'Only supports bilinear interp with height and width')
        op.type = MaceOp.ResizeBilinear.name

        size_arg = op.arg.add()
        size_arg.name = MaceKeyword.mace_resize_size_str
        size_value = np.array([param.height, param.width], dtype=np.int32)
        size_arg.ints.extend(size_value)
        # interp op's `align_corners` param is always true in caffe
        align_corners_arg = op.arg.add()
        align_corners_arg.name = MaceKeyword.mace_align_corners_str
        align_corners_arg.i = 1
Example #18
    def convert_conv2d(self, caffe_op):
        op = self.convert_general_op(caffe_op)
        param = caffe_op.layer.convolution_param
        is_depthwise = False
        if param.HasField(caffe_group_str) and param.group > 1:
            filter_data = caffe_op.blobs[0]
            mace_check(
                param.group == filter_data.shape[0]
                and filter_data.shape[1] == 1,
                "Mace do not support group convolution yet")
            is_depthwise = True
            caffe_op.blobs[0] = filter_data.reshape(1, filter_data.shape[0],
                                                    filter_data.shape[2],
                                                    filter_data.shape[3])

        if is_depthwise:
            op.type = MaceOp.DepthwiseConv2d.name
        else:
            op.type = MaceOp.Conv2D.name

        self.add_stride_pad_kernel_arg(param, op)
        # dilation is specific for convolution in caffe
        dilations = [1, 1]
        if len(param.dilation) > 0:
            dilation_arg = op.arg.add()
            dilation_arg.name = MaceKeyword.mace_dilations_str
            if len(param.dilation) == 1:
                dilations = [param.dilation[0], param.dilation[0]]
            elif len(param.dilation) == 2:
                dilations = [param.dilation[0], param.dilation[1]]
            dilation_arg.ints.extend(dilations)

        filter_tensor_name = op.name + '_filter'
        filter_data = caffe_op.blobs[0]
        self.add_tensor(filter_tensor_name, filter_data.shape,
                        mace_pb2.DT_FLOAT, filter_data)
        op.input.extend([filter_tensor_name])

        if len(caffe_op.blobs) == 2:
            bias_tensor_name = op.name + '_bias'
            bias_data = caffe_op.blobs[1]
            # caffe of old version has 4-dimension bias, so reshape it
            # to single dimension
            self.add_tensor(bias_tensor_name,
                            bias_data.reshape(-1).shape, mace_pb2.DT_FLOAT,
                            bias_data)
            op.input.extend([bias_tensor_name])
Example #19
def merge_opencl_binaries(opencl_binaries,
                          output_file):
    platform_info_key = 'mace_opencl_precompiled_platform_info_key'

    kvs = {}
    for binary in opencl_binaries:
        if not os.path.exists(binary):
            MaceLogger.warning("OpenCL bin %s not found" % binary)
            continue

        with open(binary, "rb") as f:
            binary_array = np.fromfile(f, dtype=np.uint8)

        idx = 0
        size, = struct.unpack("Q", binary_array[idx:idx + 8])
        idx += 8
        for _ in range(size):
            key_size, = struct.unpack("i", binary_array[idx:idx + 4])
            idx += 4
            key, = struct.unpack(
                str(key_size) + "s", binary_array[idx:idx + key_size])
            idx += key_size
            value_size, = struct.unpack("i", binary_array[idx:idx + 4])
            idx += 4
            if key == platform_info_key and key in kvs:
                mace_check(
                    (kvs[key] == binary_array[idx:idx + value_size]).all(),
                    "There exists more than one OpenCL version for models:"
                    " %s vs %s " %
                    (kvs[key], binary_array[idx:idx + value_size]))
            else:
                kvs[key] = binary_array[idx:idx + value_size]
            idx += value_size

    output_byte_array = bytearray()
    data_size = len(kvs)
    output_byte_array.extend(struct.pack("Q", data_size))
    for key, value in kvs.items():
        key_size = len(key)
        output_byte_array.extend(struct.pack("i", key_size))
        output_byte_array.extend(struct.pack(str(key_size) + "s", key))
        value_size = len(value)
        output_byte_array.extend(struct.pack("i", value_size))
        output_byte_array.extend(value)

    np.array(output_byte_array).tofile(output_file)
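The merged file written above has a simple layout: a uint64 entry count, then for each entry an int32 key size, the key bytes, an int32 value size and the value bytes. A hypothetical reader that walks this layout (mirroring the unpack calls in the example):

import struct

import numpy as np


def list_merged_opencl_entries(path):
    # Walk the merged OpenCL binary and return (key, value_size) pairs.
    data = np.fromfile(path, dtype=np.uint8)
    idx = 0
    count, = struct.unpack("Q", data[idx:idx + 8])
    idx += 8
    entries = []
    for _ in range(count):
        key_size, = struct.unpack("i", data[idx:idx + 4])
        idx += 4
        key = bytes(data[idx:idx + key_size])
        idx += key_size
        value_size, = struct.unpack("i", data[idx:idx + 4])
        idx += 4 + value_size
        entries.append((key, value_size))
    return entries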
Example #20
    def run(self):
        if self._option.device == DeviceType.HTA.value:
            mace_check(
                len(self._option.input_nodes) == 1
                and len(self._option.output_nodes) == 1,
                'HTA only supports a single input and output')

        for tensor in self._model.tensors:
            self._consts[tensor.name] = tensor

        # convert op node
        self.convert_ops()

        self.convert_input_output_node()

        self.add_node_id()

        return self._model
Example #21
    def convert_elementwise(self, op):
        self.add_min_max_const_node(op, op.input[0])
        self.add_min_max_const_node(op, op.input[1])

        element_type = \
            ConverterUtil.get_arg(op,
                                  MaceKeyword.mace_element_type_str).i
        if element_type == EltwiseType.SUM.value:
            self.add_min_max_const_node(op, op.output[0], True, True, False)
            op.type = HexagonOp.QuantizedAdd_8p8to8.name
        elif element_type == EltwiseType.SUB.value:
            self.add_min_max_const_node(op, op.output[0], True, True, False)
            op.type = HexagonOp.QuantizedSub_8p8to8.name
        elif element_type == EltwiseType.PROD.value:
            op.type = HexagonOp.QuantizedMul_8x8to8.name
        else:
            mace_check(
                False, "Hexagon does not support elementwise %s" %
                EltwiseType(element_type).name)
Example #22
    def convert_tensors(self):
        for tf_op in self._tf_graph.get_operations():
            if tf_op.type != TFOpType.Const.name:
                continue
            output_name = tf_op.outputs[0].name
            if output_name not in self._skip_tensor:
                tensor = self._mace_net_def.tensors.add()
                tensor.name = tf_op.outputs[0].name
                tf_tensor = tf_op.outputs[0].eval()
                tensor.dims.extend(list(tf_tensor.shape))

                tf_dt = tf_op.get_attr('dtype')
                if tf_dt == tf.float32:
                    tensor.data_type = mace_pb2.DT_FLOAT
                    tensor.float_data.extend(tf_tensor.astype(np.float32).flat)
                elif tf_dt == tf.int32:
                    tensor.data_type = mace_pb2.DT_INT32
                    tensor.int32_data.extend(tf_tensor.astype(np.int32).flat)
                else:
                    mace_check(False,
                               "Not supported tensor type: %s" % tf_dt.name)
Example #23
    def convert_fully_connected(self, caffe_op):
        op = self.convert_general_op(caffe_op)
        param = caffe_op.layer.inner_product_param
        op.type = MaceOp.FullyConnected.name

        mace_check((param.axis == 1 or param.axis == -3)
                   and not param.transpose,
                   "Do not support non-default axis and transpose")
        mace_check(caffe_op.blobs[0].ndim in [2, 4],
                   "Unexpected fc weigth ndim.")
        if caffe_op.blobs[0].ndim == 4:
            mace_check(
                list(caffe_op.blobs[0].shape[:2]) == [1, 1],
                "Do not support 4D weight with shape [1, 1, *, *]")

        weight_tensor_name = op.name + '_weight'
        weight_data = caffe_op.blobs[0].reshape(param.num_output, -1)
        self.add_tensor(weight_tensor_name, weight_data.shape,
                        mace_pb2.DT_FLOAT, weight_data)
        op.input.extend([weight_tensor_name])

        if len(caffe_op.blobs) == 2:
            bias_tensor_name = op.name + '_bias'
            bias_data = caffe_op.blobs[1]
            self.add_tensor(bias_tensor_name,
                            bias_data.reshape(-1).shape, mace_pb2.DT_FLOAT,
                            bias_data)
            op.input.extend([bias_tensor_name])
Example #24
    def convert_general_op(self, tf_op):
        op = self._mace_net_def.op.add()
        op.name = tf_op.name
        op.type = tf_op.type
        op.input.extend([tf_input.name for tf_input in tf_op.inputs])
        op.output.extend([tf_output.name for tf_output in tf_op.outputs])
        for tf_output in tf_op.outputs:
            output_shape = op.output_shape.add()
            self.infer_tensor_shape(tf_output, output_shape)

        data_type_arg = op.arg.add()
        data_type_arg.name = 'T'
        try:
            dtype = tf_op.get_attr('T')
            if dtype == tf.int32:
                data_type_arg.i = mace_pb2.DT_INT32
            elif dtype == tf.float32:
                data_type_arg.i = self._option.data_type
            else:
                mace_check(False, "data type %s not supported" % dtype)
        except ValueError:
            try:
                dtype = tf_op.get_attr('SrcT')
                if dtype == tf.int32 or dtype == tf.bool:
                    data_type_arg.i = mace_pb2.DT_INT32
                elif dtype == tf.float32:
                    data_type_arg.i = self._option.data_type
                else:
                    mace_check(False, "data type %s not supported" % dtype)
            except ValueError:
                data_type_arg.i = self._option.data_type

        framework_type_arg = op.arg.add()
        framework_type_arg.name = MaceKeyword.mace_framework_type_str
        framework_type_arg.i = FrameworkType.TENSORFLOW.value

        ConverterUtil.add_data_format_arg(op, DataFormat.NHWC)

        return op
Example #25
    def convert_conv2d(self, tf_op):
        op = self.convert_general_op(tf_op)
        if tf_op.type == TFOpType.DepthwiseConv2dNative.name:
            op.type = MaceOp.DepthwiseConv2d.name
        elif tf_op.type == TFOpType.Conv2DBackpropInput.name:
            op.type = MaceOp.Deconv2D.name
        else:
            op.type = MaceOp.Conv2D.name

        padding_arg = op.arg.add()
        padding_arg.name = MaceKeyword.mace_padding_str
        padding_arg.i = self.padding_mode[tf_op.get_attr(tf_padding_str)].value
        strides_arg = op.arg.add()
        strides_arg.name = MaceKeyword.mace_strides_str
        strides_arg.ints.extend(tf_op.get_attr(tf_strides_str)[1:3])
        if op.type != MaceOp.Deconv2D.name:
            dilation_arg = op.arg.add()
            dilation_arg.name = MaceKeyword.mace_dilations_str
            try:
                dilation_val = tf_op.get_attr(tf_dilations_str)[1:3]
            except ValueError:
                dilation_val = [1, 1]
            dilation_arg.ints.extend(dilation_val)
        else:
            try:
                dilation_val = tf_op.get_attr(tf_dilations_str)[1:3]
            except ValueError:
                dilation_val = [1, 1]
            mace_check(dilation_val[0] == 1 and dilation_val[1] == 1,
                       "Mace only supports dilation == 1 conv2d_transpose.")
            mace_check(
                len(tf_op.inputs) >= 3, "deconv should have (>=) 3 inputs.")
            del op.input[:]
            op.input.extend([
                tf_op.inputs[2].name, tf_op.inputs[1].name,
                tf_op.inputs[0].name
            ])
Example #26
    def add_op_output_type(self):
        type_map = {}
        for input_info in self._model.input_info:
            # will do input quantize in wrapper
            type_map[input_info.name] = mace_pb2.DT_UINT8

        for op in self._model.op:
            if len(op.output_type) >= 1:
                print([op.name, len(op.output), len(op.output_type)])
                type_map[op.output[0]] = op.output_type[0]
                continue
            mace_check(op.input[0] in type_map,
                       op.input[0] + ' not in type_map')
            op.output_type.extend([type_map[op.input[0]]])
            type_map[op.output[0]] = op.output_type[0]

        for op in self._model.op:
            mace_check(len(op.output) == len(op.output_type),
                       op.name + ': lengths of output and output_type do'
                       ' not match')
            mace_check(op.output_type[0] == mace_pb2.DT_UINT8
                       or op.output_type[0] == mace_pb2.DT_INT32,
                       op.name + ': APU only supports quantized nodes')
Example #27
    def get_blob(self, index):
        mace_check(index < len(self._blobs), "blob index out of range")
        return self._blobs[index]
Example #28
    def convert_prior_box(self, caffe_op):
        op = self.convert_general_op(caffe_op)
        param = caffe_op.layer.prior_box_param
        op.type = MaceOp.PriorBox.name

        min_size_arg = op.arg.add()
        min_size_arg.name = MaceKeyword.mace_min_size_str
        min_size_arg.floats.extend(list(param.min_size))
        max_size_arg = op.arg.add()
        max_size_arg.name = MaceKeyword.mace_max_size_str
        max_size_arg.floats.extend(list(param.max_size))
        flip_arg = op.arg.add()
        flip_arg.name = MaceKeyword.mace_flip_str
        flip_arg.i = 1
        if param.HasField('flip'):
            flip_arg.i = int(param.flip)
        aspect_ratio = [1.0]
        for i in param.aspect_ratio:
            already_exist = False
            for ar in aspect_ratio:
                if abs(i - ar) < 1e-6:
                    already_exist = True
                    break
            if not already_exist:
                aspect_ratio.append(i)
                if flip_arg.i:
                    aspect_ratio.append(1.0 / i)
        aspect_ratio_arg = op.arg.add()
        aspect_ratio_arg.name = MaceKeyword.mace_aspect_ratio_str
        aspect_ratio_arg.floats.extend(list(aspect_ratio))
        clip_arg = op.arg.add()
        clip_arg.name = MaceKeyword.mace_clip_str
        clip_arg.i = 0
        if param.HasField('clip'):
            clip_arg.i = int(param.clip)
        variance_arg = op.arg.add()
        variance_arg.name = MaceKeyword.mace_variance_str
        variance_arg.floats.extend(list(param.variance))
        offset_arg = op.arg.add()
        offset_arg.name = MaceKeyword.mace_offset_str
        offset_arg.f = 0.5
        if param.HasField('offset'):
            offset_arg.f = param.offset
        step_h_arg = op.arg.add()
        step_h_arg.name = MaceKeyword.mace_step_h_str
        step_h_arg.f = 0
        if param.HasField('step_h'):
            mace_check(
                not param.HasField('step'),
                "Either step or step_h/step_w should be specified; not both."
            )  # noqa
            step_h_arg.f = param.step_h
            mace_check(step_h_arg.f > 0, "step_h should be larger than 0.")
        step_w_arg = op.arg.add()
        step_w_arg.name = MaceKeyword.mace_step_w_str
        step_w_arg.f = 0
        if param.HasField('step_w'):
            mace_check(
                not param.HasField('step'),
                "Either step or step_h/step_w should be specified; not both."
            )  # noqa
            step_w_arg.f = param.step_w
            mace_check(step_w_arg.f > 0, "step_w should be larger than 0.")

        if param.HasField('step'):
            mace_check(
                not param.HasField('step_h')
                and not param.HasField('step_w'),  # noqa
                "Either step or step_h/step_w should be specified; not both."
            )  # noqa
            mace_check(param.step > 0, "step should be larger than 0.")
            step_h_arg.f = param.step
            step_w_arg.f = param.step
Example #29
def convert(model_file, output_dir, layers):
    mace_check(os.path.isfile(model_file),
               "Input graph file '" + model_file + "' does not exist!")
    mace_check(os.path.isdir(output_dir),
               "Output directory '" + output_dir + "' does not exist!")
    net_def = mace_pb2.NetDef()
    with open(model_file, "rb") as f:
        net_def.ParseFromString(f.read())

    quantize_flag = ConverterUtil.get_arg(
        net_def, MaceKeyword.mace_quantize_flag_arg_str)
    quantize_flag = False if quantize_flag is None else quantize_flag.i == 1
    hexagon_flag = False
    index = 0
    end_index = len(net_def.op)
    if quantize_flag:
        while index < end_index:
            # omit op quantize
            if net_def.op[index].type == MaceOp.Quantize.name or \
                    net_def.op[index].type == \
                    HexagonOp.QuantizeINPUT_f_to_8.name:
                index += 1
            # omit op dequantize
            elif net_def.op[end_index - 1].type == MaceOp.Dequantize.name or \
                    net_def.op[end_index - 1].type == \
                    HexagonOp.DequantizeOUTPUT_8tof.name:
                end_index -= 1
            else:
                break
        mace_check(
            0 < index < end_index < len(net_def.op),
            "Wrong number of op quantize(%d) or dequantize(%d)." %
            (index, len(net_def.op) - end_index))
        if net_def.op[-1].type == HexagonOp.DequantizeOUTPUT_8tof.name:
            hexagon_flag = True
    # omit original output
    end_index -= 1

    index, end_index = handle_index(index, end_index, layers)

    data_format = net_def.output_info[0].data_format
    output_configs = {"subgraphs": []}
    while index < end_index:
        # omit BatchToSpaceND and op before that due to changed graph
        if net_def.op[index].type == MaceOp.BatchToSpaceND.name or \
                net_def.op[index].type == HexagonOp.BatchToSpaceND_8.name or \
                (index + 1 < end_index and
                 (net_def.op[index + 1].type == MaceOp.BatchToSpaceND.name or
                  net_def.op[index + 1].type == HexagonOp.BatchToSpaceND_8.name)):  # noqa
            index += 1
            continue
        net = copy.deepcopy(net_def)
        if hexagon_flag:
            # reuse the dequantize op and its min/max tensors' node_id
            del net.op[index + 1:-1]
        else:
            del net.op[index + 1:]
        del net.output_info[:]
        op = net.op[index]
        index += 1

        output_tensors = []
        output_shapes = []
        op_name = op.name
        if quantize_flag:
            op.name = MaceKeyword.mace_output_node_name + '_' + op.name
        if hexagon_flag:
            mace_check(
                len(op.output) == 1,
                "Only supports number of outputs of Hexagon op be 1.")
        for i in range(len(op.output)):
            output_tensors.append(str(op.output[i]))
            output_shapes.append(",".join(
                [str(dim) for dim in op.output_shape[i].dims]))
            # modify output info
            output_info = net.output_info.add()
            output_info.name = op.output[i]
            output_info.data_format = data_format
            output_info.dims.extend(op.output_shape[i].dims)
            output_info.data_type = mace_pb2.DT_FLOAT
            # modify output op
            if quantize_flag:
                output_name = op.output[i]
                new_output_name = \
                    MaceKeyword.mace_output_node_name + '_' + op.output[i]
                op.output[i] = new_output_name
                if not hexagon_flag:
                    dequantize_op = net.op.add()
                    dequantize_op.name = normalize_op_name(output_name)
                    dequantize_op.type = MaceOp.Dequantize.name
                    dequantize_op.input.append(new_output_name)
                    dequantize_op.output.append(output_name)
                    output_shape = dequantize_op.output_shape.add()
                    output_shape.dims.extend(op.output_shape[i].dims)
                    dequantize_op.output_type.append(mace_pb2.DT_FLOAT)
                    ConverterUtil.add_data_type_arg(dequantize_op,
                                                    mace_pb2.DT_UINT8)
                else:
                    dequantize_op = net.op[-1]
                    dequantize_op.name = normalize_op_name(output_name)
                    del dequantize_op.input[:]
                    del dequantize_op.output[:]
                    dequantize_op.input.append(new_output_name)
                    dequantize_op.output.append(output_name)
                    input_min = new_output_name[:-1] + '1'
                    input_max = new_output_name[:-1] + '2'
                    dequantize_op.input.extend([input_min, input_max])
                    dequantize_op.node_input[0].node_id = op.node_id
                    dequantize_op.node_input[1].node_id = op.node_id
                    dequantize_op.node_input[2].node_id = op.node_id
                    del dequantize_op.node_input[3:]

        model_path = save_model_to_proto(net, normalize_op_name(op_name),
                                         output_dir)
        output_config = {
            "model_file_path": str(model_path),
            "output_tensors": output_tensors,
            "output_shapes": output_shapes
        }
        output_configs["subgraphs"].append(output_config)

    output_configs_path = os.path.join(output_dir, "outputs.yml")
    with open(output_configs_path, "w") as f:
        yaml.dump(output_configs, f, default_flow_style=False)
Example #30
def normalize_model_config(conf):
    conf = copy.deepcopy(conf)
    if ModelKeys.subgraphs in conf:
        subgraph = conf[ModelKeys.subgraphs][0]
        del conf[ModelKeys.subgraphs]
        conf.update(subgraph)

    print(conf)
    conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform])
    conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime])

    if ModelKeys.quantize in conf:
        conf[ModelKeys.data_types] = mace_pb2.DT_FLOAT
    else:
        if ModelKeys.data_types in conf:
            conf[ModelKeys.data_types] = parse_internal_data_type(
                conf[ModelKeys.data_types])
        else:
            conf[ModelKeys.data_types] = mace_pb2.DT_HALF

    # parse input
    conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors])
    input_count = len(conf[ModelKeys.input_tensors])
    conf[ModelKeys.input_shapes] = [
        parse_int_array(shape)
        for shape in to_list(conf[ModelKeys.input_shapes])
    ]
    mace_check(
        len(conf[ModelKeys.input_shapes]) == input_count,
        "input node count and shape count do not match")

    input_data_types = [
        parse_data_type(dt)
        for dt in to_list(conf.get(ModelKeys.input_data_types, ["float32"]))
    ]

    if len(input_data_types) == 1 and input_count > 1:
        input_data_types = [input_data_types[0]] * input_count
    mace_check(
        len(input_data_types) == input_count,
        "the number of input_data_types should be "
        "the same as input tensors")
    conf[ModelKeys.input_data_types] = input_data_types

    input_data_formats = [
        parse_data_format(df)
        for df in to_list(conf.get(ModelKeys.input_data_formats, ["NHWC"]))
    ]
    if len(input_data_formats) == 1 and input_count > 1:
        input_data_formats = [input_data_formats[0]] * input_count
    mace_check(
        len(input_data_formats) == input_count,
        "the number of input_data_formats should be "
        "the same as input tensors")
    conf[ModelKeys.input_data_formats] = input_data_formats

    input_ranges = [
        parse_float_array(r)
        for r in to_list(conf.get(ModelKeys.input_ranges, ["-1.0,1.0"]))
    ]
    if len(input_ranges) == 1 and input_count > 1:
        input_ranges = [input_ranges[0]] * input_count
    mace_check(
        len(input_ranges) == input_count,
        "the number of input_ranges should be "
        "the same as input tensors")
    conf[ModelKeys.input_ranges] = input_ranges

    # parse output
    conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors])
    output_count = len(conf[ModelKeys.output_tensors])
    conf[ModelKeys.output_shapes] = [
        parse_int_array(shape)
        for shape in to_list(conf[ModelKeys.output_shapes])
    ]
    mace_check(
        len(conf[ModelKeys.output_shapes]) == output_count,
        "output node count and shape count do not match")

    output_data_types = [
        parse_data_type(dt)
        for dt in to_list(conf.get(ModelKeys.output_data_types, ["float32"]))
    ]
    if len(output_data_types) == 1 and output_count > 1:
        output_data_types = [output_data_types[0]] * output_count
    mace_check(
        len(output_data_types) == output_count,
        "the number of output_data_types should be "
        "the same as output tensors")
    conf[ModelKeys.output_data_types] = output_data_types

    output_data_formats = [
        parse_data_format(df)
        for df in to_list(conf.get(ModelKeys.output_data_formats, ["NHWC"]))
    ]
    if len(output_data_formats) == 1 and output_count > 1:
        output_data_formats = [output_data_formats[0]] * output_count
    mace_check(
        len(output_data_formats) == output_count,
        "the number of output_data_formats should be "
        "the same as output tensors")
    conf[ModelKeys.output_data_formats] = output_data_formats

    if ModelKeys.check_tensors in conf:
        conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors])
        conf[ModelKeys.check_shapes] = [
            parse_int_array(shape)
            for shape in to_list(conf[ModelKeys.check_shapes])
        ]
        mace_check(
            len(conf[ModelKeys.check_tensors]) == len(
                conf[ModelKeys.check_shapes]),
            "check tensors count and shape count do not match.")

    MaceLogger.summary(conf)

    return conf
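A minimal illustrative config that normalize_model_config would accept, assuming the ModelKeys constants map to the literal key strings shown here (tensor names and shapes are made up):

conf = {
    "platform": "tensorflow",
    "runtime": "cpu",
    "input_tensors": "input",
    "input_shapes": "1,224,224,3",
    "output_tensors": "output",
    "output_shapes": "1,1001",
}
normalized = normalize_model_config(conf)
# Scalars are promoted to lists and shape strings parsed to int arrays,
# e.g. normalized[ModelKeys.input_shapes] == [[1, 224, 224, 3]]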