Example #1
    def convert_reduce(self, op):
        self.add_min_max_const_node(op, op.input[0])
        reduce_type_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_reduce_type_str)
        mace_check(reduce_type_arg.i == ReduceType.MEAN.value,
                   "Hexagon Reduce only supports Mean now.")
        keep_dims_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_keepdims_str)
        mace_check(keep_dims_arg.i == 1,
                   "Hexagon Reduce Mean only supports keep dims now.")
        axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
        mace_check(1 <= len(axis_arg.ints) <= 2,
                   "Hexagon Reduce Mean only supports spatial now.")
        for i in axis_arg.ints:
            mace_check(1 <= i <= 2,
                       "Hexagon Reduce Mean only supports spatial now")
        input_shape = get_input_shape(op.input[0], self._model)
        if len(axis_arg.ints) == 1:
            dim1, dim2 = (input_shape[1], 1) \
                if axis_arg.ints[0] == 1 else (1, input_shape[2])
        else:
            dim1, dim2 = input_shape[1], input_shape[2]
        self.add_arg_const_node(op, '/window:0', [1, dim1, dim2, 1])
        self.add_arg_const_node(op, '/strides:0', [1, dim1, dim2, 1])

        op.type = HexagonOp.QuantizedAvgPool_8.name
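
As a reference, the window/stride selection above reduces to a small standalone rule. A minimal sketch (plain Python with a hypothetical helper name, independent of MACE) of how a spatial mean over NHWC axes maps onto an average-pool window:

    def mean_axes_to_pool_window(input_shape, axes):
        # Reducing over H and/or W (axes 1 and 2 of an NHWC tensor) equals
        # average pooling with a window spanning exactly the reduced dims;
        # the converter uses the same values for the strides node.
        assert 1 <= len(axes) <= 2 and all(1 <= a <= 2 for a in axes)
        if len(axes) == 1:
            dim1, dim2 = (input_shape[1], 1) if axes[0] == 1 \
                else (1, input_shape[2])
        else:
            dim1, dim2 = input_shape[1], input_shape[2]
        return [1, dim1, dim2, 1]

    # NHWC input [1, 7, 5, 32]: mean over H uses a 7x1 window,
    # mean over both H and W uses the full 7x5 window.
    assert mean_axes_to_pool_window([1, 7, 5, 32], [1]) == [1, 7, 1, 1]
    assert mean_axes_to_pool_window([1, 7, 5, 32], [1, 2]) == [1, 7, 5, 1]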
Example #2
    def convert_general_op(self, mge_op):
        op = self._mace_net_def.op.add()
        op.name = mge_op.name
        op.type = mgb.cgtools.get_opr_type(mge_op)
        op.input.extend([mge_input.name for mge_input in mge_op.inputs])
        op.output.extend([mge_output.name for mge_output in mge_op.outputs])
        for mge_output in mge_op.outputs:
            output_shape = op.output_shape.add()
            output_shape.dims.extend(mge_output.imm_shape)

        data_type_arg = op.arg.add()
        data_type_arg.name = "T"
        data_type_arg.i = self._option.data_type

        framework_type_arg = op.arg.add()
        framework_type_arg.name = MaceKeyword.mace_framework_type_str
        framework_type_arg.i = FrameworkType.MEGENGINE.value

        # check compute format of megengine
        compute_format = DataFormat.NCHW
        try:
            if "format" in mge_op.params.keys():
                compute_format = self.compute_format_type[
                    mge_op.params["format"]]
        except AttributeError:
            compute_format = DataFormat.NCHW
        ConverterUtil.add_data_format_arg(op, compute_format)

        return op
Example #3
    def convert_reduce(self, op):
        self.add_min_max_const_node(op, op.input[0])
        reduce_type_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_reduce_type_str)
        mace_check(reduce_type_arg.i == ReduceType.MEAN.value,
                   "Hexagon Reduce only supports Mean now.")
        keep_dims_arg = ConverterUtil.get_arg(op,
                                              MaceKeyword.mace_keepdims_str)
        mace_check(keep_dims_arg.i == 1,
                   "Hexagon Reduce Mean only supports keep dims now.")
        axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
        mace_check(1 <= len(axis_arg.ints) <= 2,
                   "Hexagon Reduce Mean only supports spatial now.")
        for i in axis_arg.ints:
            mace_check(1 <= i <= 2,
                       "Hexagon Reduce Mean only supports spatial now")
        producer_op_name, _ = get_op_and_port_from_tensor(op.input[0])
        input_dims = None
        for producer_op in self._model.op:
            if producer_op.name == producer_op_name:
                input_dims = producer_op.output_shape[0].dims
                break
        mace_check(input_dims is not None, "Missing input shape.")
        if len(axis_arg.ints) == 1:
            dim1, dim2 = (input_dims[1], 1) \
                if axis_arg.ints[0] == 1 else (1, input_dims[2])
        else:
            dim1, dim2 = input_dims[1], input_dims[2]
        self.add_arg_const_node(op, '/window:0', [1, dim1, dim2, 1])
        self.add_arg_const_node(op, '/strides:0', [1, dim1, dim2, 1])

        op.type = HexagonOp.QuantizedAvgPool_8.name
Example #4
    def convert_resizenearestneighbor(self, op):
        height_scale_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_height_scale_str)
        width_scale_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_width_scale_str)
        if height_scale_arg is not None:
            mace_check(
                width_scale_arg is not None,
                "height scale and width scale should be present at the same time."
            )  # noqa
            if len(op.input) == 2:
                op.input.pop()
            height_scale = height_scale_arg.f
            width_scale = width_scale_arg.f
            producer_op = self._producers[op.input[0]]
            for i in range(len(producer_op.output)):
                if producer_op.output[i] == op.input[0]:
                    input_shape = producer_op.output_shape[i]
                    break
            newdim = [
                int(height_scale * input_shape.dims[1]),
                int(width_scale * input_shape.dims[2])
            ]
            self.add_arg_const_node(op, '/newdim:0', [2], newdim)

        self.add_min_max_const_node(op, op.input[0])

        self.add_resize_args(op)

        op.type = HexagonOp.ResizeNearestNeighbor_8.name
Example #5
    def infer_shape_argmax(self, op):
        input_shape = self._output_shape_cache[op.input[0]]
        output_dim_num = len(input_shape)
        if output_dim_num < 3:
            output_dim_num = 3

        axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
        has_axis = (axis_arg is not None)
        axis_value = 0
        if has_axis:
            axis_value = axis_arg.i
            if axis_value < 0:
                axis_value = len(input_shape) + axis_value

        top_k = ConverterUtil.get_arg(op, MaceKeyword.mace_top_k_str).i
        mace_check(top_k >= 1, "Invalid top_k value")
        out_val = ConverterUtil.get_arg(op, MaceKeyword.mace_out_val_str).i

        if has_axis:  # Produces max_ind or max_val per axis
            output_shape = input_shape
            output_shape[axis_value] = top_k
        else:
            output_shape = [1] * output_dim_num
            output_shape[0] = input_shape[0]
            output_shape[2] = top_k
            if out_val:  # Produces max_ind and max_val
                output_shape[1] = 2

        self.add_output_shape(op, [output_shape])
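
The branching above reads more easily as a pure function. A sketch of the same rule (hypothetical helper, not the MACE API):

    def argmax_output_shape(input_shape, axis=None, top_k=1, out_val=False):
        # With an explicit axis, only that dimension changes, to top_k.
        if axis is not None:
            axis = axis if axis >= 0 else len(input_shape) + axis
            out = list(input_shape)
            out[axis] = top_k
            return out
        # Without an axis: [batch, 1 or 2, top_k], padded to rank >= 3;
        # dim 1 becomes 2 when both indices and values are produced.
        out = [1] * max(len(input_shape), 3)
        out[0] = input_shape[0]
        out[2] = top_k
        if out_val:
            out[1] = 2
        return out

    assert argmax_output_shape([2, 8, 8, 16], axis=-1) == [2, 8, 8, 1]
    assert argmax_output_shape([2, 100], top_k=5, out_val=True) == [2, 2, 5]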
Example #6
    def convert_general_op_with_input_output(self, keras_op):
        op = self._mace_net_def.op.add()
        op.name = keras_op.name
        data_type_arg = op.arg.add()
        data_type_arg.name = "T"
        data_type_arg.i = dtype2mtype(keras_op.dtype)
        framework_type_arg = op.arg.add()
        framework_type_arg.name = MaceKeyword.mace_framework_type_str
        framework_type_arg.i = FrameworkType.KERAS.value
        ConverterUtil.add_data_format_arg(op, DataFormat.NHWC)

        input = get_input(keras_op)
        if isinstance(input, list):
            for e in input:
                op.input.append(e.name)
        else:
            op.input.append(input.name)

        output = get_output(keras_op)
        mace_check(not isinstance(output, list), "only support one output")
        op.output.append(output.name)
        output_shape = op.output_shape.add()
        output_shape.dims.extend(keras_shape2list(output.shape))

        return op
Example #7
    def convert_resizebilinear(self, op):
        resize_size_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_resize_size_str)
        if resize_size_arg is not None:
            newdim = resize_size_arg.ints
        else:
            height_scale_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_height_scale_str)
            width_scale_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_width_scale_str)
            mace_check(
                height_scale_arg is not None and width_scale_arg is not None,
                "Wrong ResizeBilinear arguments.")
            if len(op.input) == 2:
                op.input.pop()
            height_scale = height_scale_arg.f
            width_scale = width_scale_arg.f
            producer_op = self._producers[op.input[0]]
            for i in range(len(producer_op.output)):
                if producer_op.output[i] == op.input[0]:
                    input_shape = producer_op.output_shape[i]
                    break
            newdim = [
                int(height_scale * input_shape.dims[1]),
                int(width_scale * input_shape.dims[2])
            ]
        self.add_arg_const_node(op, '/newdim:0', [2], newdim)

        self.add_min_max_const_node(op, op.input[0])

        self.add_resize_args(op)

        op.type = HexagonOp.QuantizedResizeBilinear_8.name
Example #8
    def __init__(self, option, src_model_file):
        self._op_converters = {
            keras.layers.InputLayer: self.convert_input_layer,
            keras.layers.Flatten: self.convert_flatten,
            keras.layers.Dense: self.convert_dense,
            keras.layers.Conv2D: self.convert_conv2d,
            keras.layers.MaxPooling2D: self.convert_maxpooling2d,
            keras.layers.Dropout: self.convert_dropout,
            keras.layers.DepthwiseConv2D: self.convert_depthwise_conv2d,
            keras.layers.Softmax: self.convert_softmax,
            keras.layers.BatchNormalization: self.convert_batch_normalization,
            keras.layers.Activation: self.convert_activation,
            keras.layers.GlobalAveragePooling2D:
            self.convert_global_average_pooling2d,
            keras.layers.Add: self.convert_add,
            QuantizeLayer: self.convert_quantize_layer,
            QuantizeWrapper: self.convert_quantize_wrapper,
        }

        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.HWIO)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NHWC)

        with tfmot.quantization.keras.quantize_scope():
            self._keras_model = keras.models.load_model(src_model_file,
                                                        compile=False)
Example #9
    def convert_conv2d(self, op):
        if len(op.input) < 3:
            bias = self.add_bias(op)
        else:
            bias = op.input.pop()

        self.add_min_max_const_node(op, op.input[0])
        self.add_min_max_const_node(op, op.input[1])

        strides_arg = ConverterUtil.get_arg(op, 'strides')
        mace_check(strides_arg is not None,
                   "Missing strides of Conv or Depthwise Conv.")
        self.add_arg_const_node(
            op, '/strides:0', [1, strides_arg.ints[0], strides_arg.ints[1], 1])

        op.input.append(bias)
        self.add_min_max_const_node(op, bias)
        self.add_min_max_const_node(op, op.output[0], True, True, False)

        self.add_padding_type_for_conv_pooling(op,
                                               self._consts[op.input[1]].dims,
                                               strides_arg.ints)

        dilations_arg = ConverterUtil.get_arg(op, 'dilations')
        mace_check(
            dilations_arg is None
            or (dilations_arg.ints[0] == 1 and dilations_arg.ints[1] == 1),
            "Hexagon only support dilations[1,1].")

        if op.type == MaceOp.DepthwiseConv2d.name:
            op.type = HexagonOp.DepthwiseSupernode_8x8p32to8.name
        else:
            op.type = HexagonOp.Supernode_8x8p32to8.name
Example #10
    def infer_shape_matmul(self, op):
        lhs_shape = self._output_shape_cache[op.input[0]]
        lhs_rank = len(lhs_shape)
        lhs_rows = lhs_shape[-2]
        lhs_cols = lhs_shape[-1]
        rhs_shape = self._output_shape_cache[op.input[1]]
        rhs_rank = len(rhs_shape)
        rhs_rows = rhs_shape[-2]
        rhs_cols = rhs_shape[-1]
        transpose_a_ = ConverterUtil.get_arg(
            op, MaceKeyword.mace_transpose_a_str).i
        transpose_b_ = ConverterUtil.get_arg(
            op, MaceKeyword.mace_transpose_b_str).i

        rows = lhs_cols if transpose_a_ else lhs_rows
        cols = rhs_rows if transpose_b_ else rhs_cols

        if lhs_rank >= rhs_rank:
            if lhs_rank > rhs_rank:
                mace_check(
                    rhs_rank == 2,
                    'The rhs rank of non-batched MatMul must be 2')  # noqa
            output_shape = lhs_shape.copy()
            output_shape[lhs_rank - 2] = rows
            output_shape[lhs_rank - 1] = cols
        else:
            output_shape = rhs_shape.copy()
            output_shape[rhs_rank - 2] = rows
            output_shape[rhs_rank - 1] = cols
        self.add_output_shape(op, [output_shape])
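
The same rule as a standalone sketch (hypothetical helper name), with a worked example:

    def matmul_output_shape(lhs, rhs, transpose_a=False, transpose_b=False):
        # The higher-rank operand supplies the batch dimensions; when the
        # lhs rank is higher, the rhs must be a plain rank-2 matrix.
        rows = lhs[-1] if transpose_a else lhs[-2]
        cols = rhs[-2] if transpose_b else rhs[-1]
        if len(lhs) >= len(rhs):
            assert len(lhs) == len(rhs) or len(rhs) == 2
            out = list(lhs)
        else:
            out = list(rhs)
        out[-2], out[-1] = rows, cols
        return out

    # Batched [4, 3, 5] x [4, 5, 7] -> [4, 3, 7]; a rank-2 rhs broadcasts:
    assert matmul_output_shape([4, 3, 5], [4, 5, 7]) == [4, 3, 7]
    assert matmul_output_shape([4, 3, 5], [7, 5], transpose_b=True) == [4, 3, 7]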
Example #11
    def __init__(self, option, src_model_file, src_weight_file):
        self._op_converters = {
            'Input': self.convert_nop,
            'Convolution': self.convert_conv2d,
            'Deconvolution': self.convert_deconv2d,
            'Eltwise': self.convert_elementwise,
            'Add': self.convert_add,
            'ReLU': self.convert_activation,
            'ReLU6': self.convert_activation,
            'TanH': self.convert_activation,
            'Sigmoid': self.convert_activation,
            'PReLU': self.convert_activation,
            'Clip': self.convert_activation,
            'Pooling': self.convert_pooling,
            'Concat': self.convert_concat,
            'Slice': self.convert_slice,
            'Softmax': self.convert_softmax,
            'InnerProduct': self.convert_fully_connected,
            'Interp': self.convert_interp,
            'BatchNorm': self.convert_folded_batchnorm,
            'GroupNorm': self.convert_group_norm,
            'Crop': self.convert_crop,
            'Scale': self.convert_scale,
            'ShuffleChannel': self.convert_channel_shuffle,
            'Permute': self.convert_permute,
            'Flatten': self.convert_flatten,
            'PriorBox': self.convert_prior_box,
            'Reshape': self.convert_reshape,
            'L2Normalization': self.convert_lpnorm,
            'L1Normalization': self.convert_lpnorm,
            'MVN': self.convert_MVN,
            'Bias': self.convert_bias,
            'ArgMax': self.convert_argmax,
            'ResizeNearest': self.convert_resize_nearest,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.OIHW)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NCHW)
        self._caffe_net = CaffeNet()
        self._caffe_layers = caffe_pb2.NetParameter()
        caffe_weights = caffe_pb2.NetParameter()

        # parse prototxt
        with open(src_model_file, 'r') as f:
            google.protobuf.text_format.Merge(
                str(f.read()), self._caffe_layers)
            self.filter_test_layers(self._caffe_layers)
            for layer in self._caffe_layers.layer:
                self._caffe_net.add_layer(layer)

        # parse model weight
        with open(src_weight_file, 'rb') as f:
            caffe_weights.ParseFromString(f.read())
            self.filter_test_layers(caffe_weights)
            for weight in caffe_weights.layer:
                self._caffe_net.add_blob(weight)

        self._skip_ops = []
Example #12
    def infer_shape_fully_connected(self, op):
        input_shape = self._output_shape_cache[op.input[0]]
        weight_shape = self._output_shape_cache[op.input[1]]
        if ConverterUtil.data_format(op) == DataFormat.NCHW:
            output_shape = [input_shape[0], weight_shape[0], 1, 1]
        else:
            mace_check(
                False,
                "format %s is not supported" % ConverterUtil.data_format(op))
        self.add_output_shape(op, [output_shape])
Example #13
    def add_tensorflow_padding_value(self):
        for op in self._model.op:
            padding_type = ConverterUtil.get_arg(
                op, MaceKeyword.mace_padding_str)
            if padding_type is None:
                continue
            padding_arg = op.arg.add()
            padding_arg.name = MaceKeyword.mace_padding_values_str
            if padding_type.i == PaddingMode.VALID.value:
                padding_arg.ints.extend([0, 0, 0, 0])
            elif padding_type.i == PaddingMode.SAME.value:
                stride = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_strides_str).ints
                kernel = []
                dilation = [1, 1]
                if op.type == MaceOp.Conv2D.name or \
                   op.type == MaceOp.DepthwiseConv2d.name or \
                   op.type == MaceOp.Deconv2D.name:
                    if ConverterUtil.get_arg(
                            op, MaceKeyword.mace_dilations_str) is not None:
                        dilation = ConverterUtil.get_arg(
                            op, MaceKeyword.mace_dilations_str).ints
                    for tensor in self._model.tensors:
                        if tensor.name == op.input[1]:
                            kernel = tensor.dims[1:3]
                            break
                else:
                    kernel = ConverterUtil.get_arg(
                        op, MaceKeyword.mace_kernel_str).ints
                in_size = []
                for input_info in self._model.input_info:
                    if input_info.name == op.input[0]:
                        in_size = input_info.dims[1:3]
                        break
                for _op in self._model.op:
                    for out in _op.output:
                        if out == op.input[0]:
                            in_size = _op.output_shape[0].dims[1:3]
                            break
                    if len(in_size) > 0:
                        break
                out_size = op.output_shape[0].dims[1:3]
                if op.type == MaceOp.Deconv2D.name:
                    h = (in_size[0] - 1) * stride[0] + kernel[0] - out_size[0]
                    w = (in_size[1] - 1) * stride[1] + kernel[1] - out_size[1]
                else:
                    h = (out_size[0] - 1) * stride[0] \
                        + ((kernel[0] - 1) * dilation[0] + 1) - in_size[0]
                    w = (out_size[1] - 1) * stride[1] \
                        + ((kernel[1] - 1) * dilation[1] + 1) - in_size[1]
                top = int(np.floor(h / 2))
                left = int(np.floor(w / 2))
                bottom = h - top
                right = w - left
                padding_arg.ints.extend([top, right, bottom, left])
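
The SAME-padding arithmetic is easier to verify in isolation. A sketch with assumed example values (not tied to any real model):

    import math

    def same_padding_2d(in_size, out_size, kernel, stride, dilation=(1, 1)):
        # Total padding per axis is whatever makes the dilated, strided
        # window cover the input; the smaller half goes before (top/left),
        # matching top = floor(h / 2) and left = floor(w / 2) above.
        pads = []
        for i in range(2):
            eff_k = (kernel[i] - 1) * dilation[i] + 1
            total = (out_size[i] - 1) * stride[i] + eff_k - in_size[i]
            before = int(math.floor(total / 2))
            pads.append((before, total - before))
        return pads  # [(top, bottom), (left, right)]

    # 8x8 input, 3x3 kernel, stride 2 -> SAME output 4x4: one pixel of
    # padding per axis in total, placed on the bottom/right.
    assert same_padding_2d((8, 8), (4, 4), (3, 3), (2, 2)) == [(0, 1), (0, 1)]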
Example #14
    def common_check(self):
        for op in self._model.op:
            mace_check(
                len(op.input) >= 1,
                op.name + ': apu does not support op with 0 input')
            mace_check(
                len(op.output) == 1,
                op.name + ': apu only support single output op')
            mace_check(
                len(op.output) == len(op.output_shape),
                op.name + ': length of output and output_shape not'
                ' match')
            mace_check(
                len(op.output_shape[0].dims) <= 4,
                op.name + ': apu only support 1D~4D tensor')
            if op.output_type[0] == mace_pb2.DT_UINT8 \
                    or op.output_type[0] == mace_pb2.DT_INT16:
                mace_check(
                    len(op.output) == len(op.quantize_info),
                    op.name + ': length of output and quantize_info not'
                    ' match')
            data_format = ConverterUtil.data_format(op)
            if data_format is not None and len(op.output_shape[0].dims) == 4:
                mace_check((data_format == DataFormat.NHWC)
                           or (data_format == DataFormat.AUTO),
                           op.name + ': apu only support 4D tensor with NHWC'
                           ' or AUTO format but find ' + str(data_format))
            act_mode_arg = ConverterUtil.get_arg(
                op, MaceKeyword.mace_activation_type_str)
            if act_mode_arg is not None:
                mace_check(
                    act_mode_arg.s == b'PRELU' or act_mode_arg.s == b'RELU'
                    or act_mode_arg.s == b'RELUX' or act_mode_arg.s == b'TANH'
                    or act_mode_arg.s == b'SIGMOID',
                    op.name + ': apu only support activation PRELU, RELU,'
                    ' RELUX, TANH and SIGMOID')
        for tensor in self._model.tensors:
            mace_check(
                len(tensor.dims) <= 4,
                tensor.name + ': apu only support 1D~4D tensor')
        for input_info in self._model.input_info:
            mace_check(
                len(input_info.dims) <= 4,
                input_info.name + ': apu only support 1D~4D tensor')
            mace_check(
                input_info.data_type == mace_pb2.DT_FLOAT
                or input_info.data_type == mace_pb2.DT_INT16
                or input_info.data_type == mace_pb2.DT_UINT8,
                input_info.name + ': apu only support '
                'float/uint8/int16 input')
            if len(input_info.dims) == 4:
                mace_check(
                    input_info.data_format == DataFormat.NHWC.value,
                    input_info.name + ': apu only support 4D tensor'
                    ' with NHWC format')
Example #15
    def convert_elementwise(self, op):
        element_type = ConverterUtil.get_arg(
            op, MaceKeyword.mace_element_type_str).i

        if element_type == EltwiseType.DIV.value and \
                op.input[0] in self._consts:
            tensor = self._consts[op.input[0]]
            if len(tensor.int32_data) == 1:
                f = tensor.scale * (tensor.int32_data[0] - tensor.zero_point)
                if abs(f - 1) < 1e-6:  # recip
                    op_input = op.input[1]
                    del op.input[:]
                    op.input.append(op_input)
                    self.add_min_max_const_node(op, op.input[0])
                    op.type = HexagonOp.QuantizedRecip_8.name
                    return
        if element_type == EltwiseType.POW.value and \
                ConverterUtil.get_arg(
                    op, MaceKeyword.mace_scalar_input_str).f == 0.5:
            self.add_min_max_const_node(op, op.input[0])
            op.type = HexagonOp.QuantizedSqrt_8.name
            return
        if element_type == EltwiseType.CLIP.value:
            self.add_min_max_const_node(op, op.input[0])
            coeff = ConverterUtil.get_arg(op,
                                          MaceKeyword.mace_coeff_str).floats
            min_value, max_value = coeff[0], coeff[1]
            self.add_arg_const_node(op,
                                    "/min:0", [1], [min_value],
                                    data_type=mace_pb2.DT_FLOAT)
            self.add_arg_const_node(op,
                                    "/max:0", [1], [max_value],
                                    data_type=mace_pb2.DT_FLOAT)
            op.type = HexagonOp.QuantizedClamp_8.name
            return
        if len(op.input) == 1:
            scalar_input = ConverterUtil.get_arg(
                op, MaceKeyword.mace_scalar_input_str).f
            self.add_quantized_scalar_const_node("/b:0", scalar_input, op)
        self.add_min_max_const_node(op, op.input[0])
        self.add_min_max_const_node(op, op.input[1])

        if element_type in [
                EltwiseType.SUM.value, EltwiseType.SUB.value,
                EltwiseType.MIN.value, EltwiseType.MAX.value,
                EltwiseType.DIV.value
        ]:
            self.add_min_max_const_node(op, op.output[0], True, True, False)
        try:
            op.type = self.eltwise_type[element_type]
        except KeyError:
            mace_check(
                False, "Hexagon does not support elementwise %s" %
                EltwiseType(element_type).name)
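
The DIV shortcut at the top hinges on recovering the real value encoded by a one-element quantized tensor. A minimal sketch of that affine dequantization:

    def dequantize(q, scale, zero_point):
        # Real value represented by quantized value q under an affine scheme.
        return scale * (q - zero_point)

    # If the constant numerator of a DIV dequantizes to 1.0, the op is 1/x,
    # so the converter rewrites it to QuantizedRecip_8 on the other input:
    assert abs(dequantize(128, 1.0 / 127, 1) - 1.0) < 1e-6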
Example #16
    def infer_shape_conv_pool_shape(self, op):
        input_shape = self._output_shape_cache[op.input[0]]
        output_shape = np.zeros_like(input_shape)
        if op.type == MaceOp.Pooling.name:
            filter_shape = list(
                ConverterUtil.get_arg(op, MaceKeyword.mace_kernel_str).ints)
            if ConverterUtil.data_format(op) == DataFormat.NCHW:
                filter_shape = [input_shape[1], input_shape[1]] + filter_shape
                if ConverterUtil.get_arg(op,
                                         MaceKeyword.mace_global_pooling_str) \
                        is not None:
                    filter_shape[2] = input_shape[2]
                    filter_shape[3] = input_shape[3]
            else:  # NHWC
                filter_shape = filter_shape + [input_shape[1], input_shape[1]]
                if ConverterUtil.get_arg(op,
                                         MaceKeyword.mace_global_pooling_str) \
                        is not None:
                    filter_shape[0] = input_shape[1]
                    filter_shape[1] = input_shape[2]
        else:
            filter_shape = self._output_shape_cache[op.input[1]]

        paddings = ConverterUtil.get_arg(
            op, MaceKeyword.mace_padding_values_str).ints  # noqa
        strides = ConverterUtil.get_arg(op, MaceKeyword.mace_strides_str).ints
        dilations_arg = ConverterUtil.get_arg(op,
                                              MaceKeyword.mace_dilations_str)
        if dilations_arg is not None:
            dilations = dilations_arg.ints
        else:
            dilations = [1, 1]
        if op.type == MaceOp.Pooling.name:
            round_func = math.ceil
        else:
            round_func = math.floor

        output_shape[0] = input_shape[0]
        if ConverterUtil.data_format(op) == DataFormat.NCHW \
                and ConverterUtil.filter_format(self._net) == DataFormat.OIHW:  # noqa
            # filter format: OIHW
            if op.type == MaceOp.DepthwiseConv2d.name:
                output_shape[1] = filter_shape[0] * filter_shape[1]
            else:
                output_shape[1] = filter_shape[0]
            output_shape[2] = int(
                round_func((input_shape[2] + paddings[0] - filter_shape[2] -
                            (filter_shape[2] - 1) *
                            (dilations[0] - 1)) / float(strides[0]))) + 1
            output_shape[3] = int(
                round_func((input_shape[3] + paddings[1] - filter_shape[3] -
                            (filter_shape[3] - 1) *
                            (dilations[1] - 1)) / float(strides[1]))) + 1
        else:
            mace_check(
                False, "Mace can only infer shape for"
                " NCHW input and OIHW filter")

        self.add_output_shape(op, [output_shape])
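
Per spatial dimension, the computation above is the standard convolution output-size formula. A sketch (hypothetical helper):

    import math

    def conv_pool_out_dim(in_dim, kernel, stride, pad_total, dilation=1,
                          is_pooling=False):
        # Pooling rounds the quotient up (ceil); convolution rounds down.
        eff_k = kernel + (kernel - 1) * (dilation - 1)  # dilated extent
        rounder = math.ceil if is_pooling else math.floor
        return int(rounder((in_dim + pad_total - eff_k) / float(stride))) + 1

    # 224 input, 7x7 conv, stride 2, 6 total padding -> 112:
    assert conv_pool_out_dim(224, 7, 2, 6) == 112
    # 7 input, 3x3 max pool, stride 2, no padding -> ceil gives 3:
    assert conv_pool_out_dim(7, 3, 2, 0, is_pooling=True) == 3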
Example #17
    def convert_general_op(self, keras_op):
        op = self._mace_net_def.op.add()
        op.name = keras_op.name
        data_type_arg = op.arg.add()
        data_type_arg.name = "T"
        data_type_arg.i = dtype2mtype(keras_op.dtype)
        framework_type_arg = op.arg.add()
        framework_type_arg.name = MaceKeyword.mace_framework_type_str
        framework_type_arg.i = FrameworkType.KERAS.value
        ConverterUtil.add_data_format_arg(op, DataFormat.NHWC)

        return op
Example #18
    def convert_stridedslice(self, op):
        begin_mask = ConverterUtil.get_arg(op,
                                           MaceKeyword.mace_begin_mask_str).i
        end_mask = ConverterUtil.get_arg(op, MaceKeyword.mace_end_mask_str).i
        shrink_mask = ConverterUtil.get_arg(
            op, MaceKeyword.mace_shrink_axis_mask_str).i
        self.add_arg_const_node(op, "/begin_mask:0", [1], [begin_mask])
        self.add_arg_const_node(op, "/end_mask:0", [1], [end_mask])
        self.add_arg_const_node(op, "/shrink_mask:0", [1], [shrink_mask])
        self.add_min_max_const_node(op, op.input[0])

        op.type = HexagonOp.QuantizedStridedSlice_8.name
Example #19
    def add_deconv_pad_node(self, op):
        padding_type_arg = \
            ConverterUtil.get_arg(op, MaceKeyword.mace_padding_type_str)
        padding_values_arg = \
            ConverterUtil.get_arg(op, MaceKeyword.mace_padding_values_str)
        mace_check(
            padding_type_arg is not None or padding_values_arg is not None,
            "Missing padding of Deconv.")
        if padding_type_arg is not None:
            padding_type = PaddingMode(padding_type_arg.i)
            strides_arg = ConverterUtil.get_arg(op,
                                                MaceKeyword.mace_strides_str)
            mace_check(strides_arg is not None, "Missing strides of Deconv.")
            stride_h = strides_arg.ints[0]
            stride_w = strides_arg.ints[1]

            input_shape = self.get_input_shape(op.input[0])
            input_h = input_shape[1]
            input_w = input_shape[2]
            filter_tensor = self._consts[op.input[1]]
            filter_h = filter_tensor.dims[1]
            filter_w = filter_tensor.dims[2]
            output_h = op.output_shape[0].dims[1]
            output_w = op.output_shape[0].dims[2]

            if padding_type == PaddingMode.VALID:
                expected_input_h = (output_h - filter_h + stride_h) // stride_h
                expected_input_w = (output_w - filter_w + stride_w) // stride_w
            elif padding_type == PaddingMode.SAME:
                expected_input_h = (output_h + stride_h - 1) // stride_h
                expected_input_w = (output_w + stride_w - 1) // stride_w
            else:
                raise Exception(
                    'Hexagon deconv does not support padding type: ',
                    padding_type)
            mace_check(expected_input_h == input_h,
                       "Wrong input/output height")
            mace_check(expected_input_w == input_w, "Wrong input/output width")

            pad_h = (input_h - 1) * stride_h + filter_h - output_h
            pad_w = (input_w - 1) * stride_w + filter_w - output_w
        else:
            pad_h = padding_values_arg.ints[0]
            pad_w = padding_values_arg.ints[1]

        pad_h, pad_w = max(pad_h, 0), max(pad_w, 0)
        pad_top = pad_h // 2
        pad_bottom = pad_h - pad_top
        pad_left = pad_w // 2
        pad_right = pad_w - pad_left
        paddings = [pad_top, pad_bottom, pad_left, pad_right]
        self.add_arg_const_node(op, "/paddings:0", [1, 1, 2, 2], paddings)
Example #20
    def infer_shape_resize_bilinear(self, op):
        input_shape = self._output_shape_cache[op.input[0]]
        size = ConverterUtil.get_arg(op, MaceKeyword.mace_resize_size_str).ints
        if ConverterUtil.data_format(op) == DataFormat.NCHW:
            output_shape = [input_shape[0], input_shape[1], size[0], size[1]]
        elif ConverterUtil.data_format(op) == DataFormat.NHWC:
            output_shape = [input_shape[0], size[0], size[1], input_shape[3]]
        else:
            output_shape = []
            mace_check(
                False,
                "format %s is not supported" % ConverterUtil.data_format(op))
        self.add_output_shape(op, [output_shape])
Example #21
    def convert_resizebilinear(self, op):
        newdim_arg = ConverterUtil.get_arg(op,
                                           MaceKeyword.mace_resize_size_str)
        self.add_arg_const_node(op, '/newdim:0', [len(newdim_arg.ints)],
                                newdim_arg.ints)

        self.add_min_max_const_node(op, op.input[0])

        align_corners_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_align_corners_str)
        self.add_arg_const_node(op, '/align_corners:0', [1],
                                [align_corners_arg.i])

        op.type = HexagonOp.QuantizedResizeBilinear_8.name
Example #22
    def convert_activation(self, op):
        self.add_min_max_const_node(op, op.input[0])

        act_type = ConverterUtil.get_arg(
            op, MaceKeyword.mace_activation_type_str).s.decode()
        if act_type == ActivationType.RELUX.name:
            x = ConverterUtil.get_arg(
                op, MaceKeyword.mace_activation_max_limit_str).f
            self.add_scalar_const_node("/x:0", x, op)
        try:
            op.type = self.activation_type[act_type]
        except KeyError:
            mace_check(False,
                       "Hexagon does not support activation %s" % act_type)
Example #23
    def infer_shape_prior_box(self, op):
        output_shape = [1, 2, 1]
        input_shape = list(self._output_shape_cache[op.input[0]])
        input_w = input_shape[3]
        input_h = input_shape[2]
        min_size = ConverterUtil.get_arg(
            op, MaceKeyword.mace_min_size_str).floats  # noqa
        max_size = ConverterUtil.get_arg(
            op, MaceKeyword.mace_max_size_str).floats  # noqa
        aspect_ratio = ConverterUtil.get_arg(
            op, MaceKeyword.mace_aspect_ratio_str).floats  # noqa
        num_prior = len(aspect_ratio) * len(min_size) + len(max_size)

        output_shape[2] = num_prior * input_h * input_w * 4
        self.add_output_shape(op, [output_shape])
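
The prior count deserves spelling out. A sketch of the same arithmetic (hypothetical helper):

    def prior_box_output_shape(feat_h, feat_w, n_min, n_max, n_aspect):
        # Each feature-map cell emits one box per (min_size, aspect_ratio)
        # pair plus one per max_size; every box contributes 4 coordinates.
        # The middle dimension of 2 holds coordinates and variances, as in
        # Caffe's PriorBox.
        num_prior = n_aspect * n_min + n_max
        return [1, 2, num_prior * feat_h * feat_w * 4]

    # 10x10 feature map, 1 min size, 1 max size, 4 aspect ratios
    # -> 5 priors per cell:
    assert prior_box_output_shape(10, 10, 1, 1, 4) == [1, 2, 2000]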
Example #24
    def add_resize_args(self, op):
        align_corners_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_align_corners_str)
        self.add_arg_const_node(op, '/align_corners:0', [1],
                                [align_corners_arg.i])

        coordinate_transformation_mode_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_coordinate_transformation_mode_str)
        if coordinate_transformation_mode_arg is not None:
            name = CoordinateTransformationMode(
                coordinate_transformation_mode_arg.i)
            value = coordinate_transformation_mode_arg.i
            mace_check(value == CoordinateTransformationMode.HALF_PIXEL.value,
                       "Hexagon does not support resize %s" % name)
            self.add_arg_const_node(op, '/half_pixel_centers:0', [1], [1])
Example #25
    def convert_general_op(self, outputs_vals):
        op = self._mace_net_def.op.add()
        op.name = outputs_vals[0].debugName()

        data_type_arg = op.arg.add()
        data_type_arg.name = 'T'
        data_type_arg.i = self._option.data_type

        framework_type_arg = op.arg.add()
        framework_type_arg.name = MaceKeyword.mace_framework_type_str
        framework_type_arg.i = FrameworkType.PYTORCH.value

        ConverterUtil.add_data_format_arg(op, DataFormat.NCHW)

        return op
Example #26
    def split_activation_op(self, keras_op, op):
        activation = keras_op.get_config()["activation"]
        if "class_name" in activation:
            assert activation["class_name"] == "QuantizeAwareActivation"
            activation = activation["config"]["activation"]

        if activation == "linear":
            op.output.append(get_output(keras_op).name)
            output_shape = op.output_shape.add()
            output_shape.dims.extend(
                keras_shape2list(get_output(keras_op).shape)
            )

            return None
        else:
            activation_tmp_name = get_output(keras_op).name + "_act"
            op.output.append(activation_tmp_name)
            output_shape = op.output_shape.add()
            output_shape.dims.extend(
                keras_shape2list(get_output(keras_op).shape)
            )

            activation_op = self._mace_net_def.op.add()
            activation_op.name = keras_op.name + "_act"
            if activation == "softmax":
                activation_op.type = MaceOp.Softmax.name
            else:
                activation_op.type = MaceOp.Activation.name
                type_arg = activation_op.arg.add()
                type_arg.name = MaceKeyword.mace_activation_type_str
                type_arg.s = six.b(activation_types_dict[activation].name)

            activation_op.input.append(activation_tmp_name)
            activation_op.output.append(get_output(keras_op).name)
            output_shape = activation_op.output_shape.add()
            output_shape.dims.extend(
                keras_shape2list(get_output(keras_op).shape)
            )

            data_type_arg = activation_op.arg.add()
            data_type_arg.name = "T"
            data_type_arg.i = dtype2mtype(keras_op.dtype)
            framework_type_arg = activation_op.arg.add()
            framework_type_arg.name = MaceKeyword.mace_framework_type_str
            framework_type_arg.i = FrameworkType.KERAS.value
            ConverterUtil.add_data_format_arg(activation_op, DataFormat.NHWC)

            return activation_op
Example #27
    def convert_conv2d(self, op):
        channels = op.output_shape[0].dims[3]
        if len(op.input) < 3:
            print('Supernode requires a bias input; adding one.')
            bias_data = np.zeros(channels, dtype=int)
            bias_tensor = self._model.tensors.add()
            bias_tensor.data_type = mace_pb2.DT_INT32
            bias_tensor.dims.extend([channels])
            bias_tensor.int32_data.extend(bias_data)
            bias_tensor.minval = 0
            bias_tensor.maxval = 0
            bias_tensor.name = op.name + "/bias:0"
            bias = bias_tensor.name
            self._consts[bias] = bias_tensor
        else:
            bias = op.input.pop()

        self.add_min_max_const_node(op, op.input[0])
        self.add_min_max_const_node(op, op.input[1])

        strides_arg = ConverterUtil.get_arg(op, 'strides')
        mace_check(strides_arg is not None,
                   "Missing strides of Conv or Depthwise Conv.")
        self.add_arg_const_node(
            op, '/strides:0', [1, strides_arg.ints[0], strides_arg.ints[1], 1])

        op.input.append(bias)
        self.add_min_max_const_node(op, bias)
        self.add_min_max_const_node(op, op.output[0], True, True, False)

        if op.type == MaceOp.DepthwiseConv2d.name:
            op.type = HexagonOp.DepthwiseSupernode_8x8p32to8.name
        else:
            op.type = HexagonOp.Supernode_8x8p32to8.name
Example #28
    def run(self):
        if self._option.quantize:
            self.use_quant_in_out()
        self.add_op_output_type()
        self.ensure_bias_vector()
        self.ensure_binary_input()
        self.common_check()
        if ConverterUtil.get_arg(self._model,
                                 MaceKeyword.mace_framework_type_str).i == \
           FrameworkType.TENSORFLOW.value:
            self.add_tensorflow_padding_value()
        # Calculate the number of apu constant tensors
        # Any tensors which will be apu constant tensors should be added
        # above this line
        const_data_num_arg = self._model.arg.add()
        const_data_num_arg.name = MaceKeyword.mace_const_data_num_arg_str
        const_data_num_arg.i = len(self._model.tensors)
        apu_data_type_arg = self._model.arg.add()
        apu_data_type_arg.name = MaceKeyword.mace_apu_data_type_arg_str
        if self._option.quantize_schema == 'mace_apu_16bit_per_tensor':
            apu_data_type_arg.i = mace_pb2.DT_INT16
        elif self._option.quantize:
            apu_data_type_arg.i = mace_pb2.DT_UINT8
        else:
            apu_data_type_arg.i = mace_pb2.DT_FLOAT
        self.convert_ops()
        self.add_node_id()
        return self._model
Example #29
    def init_multi_net_def_info(self, multi_net_def):
        netdefs = multi_net_def.net_def
        self.net_num = len(netdefs)
        self.net_defs = [None] * self.net_num
        self.net_op_nums = [0] * self.net_num
        self.quantizes = [False] * self.net_num
        self.hexagons = [False] * self.net_num
        for net_def in netdefs:
            order = net_def.infer_order
            self.net_defs[order] = net_def
            self.net_op_nums[order] = len(net_def.op)
            is_quantize = ConverterUtil.get_arg(
                net_def, MaceKeyword.mace_quantize_flag_arg_str)
            self.quantizes[order] = \
                False if is_quantize is None else is_quantize.i == 1
            self.hexagons[order] = \
                self.quantizes[order] and \
                (net_def.op[-1].type == HexagonOp.DequantizeOUTPUT_8tof.name or
                 net_def.op[-1].type == HexagonOp.OUTPUT.name)

        self.end_index = self.start_index = 0
        for op_num in self.net_op_nums:
            self.end_index = self.end_index + op_num
        self.start_net_idx = 0
        self.start_op_idx = 0
        self.end_net_idx = self.net_num
        self.end_op_idx = self.net_op_nums[self.end_net_idx - 1]
Example #30
    def convert_pooling(self, op):
        self.add_min_max_const_node(op, op.input[0])

        window_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_kernel_str)
        self.add_arg_const_node(op, '/window:0',
                                [1, window_arg.ints[0], window_arg.ints[1], 1])
        strides_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_strides_str)
        self.add_arg_const_node(
            op, '/strides:0', [1, strides_arg.ints[0], strides_arg.ints[1], 1])

        pooling_type_arg = ConverterUtil.get_arg(
            op, MaceKeyword.mace_pooling_type_str)
        if PoolingType(pooling_type_arg.i) == PoolingType.AVG:
            op.type = HexagonOp.QuantizedAvgPool_8.name
        else:
            op.type = HexagonOp.QuantizedMaxPool_8.name