Example #1
    def add_deconv_pad_node(self, op):
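        """Append an explicit 'paddings' const tensor to a Deconv op.

        VALID padding contributes zero padding; SAME padding pads by
        half the filter size on each side of each spatial dimension.
        """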
        padding_type_arg = \
            ConverterUtil.get_arg(op, MaceKeyword.mace_padding_type_str)
        mace_check(padding_type_arg is not None, "Missing padding of Deconv.")
        padding_type = PaddingMode(padding_type_arg.i)
        filter_tensor = self._consts[op.input[1]]
        filter_height = filter_tensor.dims[1]
        filter_width = filter_tensor.dims[2]

        if padding_type == PaddingMode.VALID:
            paddings = [0, 0, 0, 0]
        elif padding_type == PaddingMode.SAME:
            pad_height, pad_width = filter_height // 2, filter_width // 2
            paddings = [pad_height, pad_height, pad_width, pad_width]
        else:
            raise Exception(
                'Hexagon deconv does not support padding type: %s'
                % padding_type)

        padding_tensor = self._model.tensors.add()
        padding_tensor.name = op.name + "/paddings:0"
        padding_tensor.data_type = mace_pb2.DT_INT32
        padding_tensor.dims.extend([1, 1, 2, 2])
        padding_tensor.int32_data.extend(paddings)

        self._consts[padding_tensor.name] = padding_tensor
        op.input.append(padding_tensor.name)
Example #2
    def add_deconv_pad_node(self, op):
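        """Append an explicit 'paddings' const tensor to a Deconv op.

        When only a padding type is given, the total padding is derived
        from the input/output shapes, filter sizes, and strides, then
        split as evenly as possible between the two sides of each
        spatial dimension; explicit padding values are used directly.
        """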
        padding_type_arg = \
            ConverterUtil.get_arg(op, MaceKeyword.mace_padding_type_str)
        padding_values_arg = \
            ConverterUtil.get_arg(op, MaceKeyword.mace_padding_values_str)
        mace_check(
            padding_type_arg is not None or padding_values_arg is not None,
            "Missing padding of Deconv.")
        if padding_type_arg is not None:
            padding_type = PaddingMode(padding_type_arg.i)
            strides_arg = ConverterUtil.get_arg(op,
                                                MaceKeyword.mace_strides_str)
            mace_check(strides_arg is not None, "Missing strides of Deconv.")
            stride_h = strides_arg.ints[0]
            stride_w = strides_arg.ints[1]

            input_shape = self.get_input_shape(op.input[0])
            input_h = input_shape[1]
            input_w = input_shape[2]
            filter_tensor = self._consts[op.input[1]]
            filter_h = filter_tensor.dims[1]
            filter_w = filter_tensor.dims[2]
            output_h = op.output_shape[0].dims[1]
            output_w = op.output_shape[0].dims[2]

            if padding_type == PaddingMode.VALID:
                expected_input_h = (output_h - filter_h + stride_h) // stride_h
                expected_input_w = (output_w - filter_w + stride_w) // stride_w
            elif padding_type == PaddingMode.SAME:
                expected_input_h = (output_h + stride_h - 1) // stride_h
                expected_input_w = (output_w + stride_w - 1) // stride_w
            else:
                raise Exception(
                    'Hexagon deconv does not support padding type: %s'
                    % padding_type)
            mace_check(expected_input_h == input_h,
                       "Wrong input/output height")
            mace_check(expected_input_w == input_w, "Wrong input/output width")

            pad_h = (input_h - 1) * stride_h + filter_h - output_h
            pad_w = (input_w - 1) * stride_w + filter_w - output_w
        else:
            pad_h = padding_values_arg.ints[0]
            pad_w = padding_values_arg.ints[1]

        pad_h, pad_w = max(pad_h, 0), max(pad_w, 0)
        pad_top = pad_h // 2
        pad_bottom = pad_h - pad_top
        pad_left = pad_w // 2
        pad_right = pad_w - pad_left
        paddings = [pad_top, pad_bottom, pad_left, pad_right]
        self.add_arg_const_node(op, "/paddings:0", [1, 1, 2, 2], paddings)
Example #3
    def post_convert(self, op):
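        """Finalize a converted op: add min/max output shapes and types
        (Dequantize excluded), record the maximum output byte size per
        output, and set the padding mode.
        """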
        if op.type != MaceOp.Dequantize.name:
            min_output_shape = op.output_shape.add()
            min_output_shape.dims.extend([1])
            max_output_shape = op.output_shape.add()
            max_output_shape.dims.extend([1])
            op.output_type.extend(
                [mace_pb2.DT_UINT8, mace_pb2.DT_FLOAT, mace_pb2.DT_FLOAT])
        for i in range(len(op.output_shape)):
            out_max_byte_size = reduce(mul, op.output_shape[i].dims)
            if op.output_type[i] == mace_pb2.DT_FLOAT:
                out_max_byte_size *= 4
            op.out_max_byte_size.extend([out_max_byte_size])

        op.padding = padding_mode[PaddingMode.NA]
        arg = ConverterUtil.get_arg(op, MaceKeyword.mace_padding_str)
        if arg is not None:
            op.padding = padding_mode[PaddingMode(arg.i)]
Example #4
    def add_padding_type_for_conv_pooling(self, op, kernels, strides):
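        """Set the Hexagon padding type for a conv/pooling op.

        TensorFlow models carry an explicit padding argument; for
        PyTorch and Caffe models the padding type is inferred from the
        input, kernel, stride, and output shapes.
        """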
        arg = ConverterUtil.get_arg(op, MaceKeyword.mace_padding_str)
        if arg is not None:  # TensorFlow
            op.padding = padding_mode[PaddingMode(arg.i)]
        else:  # PyTorch, Caffe
            input_shape = self.get_input_shape(op.input[0])
            output_shape = op.output_shape[0].dims
            in_h, in_w = input_shape[1], input_shape[2]
            k_h, k_w = kernels[0], kernels[1]
            out_h, out_w = output_shape[1], output_shape[2]

            if (out_h == (in_h - k_h) // strides[0] + 1) and \
                    (out_w == (in_w - k_w) // strides[1] + 1):
                op.padding = HexagonPadding.NN_PAD_VALID.value
            elif (out_h == (in_h - 1) // strides[0] + 1) and \
                    (out_w == (in_w - 1) // strides[1] + 1):
                op.padding = HexagonPadding.NN_PAD_SAME_CAFFE.value
            else:
                mace_check(
                    False,
                    "Hexagon does not support padding type for: %s" % op)
Example #5
    def convert_ops(self):
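        """Convert every op in the MACE graph to its Hexagon NN
        counterpart, appending the const input tensors (min/max,
        strides, paddings, windows, etc.) that the Hexagon ops expect.
        """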
        print("Convert mace graph to hexagon.")
        for op in self._model.op:
            if not self._hexagon_ops.has_op(op.type):
                raise Exception('Unsupported op: %s' % op)

            self.add_port_for_tensors(op.input)
            self.add_port_for_tensors(op.output)

            if op.type == MaceOp.Conv2D.name \
                    or op.type == MaceOp.DepthwiseConv2d.name:
                channels = op.output_shape[0].dims[3]

                if len(op.input) < 3:
                    print('Supernode requires a bias add; adding one.')
                    bias_data = np.zeros(channels, dtype=int)
                    bias_tensor = self._model.tensors.add()
                    bias_tensor.data_type = mace_pb2.DT_INT32
                    bias_tensor.dims.extend([channels])
                    bias_tensor.int32_data.extend(bias_data)
                    bias_tensor.minval = 0
                    bias_tensor.maxval = 0
                    bias_tensor.name = op.name + "/bias:0"
                    bias = bias_tensor.name
                    self._consts[bias] = bias_tensor
                else:
                    bias = op.input.pop()

                self.add_min_max_const_node(op, op.input[0])
                self.add_min_max_const_node(op, op.input[1])
                strides_arg = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_strides_str)
                mace_check(strides_arg is not None,
                           "Missing strides of Conv or Depthwise Conv.")
                strides = self.add_shape_const_node(
                    op, [1, strides_arg.ints[0], strides_arg.ints[1], 1],
                    MaceKeyword.mace_strides_str)
                op.input.extend([strides, bias])
                self.add_min_max_const_node(op, bias)
                self.add_min_max_const_node(op, op.output[0], True, True,
                                            False)
            elif op.type == MaceOp.Eltwise.name:
                self.add_min_max_const_node(op, op.input[0])
                self.add_min_max_const_node(op, op.input[1])
                element_type = \
                    ConverterUtil.get_arg(op,
                                          MaceKeyword.mace_element_type_str).i
                if element_type == EltwiseType.SUM.value \
                        or element_type == EltwiseType.SUB.value:
                    self.add_min_max_const_node(op, op.output[0], True, True,
                                                False)
            elif op.type == MaceOp.BatchToSpaceND.name \
                    or op.type == MaceOp.SpaceToBatchND.name:
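                # The block shape becomes a 'strides' const input; the
                # crops (BatchToSpace) or paddings (SpaceToBatch) become
                # a 'pad' const input.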
                strides_arg = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_space_batch_block_shape_str)
                strides_tensor = self._model.tensors.add()
                strides_tensor.name = op.name + '/strides:0'
                strides_tensor.data_type = mace_pb2.DT_INT32
                strides_tensor.dims.extend([1, 1, 1, len(strides_arg.ints)])
                strides_tensor.int32_data.extend(strides_arg.ints)
                if op.type == MaceOp.BatchToSpaceND.name:
                    pad_arg = ConverterUtil.get_arg(
                        op, MaceKeyword.mace_batch_to_space_crops_str)
                else:
                    pad_arg = ConverterUtil.get_arg(
                        op, MaceKeyword.mace_paddings_str)
                pad_tensor = self._model.tensors.add()
                pad_tensor.name = op.name + '/pad:0'
                pad_tensor.data_type = mace_pb2.DT_INT32
                pad_tensor.dims.extend([1, 1, len(pad_arg.ints) // 2, 2])
                pad_tensor.int32_data.extend(pad_arg.ints)
                op.input.extend([strides_tensor.name, pad_tensor.name])
                self.add_min_max_const_node(op, op.input[0])
            elif op.type == MaceOp.DepthToSpace.name \
                    or op.type == MaceOp.SpaceToDepth.name:
                size_arg = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_space_depth_block_size_str)
                size_tensor = self._model.tensors.add()
                size_tensor.name = op.name + '/block_size:0'
                size_tensor.data_type = mace_pb2.DT_INT32
                size_tensor.dims.extend([1])
                size_tensor.int32_data.extend([size_arg.i])
                op.input.extend([size_tensor.name])
                self.add_min_max_const_node(op, op.input[0])
            elif op.type == MaceOp.Pooling.name:
                self.add_min_max_const_node(op, op.input[0])
                window_arg = ConverterUtil.get_arg(op,
                                                   MaceKeyword.mace_kernel_str)
                window_tensor = self._model.tensors.add()
                window_tensor.name = op.name + '/window:0'
                window_tensor.data_type = mace_pb2.DT_INT32
                window_tensor.dims.extend(
                    [1, window_arg.ints[0], window_arg.ints[1], 1])
                strides_arg = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_strides_str)
                strides_tensor = self._model.tensors.add()
                strides_tensor.name = op.name + '/strides:0'
                strides_tensor.data_type = mace_pb2.DT_INT32
                strides_tensor.dims.extend(
                    [1, strides_arg.ints[0], strides_arg.ints[1], 1])
                op.input.extend([window_tensor.name, strides_tensor.name])
            elif op.type == MaceOp.Reduce.name:
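                # Mean reduction over spatial axes is expressed as a
                # pooling-style window/strides pair covering the reduced
                # dimensions.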
                self.add_min_max_const_node(op, op.input[0])
                reduce_type_arg = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_reduce_type_str)
                mace_check(reduce_type_arg.i == ReduceType.MEAN.value,
                           "Hexagon Reduce only supports Mean now.")
                keep_dims_arg = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_keepdims_str)
                mace_check(keep_dims_arg.i == 1,
                           "Hexagon Reduce Mean only supports keep dims now.")
                axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
                mace_check(1 <= len(axis_arg.ints) <= 2,
                           "Hexagon Reduce Mean only supports spatial now.")
                for i in axis_arg.ints:
                    mace_check(
                        1 <= i <= 2,
                        "Hexagon Reduce Mean only supports spatial now")
                producer_op_name, _ = get_op_and_port_from_tensor(op.input[0])
                input_dims = None
                for producer_op in self._model.op:
                    if producer_op.name == producer_op_name:
                        input_dims = producer_op.output_shape[0].dims
                        break
                mace_check(input_dims is not None, "Missing input shape.")
                window_tensor = self._model.tensors.add()
                window_tensor.name = op.name + '/window:0'
                window_tensor.data_type = mace_pb2.DT_INT32
                if len(axis_arg.ints) == 1:
                    dim1, dim2 = (input_dims[1], 1) \
                        if axis_arg.ints[0] == 1 else (1, input_dims[2])
                else:
                    dim1, dim2 = input_dims[1], input_dims[2]
                window_tensor.dims.extend([1, dim1, dim2, 1])
                strides_tensor = self._model.tensors.add()
                strides_tensor.name = op.name + '/strides:0'
                strides_tensor.data_type = mace_pb2.DT_INT32
                strides_tensor.dims.extend([1, dim1, dim2, 1])
                op.input.extend([window_tensor.name, strides_tensor.name])
            elif op.type == MaceOp.ResizeBilinear.name:
                newdim_arg = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_resize_size_str)
                newdim_tensor = self._model.tensors.add()
                newdim_tensor.name = op.name + '/newdim:0'
                newdim_tensor.data_type = mace_pb2.DT_INT32
                newdim_tensor.dims.extend([len(newdim_arg.ints)])
                newdim_tensor.int32_data.extend(newdim_arg.ints)
                op.input.extend([newdim_tensor.name])
                self.add_min_max_const_node(op, op.input[0])
                align_corners_arg = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_align_corners_str)
                align_corners_tensor = self._model.tensors.add()
                align_corners_tensor.name = op.name + '/align_corners:0'
                align_corners_tensor.data_type = mace_pb2.DT_INT32
                align_corners_tensor.dims.extend([1])
                align_corners_tensor.int32_data.extend([align_corners_arg.i])
                op.input.extend([align_corners_tensor.name])
            elif op.type == MaceOp.Concat.name:
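                # Hexagon concat takes the concat axis as its first
                # input, then all input min tensors, then all input max
                # tensors.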
                inputs = copy.deepcopy(op.input)
                for ipt in inputs:
                    self.add_min_max_const_node(op, ipt, True, False)
                for ipt in inputs:
                    self.add_min_max_const_node(op, ipt, False, True)
                dim_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
                dim_tensor = self._model.tensors.add()
                dim_tensor.name = op.name + '/dim:0'
                dim_tensor.data_type = mace_pb2.DT_INT32
                dim_tensor.dims.extend([1])
                dim_tensor.int32_data.extend([dim_arg.i])
                op.input.insert(0, dim_tensor.name)
            elif op.type in [MaceOp.Softmax.name, MaceOp.Dequantize.name]:
                self.add_min_max_const_node(op, op.input[0])

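            # Common post-processing: add min/max output shapes and
            # types (Dequantize excluded) and record the maximum output
            # byte size per output.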
            if op.type != MaceOp.Dequantize.name:
                min_output_shape = op.output_shape.add()
                min_output_shape.dims.extend([1])
                max_output_shape = op.output_shape.add()
                max_output_shape.dims.extend([1])
                op.output_type.extend(
                    [mace_pb2.DT_UINT8, mace_pb2.DT_FLOAT, mace_pb2.DT_FLOAT])
            for i in range(len(op.output_shape)):
                out_max_byte_size = reduce(mul, op.output_shape[i].dims)
                if op.output_type[i] == mace_pb2.DT_FLOAT:
                    out_max_byte_size *= 4
                op.out_max_byte_size.extend([out_max_byte_size])

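            # Record the op's padding mode, defaulting to NA.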
            op.padding = padding_mode[PaddingMode.NA]
            arg = ConverterUtil.get_arg(op, MaceKeyword.mace_padding_str)
            if arg is not None:
                op.padding = padding_mode[PaddingMode(arg.i)]

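            # Finally, map the MACE op type to the Hexagon NN op name.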
            if op.type == MaceOp.Eltwise.name:
                element_type = \
                    ConverterUtil.get_arg(op,
                                          MaceKeyword.mace_element_type_str).i
                if element_type == EltwiseType.SUM.value:
                    op.type = HexagonOp.QuantizedAdd_8p8to8.name
                elif element_type == EltwiseType.SUB.value:
                    op.type = HexagonOp.QuantizedSub_8p8to8.name
                elif element_type == EltwiseType.PROD.value:
                    op.type = HexagonOp.QuantizedMul_8x8to8.name
                else:
                    mace_check(
                        False, "Hexagon does not support elementwise %s" %
                        EltwiseType(element_type).name)
            elif op.type == MaceOp.Pooling.name:
                pooling_type_arg = ConverterUtil.get_arg(
                    op, MaceKeyword.mace_pooling_type_str)
                if PoolingType(pooling_type_arg.i) == PoolingType.AVG:
                    op.type = HexagonOp.QuantizedAvgPool_8.name
                else:
                    op.type = HexagonOp.QuantizedMaxPool_8.name
            else:
                op.type = self._hexagon_ops.map_nn_op(op.type)