Example #1
    def parse_options(self):
        """Parse Conv2D options from the operator's flatbuffer table."""
        options = self._flatbuf_op.BuiltinOptions()
        o = Conv2DOptions()
        o.Init(options.Bytes, options.Pos)
        self.stride = (o.StrideH(), o.StrideW())
        self.padding = self.padding_schemes[o.Padding()]
        self.dilation_factor = (o.DilationHFactor(), o.DilationWFactor())
        self.fused_activation_function = self.activation_types[
            o.FusedActivationFunction()]
        self._flatbuf_options_obj = options
        self._supported_options = [
            'stride', 'padding', 'dilation_factor', 'fused_activation_function'
        ]
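
Example #1 indexes two lookup tables, padding_schemes and activation_types, that the snippet does not define. A plausible sketch of their contents, built from the tflite package's enums (the mapped string values here are assumptions, not the original project's definitions):

from tflite.Padding import Padding
from tflite.ActivationFunctionType import ActivationFunctionType

# Hypothetical table contents; only the enum keys come from the tflite package.
padding_schemes = {
    Padding.SAME: 'SAME',
    Padding.VALID: 'VALID',
}
activation_types = {
    ActivationFunctionType.NONE: None,
    ActivationFunctionType.RELU: 'RELU',
    ActivationFunctionType.RELU6: 'RELU6',
}
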
Example #2
File: tflite.py  Project: zhyj3038/tvm
    def convert_conv(self, op, conv_type):
        """convolution implementation."""
        try:
            from tflite.BuiltinOptions import BuiltinOptions
            from tflite.ActivationFunctionType import ActivationFunctionType
            from tflite.TensorType import TensorType
            from tflite.Operator import Operator
            from tflite.Conv2DOptions import Conv2DOptions
            from tflite.DepthwiseConv2DOptions import DepthwiseConv2DOptions
            from tflite.Padding import Padding
        except ImportError:
            raise ImportError("The tflite package must be installed")

        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) >= 2, "conv expects at least two input tensors (data and weight)"

        input_tensor = input_tensors[0]
        input_tensor_idx = input_tensor.tensor_idx
        weight_tensor = input_tensors[1]

        is_depthwise_conv = False
        if conv_type == 'conv2d':
            assert op.BuiltinOptionsType() == BuiltinOptions.Conv2DOptions
            op_options = op.BuiltinOptions()
            conv_options = Conv2DOptions()
            conv_options.Init(op_options.Bytes, op_options.Pos)
        elif conv_type == 'depthwise':
            is_depthwise_conv = True
            assert (op.BuiltinOptionsType() ==
                    BuiltinOptions.DepthwiseConv2DOptions)
            op_options = op.BuiltinOptions()
            conv_options = DepthwiseConv2DOptions()
            conv_options.Init(op_options.Bytes, op_options.Pos)
            depth_multiplier = conv_options.DepthMultiplier()
            assert depth_multiplier == 1, \
                "The TF frontend transforms the depth multiplier to 1, " \
                "regardless of whether it was originally 0.25, 0.5, or any " \
                "other value"
        else:
            raise tvm.error.OpNotImplemented(
                'Operator {} is not supported for frontend TFLite.'.format(
                    conv_type))

        stride_h = conv_options.StrideH()
        stride_w = conv_options.StrideW()
        dilation_h = conv_options.DilationHFactor()
        dilation_w = conv_options.DilationWFactor()
        padding = conv_options.Padding()
        fused_activation_fn = conv_options.FusedActivationFunction()

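        # TFLite activations are laid out NHWC, so the shape unpacks as
        # (batch, height, width, channels).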
        _, input_h, input_w, _ = input_tensor.tensor.ShapeAsNumpy()

        if is_depthwise_conv:
            multiplier, kernel_h, kernel_w, in_channels = \
                weight_tensor.tensor.ShapeAsNumpy()
            assert multiplier == depth_multiplier
        else:
            output_channels, kernel_h, kernel_w, _ = \
                weight_tensor.tensor.ShapeAsNumpy()

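        # Effective kernel extent once dilation is applied:
        # dilation * (kernel - 1) + 1; SAME padding must use this size.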
        dilated_kernel_h = dilation_h * (kernel_h - 1) + 1
        dilated_kernel_w = dilation_w * (kernel_w - 1) + 1

        params = {
            'kernel_size': [kernel_h, kernel_w],
            'strides': [stride_h, stride_w],
            'dilation': [dilation_h, dilation_w],
            'padding': [0, 0]
        }

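        # Depthwise convolution maps to a grouped conv2d with
        # groups == in_channels, so each input channel is convolved
        # with its own set of `multiplier` filters.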
        if is_depthwise_conv:
            params['channels'] = int(in_channels * multiplier)
            params['groups'] = int(in_channels)
        else:
            params['channels'] = int(output_channels)

        # weight tensor type should be UINT8 (quantization) or FLOAT32
        weight_tensor_type = weight_tensor.tensor.Type()
        assert weight_tensor_type in (TensorType.UINT8, TensorType.FLOAT32)
        weight_tensor_type_str = self.get_tensor_type_str(weight_tensor_type)

        in_expr = self.get_expr(input_tensor_idx)
        weight_value = self.get_tensor_value(weight_tensor)

        if is_depthwise_conv:
            # TFLite is M KH KW IC, we require IC M KH KW
            weight_value = weight_value.transpose((3, 0, 1, 2))
        else:
            # TFLite is OC KH KW IC, we require OC IC KH KW
            weight_value = weight_value.transpose((0, 3, 1, 2))

        weight_expr = self.exp_tab.new_const(weight_value,
                                             dtype=weight_tensor_type_str)

        if padding == Padding.VALID:
            pass
        elif padding == Padding.SAME:
            pad_top, pad_bottom = get_pad_value(input_h, dilated_kernel_h,
                                                stride_h)
            pad_left, pad_right = get_pad_value(input_w, dilated_kernel_w,
                                                stride_w)
            in_expr = _op.nn.pad(data=in_expr,
                                 pad_width=((0, 0), (0, 0),
                                            (pad_top, pad_bottom),
                                            (pad_left, pad_right)))
        else:
            raise tvm.error.OpAttributeUnimplemented(
                'Padding format {} is not supported for operator Conv.'.format(
                    padding))

        out = _op.nn.conv2d(data=in_expr, weight=weight_expr, **params)

        # if we have bias
        if len(input_tensors) == 3:
            bias_tensor = input_tensors[2]
            bias_tensor_type = bias_tensor.tensor.Type()
            # bias tensor type should be INT32 (quantization) or FLOAT32
            assert bias_tensor_type in (TensorType.INT32, TensorType.FLOAT32)
            bias_tensor_type_str = self.get_tensor_type_str(bias_tensor_type)
            bias_expr = self.exp_tab.new_const(
                self.get_tensor_value(bias_tensor), dtype=bias_tensor_type_str)
            out = _op.nn.bias_add(out, bias_expr)

        # If we have fused activations
        if fused_activation_fn != ActivationFunctionType.NONE:
            out = self.convert_fused_activation_function(
                out, fused_activation_fn)

        return out
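
Example #2 calls a get_pad_value helper that is not shown in the snippet. A minimal sketch of what such a helper computes, assuming it follows TensorFlow's SAME-padding rule (output extent = ceil(input / stride), with any odd padding placed after):

import math

def get_pad_value(data, kernel, stride):
    """Compute (pad_before, pad_after) for SAME padding along one axis."""
    out = int(math.ceil(float(data) / float(stride)))
    pad = max(0, (out - 1) * stride + kernel - data)
    pad_before = pad // 2
    pad_after = pad - pad_before
    return pad_before, pad_after
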
Example #3
    def __init__(self, op, op_type, tflite_interpreter):
        Layer.__init__(self, op, op_type, tflite_interpreter)

        # Parse the Conv2D options from the operator's flatbuffer union table.
        options = op.BuiltinOptions()
        self.tflite_conv_parser = Conv2DOptions()
        self.tflite_conv_parser.Init(options.Bytes, options.Pos)
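
All three examples use the same two-step flatbuffer pattern: fetch the options union table with BuiltinOptions(), then Init() a typed Conv2DOptions object over that table's bytes. A self-contained sketch of the pattern, assuming model_buf holds the raw bytes of a .tflite file whose first operator is a Conv2D:

from tflite.Model import Model
from tflite.Conv2DOptions import Conv2DOptions

model = Model.GetRootAsModel(model_buf, 0)  # model_buf: raw .tflite bytes (assumption)
op = model.Subgraphs(0).Operators(0)        # assumes the first operator is a Conv2D

# Fetch the union table, then overlay the typed options object on it.
options = op.BuiltinOptions()
conv_options = Conv2DOptions()
conv_options.Init(options.Bytes, options.Pos)
print(conv_options.StrideH(), conv_options.StrideW())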