示例#1
0
    def convert_pool2d(self, op, pool_type):
        """Convert a TFLite 2-D pooling operator (average or max) to a Relay expr."""
        try:
            from tflite.BuiltinOptions import BuiltinOptions
            from tflite.ActivationFunctionType import ActivationFunctionType
            from tflite.Operator import Operator
            from tflite.Pool2DOptions import Pool2DOptions
            from tflite.Padding import Padding
        except ImportError:
            raise ImportError("The tflite package must be installed")

        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) == 1, "input tensors length should be 1"
        input_tensor = input_tensors[0]

        # Deserialize the Pool2DOptions flatbuffer table for this operator.
        assert op.BuiltinOptionsType() == BuiltinOptions.Pool2DOptions
        opt_table = op.BuiltinOptions()
        pool_options = Pool2DOptions()
        pool_options.Init(opt_table.Bytes, opt_table.Pos)

        strides = (pool_options.StrideH(), pool_options.StrideW())
        pool_size = (pool_options.FilterHeight(), pool_options.FilterWidth())
        padding = pool_options.Padding()
        fused_activation_fn = pool_options.FusedActivationFunction()

        in_expr = self.get_expr(input_tensor.tensor_idx)

        # Translate TFLite's symbolic SAME/VALID padding into explicit
        # per-edge pad amounts for the Relay pooling ops.
        _, input_h, input_w, _ = input_tensor.tensor.ShapeAsNumpy()
        if padding == Padding.VALID:
            pad = [0, 0]
        elif padding == Padding.SAME:
            pad_top, pad_bottom = get_pad_value(input_h, pool_size[0], strides[0])
            pad_left, pad_right = get_pad_value(input_w, pool_size[1], strides[1])
            pad = [pad_top, pad_left, pad_bottom, pad_right]
        else:
            raise NotImplementedError(
                "Not support padding format: {}".format(padding))

        params = {
            'pool_size': pool_size,
            'strides': strides,
            'padding': pad
        }

        if pool_type == "average":
            out = _op.nn.avg_pool2d(in_expr, **params)
        elif pool_type == "max":
            out = _op.nn.max_pool2d(in_expr, **params)
        else:
            raise ValueError("Not support pool type: {}".format(pool_type))

        # Apply any activation TFLite fused into the pooling op.
        if fused_activation_fn != ActivationFunctionType.NONE:
            out = self.convert_fused_activation_function(
                out, fused_activation_fn)

        return out
示例#2
0
 def parse_options(self):
     """Decode this op's Pool2DOptions table into padding, stride,
     filter size and fused-activation attributes."""
     raw_options = self._flatbuf_op.BuiltinOptions()
     opts = Pool2DOptions()
     opts.Init(raw_options.Bytes, raw_options.Pos)

     self.padding = self.padding_schemes[opts.Padding()]
     self.stride = (opts.StrideH(), opts.StrideW())
     # NOTE(review): stored as (width, height) — presumably this class's
     # convention; confirm against consumers before changing the order.
     self.filter_size = (opts.FilterWidth(), opts.FilterHeight())
     self.fused_activation_function = self.activation_types[
         opts.FusedActivationFunction()]

     self._flatbuf_options_obj = raw_options
     self._supported_options = [
         'padding', 'stride', 'filter_size', 'fused_activation_function'
     ]
示例#3
0
 def _macs_for_op(op: TFLiteOperator, mem_access_weight=0, compute_weight=1):
     """Return a weighted cost estimate for one op: memory loads scaled by
     `mem_access_weight` plus multiply-accumulate work scaled by `compute_weight`."""
     loads = compute = 0
     code = op.opcode
     if code == BuiltinOperator.CONV_2D:
         _, kernel, bias = op.inputs
         out_c, k_h, k_w, in_c = kernel.shape
         n, out_h, out_w, _ = op.output.shape
         macs = n * out_h * out_w * out_c * k_h * k_w * in_c
         loads, compute = 2 * macs, macs
         if bias is not None:
             loads += n * out_h * out_w * out_c
     elif code == BuiltinOperator.DEPTHWISE_CONV_2D:
         _, kernel, bias = op.inputs
         _, k_h, k_w, channels = kernel.shape
         n, out_h, out_w, _ = op.output.shape
         macs = n * channels * out_h * out_w * k_h * k_w
         loads, compute = 2 * macs, macs
         if bias is not None:
             loads += n * channels * out_h * out_w
     elif code == BuiltinOperator.MEAN:
         # TODO: this is global pooling, verify before proceeding
         n, in_h, in_w, channels = op.inputs[0].shape
         macs = n * in_h * in_w * channels
         loads = compute = macs
     elif code in (BuiltinOperator.MAX_POOL_2D, BuiltinOperator.AVERAGE_POOL_2D):
         from tflite.Pool2DOptions import Pool2DOptions
         # Pooling window size lives in the op's flatbuffer options table.
         opt = Pool2DOptions()
         opt._tab = op.options
         n, out_h, out_w, channels = op.output.shape
         macs = n * out_h * out_w * channels * opt.FilterHeight() * opt.FilterWidth()
         loads = compute = macs
     elif code == BuiltinOperator.FULLY_CONNECTED:
         in_tensor, kernel, bias = op.inputs
         n, out_dim = op.output.shape
         macs = n * in_tensor.shape[-1] * out_dim
         loads, compute = 2 * macs, macs
         if bias is not None:
             loads += n * out_dim
     elif code == BuiltinOperator.ADD:
         # TODO: not precise when inputs are of different shapes
         num_terms = len(op.inputs)
         elems_per_term = np.prod(op.output.shape)
         loads = num_terms * elems_per_term
         compute = (num_terms - 1) * elems_per_term
     return mem_access_weight * loads + compute_weight * compute
示例#4
0
    def __init__(self, op, op_type, tflite_interpreter):
        """Set up a parser for an average-pool op's Pool2DOptions table.

        Args:
            op: the TFLite operator; its BuiltinOptions table holds the
                pooling parameters.
            op_type: operator type tag, forwarded to the ``Layer`` base.
            tflite_interpreter: interpreter instance, forwarded to the
                ``Layer`` base.
        """
        Layer.__init__(self, op, op_type, tflite_interpreter)

        # Fetch the options table once — the original called
        # self.op.BuiltinOptions() twice for Bytes and Pos of the same object.
        builtin_options = self.op.BuiltinOptions()
        self.tflite_avgpool_parser = Pool2DOptions()
        self.tflite_avgpool_parser.Init(builtin_options.Bytes,
                                        builtin_options.Pos)