Example #1
    def _common(cls, node: TFLiteNode, **kwargs):
        custom_opts = node.get_custom_options()
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']
        importer = kwargs['importer']

        inputs = [all_nodes[t] for t in node.input]
        outputs = [
            all_nodes.get(node.output[idx]) if idx < len(node.output) else None
            for idx in range(4)
        ]
        # inp_shapes = [input[2].shape for input in inputs]

        if 'max_bb_before_nms' not in custom_opts:
            custom_opts['max_bb_before_nms'] = 300

        params = SSDDetectorParameters(node.name, parameters=custom_opts)

        overridden_outputs = []
        for idx, output in enumerate(outputs):
            if output:
                overridden_outputs.append(node.output[idx])
                continue
            oparams = G.add_output()
            otensor = TensorBase("Detect_%s" % idx)
            overridden_outputs.append(otensor)
            importer.provisional_outputs[otensor] = (oparams, 0, None)
        # covers the case where not all outputs are generated by the conversion tool
        node.override_outputs(overridden_outputs)

        for idx, inp in enumerate(inputs):
            G.add_edge(
                NNEdge(from_node=inp[0],
                       to_node=params,
                       from_idx=inp[1],
                       to_idx=idx))

        if opts.get('load_quantization'):
            in_qtypes = [
                QType.from_min_max_sq(tensor.qtype.min_val,
                                      tensor.qtype.max_val) if
                (tensor.qtype.is_asymmetric
                 or not tensor.qtype.signed) else tensor.qtype
                for tensor in node.input
            ]
            o_boxes_qtype = QType(min_val=-2,
                                  max_val=2,
                                  dtype=np.int16,
                                  scale=2**(-14))
            o_scores_qtype = node.input[1].qtype
            o_class_qtype = QType(scale=1, dtype=np.int8)
            qrec = QRec.scaled(in_qs=in_qtypes,
                               out_qs=[
                                   o_boxes_qtype, o_class_qtype,
                                   o_scores_qtype, o_class_qtype
                               ])
            G.quantization[NodeId(params)] = qrec

        return params
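The box outputs above are pinned to Q1.14: int16 with scale 2**-14 represents [-2, 2) exactly, which comfortably covers normalized box coordinates. A quick numpy sanity check of that choice (illustrative values, not from the importer):

import numpy as np

scale = 2 ** -14  # Q1.14: int16 covers [-2.0, 2.0)
boxes = np.array([-2.0, -0.5, 0.725, 1.99993], dtype=np.float32)
q = np.clip(np.round(boxes / scale), -2**15, 2**15 - 1).astype(np.int16)
# round-trip error is at most half an LSB
assert np.allclose(q * scale, boxes, atol=scale / 2)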
Example #2
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(ReshapeOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        # TF2 seems to use the second input whereas TF1 uses the opts
        new_shape = None
        if node_opts:
            new_shape = list(node_opts.NewShapeAsNumpy())
        elif len(inputs) > 1:
            set_shape_tensor = list(cls._verify_constant(inputs[1]))
            node.input[1].used = True
            new_shape = list(set_shape_tensor)
        else:
            raise ValueError(
                f"Cannot assess new_shape for Reshape Parameter: {node.name}")

        if -1 in new_shape:
            new_shape_size = reduce(lambda x, y: x * 1
                                    if y == -1 else x * y, new_shape, 1)
            inp_size = reduce(lambda x, y: x * y
                              if y is not None else x, x_shape, 1)
            new_shape[new_shape.index(-1)] = inp_size // new_shape_size

        if None in x_shape:
            if 1 in new_shape:
                old_batch_dim = x_shape.index(None)
                new_batch_dim = new_shape.index(1)
                if old_batch_dim != new_batch_dim:
                    LOG.info(
                        "node %s moved batch dimension for axis %s to axis %s",
                        node.name, old_batch_dim, new_batch_dim)
                new_shape[new_batch_dim] = None
            else:
                raise ValueError(
                    "unable to determine movement of unspcified axis in node %s"
                    % node.name)

        pnew_shape = ProvisionalDim(new_shape)
        old_shape = Dim.unnamed(cls.remove_unspecified_dim(x_shape),
                                is_ordered=True)
        new_shape = Dim.unnamed(cls.remove_unspecified_dim(new_shape),
                                is_ordered=True)

        params = ReshapeParameters(node.name,
                                   old_shape=old_shape,
                                   shape=new_shape)

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                [node.input[0]], node.output)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pnew_shape)
        return params
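The -1 handling above can be read in isolation: the wildcard is replaced by the input element count divided by the product of the known target dims, with unknown (None) batch dims counted as 1. A minimal standalone sketch (plain Python; resolve_wildcard is an illustrative name, not an importer helper):

from functools import reduce

def resolve_wildcard(new_shape, in_shape):
    # product of the known target dims, treating -1 as 1
    known = reduce(lambda x, y: x * (1 if y == -1 else y), new_shape, 1)
    # input element count, treating unknown (None) dims as 1
    total = reduce(lambda x, y: x * (y if y is not None else 1), in_shape, 1)
    resolved = list(new_shape)
    resolved[resolved.index(-1)] = total // known
    return resolved

assert resolve_wildcard([-1, 4], [None, 2, 2, 4]) == [4, 4]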
Example #3
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(CastOptions)
        G = kwargs['G']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]

        if node_opts:
            in_dtype = TFLiteTensorWrapper.TF_TO_NUMPY_TYPE[
                node_opts.InDataType()]
            out_dtype = TFLiteTensorWrapper.TF_TO_NUMPY_TYPE[
                node_opts.OutDataType()]
        else:
            in_dtype = out_dtype = None
        if cls.is_constant(x):
            LOG.info("reducing %s to a constant", node.name)
            val = cls.get_constant(x)
            if out_dtype:
                val = val.astype(out_dtype)
            params = ConstantInputParameters(node.name, value=val)
        else:
            params = CastParameters(node.name,
                                    in_dtype=in_dtype,
                                    out_dtype=out_dtype)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))

        all_nodes[node.output[0]] = (params, 0, deepcopy(x[2]))
        return params
Example #4
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(TransposeConvOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[2]
        x_shape = x[2].shape
        in_b, in_h, in_w, in_c = tuple(x_shape)
        pout_shape = [
            dim if x_shape[idx] is not None else None
            for idx, dim in enumerate(cls.get_constant(inputs[0]))
        ]
        out_b, out_h, out_w, out_c = tuple(pout_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # ['out_c', 'h', 'w', 'in_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        stride_w = node_opts.StrideW()
        stride_h = node_opts.StrideH()
        # compute padding
        pad = node_opts.Padding()
        if pad == Padding.SAME:
            pad_h = ((in_h - 1) * stride_h + filt_h - out_h)
            pad_w = ((in_w - 1) * stride_w + filt_w - out_w)
            pad_top = pad_h // 2
            pad_left = pad_w // 2
            pad = PadDim(pad_top,
                         pad_h - pad_top,
                         pad_left,
                         pad_w - pad_left,
                         same_type='balanced_right')
        else:
            pad = PadDim(0)

        params = TransposeConv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(stride_h, stride_w),
            padding=pad,
            in_dims_hint=[['h', 'w', 'c'],
                          cls.TF_LITE_FILTER_ORDER.copy()],
            out_dims_hint=[['h', 'w', 'c']])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        pout_dims = ProvisionalDim(pout_shape)

        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
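The SAME-padding branch above inverts the forward convolution relation: the total padding for a transposed convolution is (in - 1) * stride + filter - out, split so the extra pixel lands on the bottom/right ('balanced_right'). A small check of that arithmetic (values assumed for illustration):

def same_pad_transpose(in_sz, out_sz, filt, stride):
    total = (in_sz - 1) * stride + filt - out_sz
    lead = total // 2            # top or left
    return lead, total - lead    # trailing side takes the odd pixel

# upsampling 4 -> 8 with a 3x3 filter at stride 2
assert same_pad_transpose(4, 8, 3, 2) == (0, 1)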
Example #5
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(StridedSliceOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        # begin end stride
        vec_begin = list(cls._verify_constant(inputs[1]))
        vec_end = list(cls._verify_constant(inputs[2]))
        vec_stride = list(cls._verify_constant(inputs[3]))
        for i in range(1, 4):
            node.input[i].used = True
        if any([vec is None for vec in [vec_begin, vec_end, vec_stride]]):
            raise NotImplementedError(
                "strided slice with variable begin end or stride is not supported")
        spec = zip(vec_begin, vec_end, vec_stride)
        begin_mask = node_opts.BeginMask()
        ellipsis_mask = node_opts.EllipsisMask()
        end_mask = node_opts.EndMask()
        new_axis_mask = node_opts.NewAxisMask()
        shrink_axis_mask = node_opts.ShrinkAxisMask()

        act_slice, out_shape, can_reshape = StridedSliceParameters.get_slice(
            x_shape, spec,
            begin_mask,
            end_mask, ellipsis_mask,
            new_axis_mask, shrink_axis_mask)

        if cls.is_constant(x):
            LOG.info("reducing %s to a constant", node.name)
            x_val = cls.get_constant(x)
            params = StridedSliceParameters(node.name, act_slice=act_slice, out_shape=out_shape)
            x_val = params.numpy_slice(x_val)
            params = ConstantInputParameters(node.name, value=x_val)
        else:
            if can_reshape:
                if list(x_shape) == list(out_shape):
                    LOG.info("converting strided slice %s to a noop", node.name)
                    params = NoOPParameters(node.name)
                else:
                    LOG.info("converting strided slice %s to a reshape", node.name)
                    in_shape = Dim.unnamed(x[2].known_shape, is_ordered=True)
                    out_shape = Dim.unnamed(out_shape, is_ordered=True)
                    params = ReshapeParameters(node.name, old_shape=in_shape, shape=out_shape)
            else:
                params = StridedSliceParameters(node.name, act_slice=act_slice, out_shape=out_shape)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization([node.input[0]], node.output)
        all_nodes[node.output[0]] = (params, 0, x[2].infer_mapping(out_shape, allow_bad_length=True))
        return params
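Once get_slice has resolved the masks, the lowering above degrades to the cheapest equivalent op: identical shape means no-op, a pure shape change means reshape, anything else stays a strided slice. The dispatch reduced to its skeleton (illustrative; the real code builds parameter nodes instead of returning strings):

def lower_strided_slice(in_shape, out_shape, can_reshape):
    if can_reshape:
        return 'noop' if list(in_shape) == list(out_shape) else 'reshape'
    return 'strided_slice'

assert lower_strided_slice((1, 4, 4), (1, 4, 4), True) == 'noop'
assert lower_strided_slice((1, 4, 4), (4, 4), True) == 'reshape'
assert lower_strided_slice((1, 4, 4), (1, 2, 4), False) == 'strided_slice'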
Example #6
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(ConcatenationOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        inp_shapes = [input[2].shape for input in inputs]

        buffer_idxes = [tensor.buffer_idx for tensor in node.input]
        non_zero_idxes = [idx for idx in buffer_idxes if idx != 0]
        if len(set(non_zero_idxes)) != len(non_zero_idxes):
            raise NotImplementedError(
                "concats with multiple versions of the same input are not supported. "
                "This is normally a graph design problem.")

        axis = node_opts.Axis()
        if any(inp_shape[axis] is None for inp_shape in inp_shapes):
            raise ValueError("concat on undefined axis in node %s" % node.name)

        def red_func(x, y):
            return y.copy() if x is None else [
                (elem if y[idx] is not None and elem is not None else None)
                if idx != axis else elem + y[axis]
                for idx, elem in enumerate(x)
            ]

        pout_shape = reduce(red_func, inp_shapes)

        if all(cls.is_constant(inp) for inp in inputs):
            # cls.remove_none_from_constants(inputs, pout_shape)
            LOG.info("reducing %s to a constant", node.name)
            value = np.concatenate([cls.get_constant(inp) for inp in inputs],
                                   axis=axis)
            params = ConstantInputParameters(node.name,
                                             value=value,
                                             constant_store=G.constant_store)
        else:
            axis -= sum(1 if dim is None else 0 for dim in pout_shape[:axis:])
            params = ConcatParameters(node.name, axis=axis, axis_hint=None)

            for idx, inp in enumerate(inputs):
                inp_node, inp_idx = cls._maybe_insert_reshape(
                    G, inp, inp_shapes[idx], pout_shape)
                G.add_edge(
                    NNEdge(from_node=inp_node,
                           to_node=params,
                           from_idx=inp_idx,
                           to_idx=idx))
        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                node.input, node.output)
        cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
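red_func folds the input shapes into a provisional output shape: the concat axis accumulates, and every other axis survives only if it is known in both operands. The same fold in isolation (merge_shapes is an illustrative name):

from functools import reduce

def merge_shapes(shapes, axis):
    def fold(acc, shape):
        if acc is None:
            return shape.copy()
        return [acc[i] + shape[axis] if i == axis else
                (d if d is not None and shape[i] is not None else None)
                for i, d in enumerate(acc)]
    return reduce(fold, shapes, None)

assert merge_shapes([[None, 4, 8], [None, 4, 8]], axis=2) == [None, 4, 16]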
Example #7
    def __init__(self, model: Model, subgraph: SubGraph,
                 subgraph_idx: int) -> None:
        self._model = model
        self._subgraph = subgraph
        self._subgraph_idx = subgraph_idx
        self._tensors = [
            TFLiteTensorWrapper(self._subgraph.Tensors(idx), self._model)
            for idx in range(self._subgraph.TensorsLength())
        ]
        self._nodes = [
            TFLiteNode(self._subgraph.Operators(idx), idx, self._model, self)
            for idx in range(self._subgraph.OperatorsLength())
        ]
Example #8
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(GatherOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        gather_tensor = list(cls._verify_constant(inputs[1]))
        axis = node_opts.Axis()

        if cls._is_constant(x):
            inp = cls._get_constant(x)
            inp = np.take(inp, gather_tensor, axis=axis)
            pout_shape = inp.shape
            LOG.info("reducing %s to a constant", node.name)
            params = ConstantInputParameters(node.name,
                                             value=inp,
                                             dims=pout_shape)
        else:
            if x_shape[axis] is None:
                raise ValueError(
                    f'GATHER {node.name} on batch axis not supported')
            slices = [sequence_to_slice(elem) for elem in gather_tensor]
            strides = set([
                slices[idx + 1][0] - sl[0]
                for idx, sl in enumerate(slices[:-1])
            ])
            lengths = set([abs(sl[1] - sl[0]) for sl in slices])
            if len(strides) != 1 or len(lengths) != 1:
                raise ValueError(f'Irregular GATHER {node.name} not supported')
            # total gathered length is the sum of the per-slice lengths
            out_len = sum(abs(sl[1] - sl[0]) for sl in slices)
            pout_shape = x_shape.copy()
            pout_shape[axis] = out_len
            axis -= sum(1 if dim is None else 0 for dim in x_shape[:axis:])
            LOG.info("reducing %s to an overlapped copy", node.name)
            params = GatherParameters(node.name,
                                      indices=gather_tensor,
                                      axis=axis)

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                node.input, node.output)
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
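The regularity test above only accepts gathers whose index list decomposes into equally spaced, equal-length runs. With hypothetical (start, stop) pairs standing in for the output of sequence_to_slice:

def is_regular(slices):
    # slices: (start, stop) pairs covering the gathered indices
    strides = {b[0] - a[0] for a, b in zip(slices, slices[1:])}
    lengths = {abs(stop - start) for start, stop in slices}
    return len(strides) <= 1 and len(lengths) == 1

assert is_regular([(0, 2), (4, 6), (8, 10)])   # stride 4, length 2
assert not is_regular([(0, 2), (3, 6)])        # lengths differ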
Example #9
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(CastOptions)
        G = kwargs['G']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]

        if node_opts:
            in_qtype = QType(dtype=TFLiteTensorWrapper.TF_TO_NUMPY_TYPE[node_opts.InDataType()])
            out_qtype = QType(dtype=TFLiteTensorWrapper.TF_TO_NUMPY_TYPE[node_opts.OutDataType()])
            return cls.common_quantize(in_qtype, out_qtype, node, **kwargs)

        params = NoOPParameters(node.name, desc='cast with no type information')
        G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        all_nodes[node.output[0]] = (params, 0, deepcopy(x[2]))
        return params
Example #10
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(SqueezeOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        if node_opts.SqueezeDimsIsNone():
            new_shape = [dim for dim in x_shape if dim != 1]
        else:
            axes = node_opts.SqueezeDimsAsNumpy()
            axes = np.where(axes < 0, axes + len(x_shape), axes)
            if any(x_shape[axis] not in (None, 1) for axis in axes):
                raise ValueError(f'invalid squeeze of dim > 1 in {node.name}')
            new_shape = [
                dim for idx, dim in enumerate(x_shape) if idx not in axes
            ]

        if cls.is_constant(x):
            LOG.info("reducing %s to a constant", node.name)
            val = np.reshape(cls.get_constant(x), new_shape)
            params = ConstantInputParameters(node.name,
                                             value=val,
                                             dims=Dim.unnamed(val.shape))
        else:
            pnew_shape = ProvisionalDim(new_shape)
            old_shape = Dim.unnamed(cls.remove_unspecified_dim(x_shape),
                                    is_ordered=True)
            new_shape = Dim.unnamed(cls.remove_unspecified_dim(new_shape),
                                    is_ordered=True)
            params = ReshapeParameters(node.name,
                                       old_shape=old_shape,
                                       shape=new_shape)

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                [node.input[0]], node.output)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pnew_shape)
        return params
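Negative squeeze axes are normalized by the np.where line before the dim-1 check; the same normalization in isolation:

import numpy as np

rank = 4
axes = np.array([-1, 0])
axes = np.where(axes < 0, axes + rank, axes)  # -1 becomes rank - 1
assert list(axes) == [3, 0]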
Example #11
    def __init__(self,
                 model: Model,
                 subgraph: SubGraph,
                 subgraph_idx: int,
                 name_cache=None,
                 anonymise=False) -> None:
        self._model = model
        self._subgraph = subgraph
        self._subgraph_idx = subgraph_idx
        self._tensors = [
            TFLiteTensorWrapper(self._subgraph.Tensors(idx), self._model)
            for idx in range(self._subgraph.TensorsLength())
        ]
        self._nodes = [
            TFLiteNode(self._subgraph.Operators(idx),
                       idx,
                       self._model,
                       self,
                       name_cache=name_cache,
                       anonymise=anonymise)
            for idx in range(self._subgraph.OperatorsLength())
        ]
Example #12
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(DepthwiseConv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x = cls.remove_known_batch_dimension(G, x, node)
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # ['in_c', 'h', 'w', 'out_c']
        filt_in_c, filt_h, filt_w, filt_out_c = tuple(filt_shape)

        # get filter dimensions
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)

        # multiplier should match filter
        check(filt_dim.out_c == node_opts.DepthMultiplier() * in_c,
              "invalid multiplier")

        groups = filt_dim.out_c // node_opts.DepthMultiplier()

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([filt_out_c]),
                value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

        # TFLITE produces single channel input DW convolutions with the
        # multiplier equal to the number of out channels. This is just
        # a normal convolution and since we don't handle the channel
        # multiplier at present (but can) just convert them to normal
        # convolutions
        convert_to_conv = in_c == 1 and groups == 1

        if convert_to_conv:
            filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)
            params = Conv2DParameters(
                node.name,
                filt=filt_dim,
                stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                dilation=DilationDim(node_opts.DilationHFactor(),
                                     node_opts.DilationWFactor()),
                padding=pad,
                has_bias=True,
                in_dims_hint=[['h', 'w', 'c'],
                              cls.TF_LITE_FILTER_ORDER.copy(), ['out_c']],
                out_dims_hint=[['h', 'w', 'c']])
        else:
            filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)
            params = Conv2DParameters(
                node.name,
                filt=filt_dim,
                stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                dilation=DilationDim(node_opts.DilationHFactor(),
                                     node_opts.DilationWFactor()),
                padding=pad,
                groups=groups,
                multiplier=node_opts.DepthMultiplier(),
                has_bias=True,
                tf_depthwise=True,
                in_dims_hint=[['h', 'w', 'c'],
                              cls.TF_LITE_DW_FILTER_ORDER.copy(), ['out_c']],
                out_dims_hint=[['h', 'w', 'c']])

        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
        cls.new_load_filter_parameters(G,
                                       params,
                                       params.filter.actual_shape,
                                       params.filter.get_order_idx('out_c'),
                                       node.input[0],
                                       weights_node,
                                       bias_node,
                                       node.output[0],
                                       opts,
                                       dw_to_pw=convert_to_conv)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size(
            [in_dim,
             Dim.unnamed(filt_dim.shape),
             Dim.unnamed([filt_out_c])])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
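The depthwise bookkeeping above boils down to two facts: the filter's out_c must equal multiplier * in_c, and a depthwise convolution over a single input channel (with groups == 1) is just an ordinary convolution. A compact restatement (dw_conv_plan is illustrative, not an importer method):

def dw_conv_plan(in_c, filt_out_c, multiplier):
    assert filt_out_c == multiplier * in_c, "invalid multiplier"
    groups = filt_out_c // multiplier
    # with one input channel and one group, every output channel sees
    # the whole input: a plain convolution
    return 'conv' if in_c == 1 and groups == 1 else 'depthwise'

assert dw_conv_plan(in_c=1, filt_out_c=8, multiplier=8) == 'conv'
assert dw_conv_plan(in_c=16, filt_out_c=16, multiplier=1) == 'depthwise'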
Example #13
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(PackOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        inp_shapes = [input[2].shape for input in inputs]

        values_count = node_opts.ValuesCount()
        check(
            len(inputs) == values_count,
            "invalid tflite file - values count not equal to inputs")

        buffer_idxes = [tensor.buffer_idx for tensor in node.input]
        check(
            len(set(buffer_idxes)) == len(buffer_idxes),
            "packs with multiple versions of the same input are not supported. This is normally a graph design problem."
        )

        axis = node_opts.Axis()
        dimension_size = len(inp_shapes)
        if axis < 0:
            axis += dimension_size

        check(all(shape == inp_shapes[0] for shape in inp_shapes[1::]),
              "invalid tflite file - pack inputs not the same")

        # prepare shapes of all tensors
        pconcat_out_shape = inp_shapes[0].copy()
        pconcat_out_shape.insert(axis, values_count)

        pconcat_in_shape = inp_shapes[0].copy()
        pconcat_in_shape.insert(axis, 1)

        preshape_in_shape = inp_shapes[0].copy()

        # remove nones from constants
        cls.remove_none_from_constants(inputs, preshape_in_shape)

        # remove nones from reshape shapes
        reshape_in_shape = cls.remove_unspecified_dim(preshape_in_shape)
        concat_in_shape = cls.remove_unspecified_dim(pconcat_in_shape)

        if all(cls.is_constant(inp) for inp in inputs):
            LOG.info("reducing %s to a constant", node.name)
            value = np.stack([cls.get_constant(inp) for inp in inputs],
                             axis=axis)
            params = ConstantInputParameters(node.name,
                                             value=value,
                                             constant_store=G.constant_store)
        else:
            axis -= sum(1 if dim is None else 0
                        for dim in pconcat_out_shape[:axis:])
            params = ConcatParameters(node.name, axis=axis, axis_hint=None)

            # insert reshapes on each input to add concat axis
            for idx, inp in enumerate(inputs):
                rparams = ReshapeParameters(node.name + "_%s" % idx,
                                            old_shape=reshape_in_shape,
                                            shape=concat_in_shape)
                G.add_edge(
                    NNEdge(from_node=inp[0], to_node=rparams, from_idx=inp[1]))
                G.add_edge(NNEdge(from_node=rparams, to_node=params))
                if opts.get('load_quantization'):
                    G.quantization[NodeId(rparams)] = cls.load_tf_quantization(
                        [node.input[idx]], [node.input[idx]])

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                node.input, node.output)

        all_nodes[node.output[0]] = (params, 0,
                                     ProvisionalDim(pconcat_out_shape))
        return params
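PACK is lowered here as a reshape that inserts a size-1 axis on each input, followed by a concat on that axis. numpy confirms the equivalence:

import numpy as np

a, b = np.zeros((2, 3)), np.ones((2, 3))
axis = 1
stacked = np.stack([a, b], axis=axis)  # shape (2, 2, 3)
lowered = np.concatenate(
    [t.reshape(t.shape[:axis] + (1,) + t.shape[axis:]) for t in (a, b)],
    axis=axis)
assert np.array_equal(stacked, lowered)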
Example #14
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(Conv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # ['out_c', 'h', 'w', 'in_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        # get filter dimensions
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([filt_out_c]),
                value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(),
                                 node_opts.DilationWFactor()),
            padding=pad,
            has_bias=True,
            in_dims_hint=SparseList([['h', 'w', 'c'],
                                     cls.TF_LITE_FILTER_ORDER.copy(),
                                     ['out_c']]),
            out_dims_hint=SparseList([['h', 'w', 'c']]),
            constant_store=G.constant_store)
        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
        cls.new_load_filter_parameters(G, params, node.input[0], weights_node,
                                       bias_node, node.output[0], opts)
        # if opts.get('load_dequantized'):
        #     weights_node.value, bias_node.value = cls.load_dequantized_filter_parameters(
        #         node.input, bias_node.value)
        # else:
        #     qrec, weights_node.value, bias_node.value = cls.load_filter_parameters(G, params, node.input, bias_node.value,
        #                                                                            node.output, opts)
        #     if qrec:
        #         G.quantization[NodeId(weights_node)].out_qs[0] = qrec.in_qs[1]
        #         G.quantization[NodeId(bias_node)].out_qs[0] = qrec.in_qs[2]

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size(
            [in_dim,
             Dim.unnamed(filt_dim.shape),
             Dim.unnamed([filt_out_c])])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example #15
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(DepthwiseConv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        filt_tensor = node.input[1]
        filt_shape = filt[2].shape
        # ['in_c', 'h', 'w', 'out_c']
        filt_in_c, filt_h, filt_w, filt_out_c = tuple(filt_shape)

        # get filter dimensions
        filt_tensor.used = True
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)

        # multiplier should match filter
        check(filt_dim.out_c == node_opts.DepthMultiplier() * in_c,
              "invalid multiplier")

        groups = filt_dim.out_c // node_opts.DepthMultiplier()

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        has_bias = len(inputs) > 2
        if has_bias:
            node.input[2].used = True

        # TFLITE produces single channel input DW convolutions with the
        # multiplier equal to the number of out channels. This is just
        # a normal convolution and since we don't handle the channel
        # multiplier at present (but can) just convert them to normal
        # convolutions
        convert_to_conv = in_c == 1 and groups == 1

        if convert_to_conv:
            filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)
            params = Conv2DParameters(
                node.name,
                filt=filt_dim,
                stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                dilation=DilationDim(node_opts.DilationHFactor(),
                                     node_opts.DilationWFactor()),
                padding=pad,
                has_bias=has_bias,
                in_dims_hint=SparseList([['h', 'w', 'c']]),
                out_dims_hint=SparseList([['h', 'w', 'c']]),
                constant_store=G.constant_store)
        else:
            filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)
            params = Conv2DParameters(
                node.name,
                filt=filt_dim,
                stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                dilation=DilationDim(node_opts.DilationHFactor(),
                                     node_opts.DilationWFactor()),
                padding=pad,
                groups=groups,
                multiplier=node_opts.DepthMultiplier(),
                has_bias=has_bias,
                tf_depthwise=True,
                in_dims_hint=SparseList([['h', 'w', 'c']]),
                out_dims_hint=SparseList([['h', 'w', 'c']]),
                constant_store=G.constant_store)

        if opts.get('load_dequantized'):
            cls.load_dequantized_filter_parameters(params,
                                                   node.input,
                                                   convert_to_conv,
                                                   is_dw=True)
        else:
            cls.load_filter_parameters(G,
                                       params,
                                       node.input,
                                       node.output,
                                       opts,
                                       converted_to_conv=convert_to_conv)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example #16
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(ConcatenationOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        inp_shapes = [input[2].shape for input in inputs]

        buffer_idxes = [tensor.buffer_idx for tensor in node.input]
        non_zero_idxes = [idx for idx in buffer_idxes if idx != 0]
        duplicates = [
            idx for idx, count in Counter(non_zero_idxes).items() if count > 1
        ]
        if duplicates:
            LOG.warning(
                f'concat {node.name} has duplicate inputs. Inserting copies but this is not very efficient.'
            )
            for idx in duplicates:
                dup_idxes = [i for i, x in enumerate(buffer_idxes) if x == idx]
                for dup_idx in dup_idxes[1:]:
                    cparams = CopyParameters(
                        G.unique_name(
                            f'{node.name}_dup_{dup_idxes[0]}_{dup_idx}'))
                    dup_inp = inputs[dup_idx]
                    G.add_edge(
                        NNEdge(from_node=dup_inp[0],
                               from_idx=dup_inp[1],
                               to_node=cparams))
                    inputs[dup_idx] = tuple([cparams, 0] + list(dup_inp[2:]))

        axis = node_opts.Axis()
        if any(inp_shape[axis] is None for inp_shape in inp_shapes):
            raise ValueError("concat on undefined axis in node %s" % node.name)

        def red_func(x, y):
            return y.copy() if x is None else [
                (elem if y[idx] is not None and elem is not None else None)
                if idx != axis else elem + y[axis]
                for idx, elem in enumerate(x)
            ]

        pout_shape = reduce(red_func, inp_shapes)

        if all(cls.is_constant(inp) for inp in inputs):
            # cls.remove_none_from_constants(inputs, pout_shape)
            LOG.info("reducing %s to a constant", node.name)
            value = np.concatenate([cls.get_constant(inp) for inp in inputs],
                                   axis=axis)
            params = ConstantInputParameters(node.name, value=value)
        else:
            axis -= sum(1 if dim is None else 0 for dim in pout_shape[:axis:])
            params = ConcatParameters(node.name, axis=axis, axis_hint=None)

            for idx, inp in enumerate(inputs):
                inp_node, inp_idx = cls._maybe_insert_reshape(
                    G, inp, inp_shapes[idx], pout_shape)
                G.add_edge(
                    NNEdge(from_node=inp_node,
                           to_node=params,
                           from_idx=inp_idx,
                           to_idx=idx))
        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                node.input, node.output)
        cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
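The duplicate-input handling above hinges on Counter: any non-zero buffer index that appears more than once gets a copy node inserted for each repeat. The detection step in isolation:

from collections import Counter

buffer_idxes = [3, 0, 3, 5, 0]  # 0 marks tensors without a constant buffer
non_zero = [i for i in buffer_idxes if i != 0]
duplicates = [i for i, n in Counter(non_zero).items() if n > 1]
assert duplicates == [3]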
Example #17
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(Conv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        filt_tensor = node.input[1]
        filt_shape = filt[2].shape
        # ['out_c', 'h', 'w', 'in_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        # get filter dimensions
        filt_tensor.used = True
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        has_bias = len(inputs) > 2
        if has_bias:
            node.input[2].used = True

        params = Conv2DParameters(node.name,
                                  filt=filt_dim,
                                  stride=StrideDim(node_opts.StrideH(),
                                                   node_opts.StrideW()),
                                  dilation=DilationDim(
                                      node_opts.DilationHFactor(),
                                      node_opts.DilationWFactor()),
                                  padding=pad,
                                  has_bias=has_bias,
                                  in_dims_hint=SparseList([['h', 'w', 'c']]),
                                  out_dims_hint=SparseList([['h', 'w', 'c']]),
                                  constant_store=G.constant_store)

        if opts.get('load_dequantized'):
            cls.load_dequantized_filter_parameters(params, node.input)
        else:
            cls.load_filter_parameters(G, params, node.input, node.output,
                                       opts)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example #18
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(Conv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x = cls.remove_known_batch_dimension(G, x, node)
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # ['out_c', 'h', 'w', 'in_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        # get filter dimensions
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([filt_out_c]),
                value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check
        groups = in_c // filt_in_c
        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(),
                                 node_opts.DilationWFactor()),
            groups=groups,
            padding=pad,
            has_bias=True,
            in_dims_hint=[['h', 'w', 'c'],
                          cls.TF_LITE_FILTER_ORDER.copy(), ['out_c']],
            out_dims_hint=[['h', 'w', 'c']])
        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
        cls.new_load_filter_parameters(G, params, params.filter.actual_shape,
                                       params.filter.get_order_idx('out_c'),
                                       node.input[0], weights_node, bias_node,
                                       node.output[0], opts)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size(
            [in_dim,
             Dim.unnamed(filt_dim.shape),
             Dim.unnamed([filt_out_c])])
        pout_dims = ProvisionalDim([None] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        oparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (oparams, 0, pout_dims)
        return oparams
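Unlike Example #14, this version also admits grouped convolutions: when the filter's in_c divides the input channel count, groups = in_c // filt_in_c. The relation stated on its own (conv_groups is an illustrative name):

def conv_groups(in_c, filt_in_c):
    assert in_c % filt_in_c == 0, 'input channels must divide evenly'
    return in_c // filt_in_c

assert conv_groups(16, 16) == 1  # dense convolution
assert conv_groups(16, 4) == 4   # 4 groups of 4 channels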