Example #1
    def parse(self):
        logger.debug("Parsing %s...", self.type)
        op = self.tflite
        opcode = self.model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()
        assert (opcode in self.TypeMapping)

        assert (op.InputsLength() == 1)
        assert (op.OutputsLength() == 1)

        ilayout = Layout('NHWC', 'NCHW')
        self.parseInput(0, ilayout)

        op_opt = op.BuiltinOptions()
        option = tflite.Pool2DOptions()
        option.Init(op_opt.Bytes, op_opt.Pos)
        self.attrs['auto_pad'] = PaddingMapping[option.Padding()]
        self.attrs['kernel_shape'] = [
            option.FilterHeight(), option.FilterWidth()
        ]
        self.attrs['strides'] = [option.StrideH(), option.StrideW()]

        olayout = Layout('NHWC', 'NCHW')
        ot = self.parseOutput(0, olayout)

        handleFusedActivation(self, option, ot)

        self.setParsed()
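The Layout('NHWC', 'NCHW') objects used above describe how a tensor's shape is permuted when moving from TFLite's NHWC layout to ONNX's NCHW layout. A minimal sketch of that idea (a stand-in, not the project's actual Layout class, whose interface may differ):

def perm_from_layouts(source, target):
    # For each axis letter in the target layout, find its index in the source.
    return [source.index(axis) for axis in target]

def transform_shape(shape, perm):
    return [shape[i] for i in perm]

perm = perm_from_layouts('NHWC', 'NCHW')        # [0, 3, 1, 2]
print(transform_shape([1, 224, 224, 3], perm))  # [1, 3, 224, 224]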
Example #2
    def parse(self):
        logger.debug("Parsing %s...", self.type)
        op = self.tflite
        opcode = self.model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()
        assert (opcode in self.TypeMapping)

        assert (op.InputsLength() == 2), "TFLite Resize has exactly two inputs"
        assert (op.OutputsLength() == 1)

        # ONNX Resize doesn't have layout semantic, but TFLite requires NHWC
        ilayout = Layout('NHWC', 'NCHW')
        im = self.parseInput(0, ilayout)

        # ROI and Scale are not optional until Resize v13;
        # currently (v11) we create them as empty initializers.
        # From v13 onward, we can try not to include them in the graph.
        empty_input = self.TFactory.createEmptyTensor()
        empty_input.addConsumer(self)
        self.inputs.append(empty_input)  # ROI
        self.inputs.append(empty_input)  # Scale

        # output size
        sz = self.parseInput(1)
        # TFLite sizes are (H_new, W_new) while ONNX needs (N, C, H_new, W_new)
        assert len(sz.data) == 2
        assert len(im.shape) == 4
        sz.shape = [len(im.shape)]
        sz.data = np.concatenate(
            (np.array([im.shape[0], im.shape[-1]]), sz.data)).astype('int64')
        sz.dtype = mapping.DTYPE_NAME2ONNX['int64']

        # output
        olayout = Layout('NHWC', 'NCHW')
        self.parseOutput(0, olayout)

        # options
        if opcode == tflite.BuiltinOperator.RESIZE_BILINEAR:
            self.attrs['mode'] = 'linear'
            option = tflite.ResizeBilinearOptions()
        elif opcode == tflite.BuiltinOperator.RESIZE_NEAREST_NEIGHBOR:
            self.attrs['mode'] = 'nearest'
            option = tflite.ResizeNearestNeighborOptions()
        else:
            assert False, "Unreachable path!"

        op_opt = op.BuiltinOptions()
        option.Init(op_opt.Bytes, op_opt.Pos)

        if option.AlignCorners():
            self.attrs['coordinate_transformation_mode'] = 'align_corners'
        elif option.HalfPixelCenters():
            self.attrs['coordinate_transformation_mode'] = 'half_pixel'
        else:
            raise NotImplementedError("This path has not been tried")

        self.setParsed()
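The sizes rewrite above can be checked in isolation. Below is a small sketch with made-up values standing in for im.shape (NHWC) and the TFLite sizes tensor, showing how the 2-element (H_new, W_new) vector becomes the 4-element (N, C, H_new, W_new) vector that ONNX Resize expects:

import numpy as np

im_shape = [1, 16, 16, 3]          # NHWC: batch, height, width, channels
tflite_sizes = np.array([32, 32])  # (H_new, W_new)

# Keep the batch and channel counts, then append the new spatial sizes.
onnx_sizes = np.concatenate(
    (np.array([im_shape[0], im_shape[-1]]), tflite_sizes)).astype('int64')
print(onnx_sizes)  # [ 1  3 32 32]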
Example #3
    def preserveOutputSpatialSemantic(self):
        # If the input (or output) tensor of `Reshape` carries data layout
        # semantics, we need to insert a Transpose before (or after) the
        # Reshape operator.
        # https://github.com/jackwish/tflite2onnx/issues/28

        # An example of inserting `Transpose` after `Reshape`:
        #  ---------
        # | Reshape |
        #  ---------
        #      |
        #      |  to_transpose (newly created tensor)
        #      |  (e.g. NHWC)
        #   -------
        #  | Trans |  e.g. perm: (0, 3, 1, 2)
        #   -------
        #      |
        #      |  transposed (original `Reshape` output)
        #      |  (e.g. NCHW)
        #    ------
        #   | Conv |
        #    ------
        #

        assert (self.status.parsed)
        transposed = self.outputs[0]

        to_transpose_name = 'TFLITE2ONNX_ToTranspose_%s' % transposed.name
        to_transpose = self.TFactory.getWithRef(transposed, to_transpose_name,
                                                True)

        # Construct a layout like that of the original `Reshape` output,
        # e.g. source: NCHW, target: NHWC.
        # Exchange the source and target layouts for layout preservation.
        layout = Layout(transposed.layout.target, transposed.layout.source)
        to_transpose.shape = layout.transform(transposed.shape)
        to_transpose.setParsed()

        # Construct the additional transpose after `Reshape`
        trans = Transpose(self.TFactory, -1)
        # e.g. From NHWC to NCHW
        trans.attrs['perm'] = transposed.layout.perm

        trans.inputs.append(to_transpose)
        transposed.replaceProducer(self, trans)
        self.replaceOutput(transposed, to_transpose)

        trans.outputs.append(transposed)
        to_transpose.addProducer(self)
        to_transpose.addConsumer(trans)
        trans.setParsed()
        # Rename the new `Transpose` operator
        # to avoid a name conflict with 'Reshape'
        trans.name = 'TFLITE2ONNX_Transpose_%s' % transposed.name

        self.post.append(trans)
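As a quick sanity check of the perm used in the diagram, here is a throwaway NumPy snippet (made-up data, not part of the converter) showing that (0, 3, 1, 2) indeed turns an NHWC tensor into NCHW:

import numpy as np

nhwc = np.zeros((1, 8, 8, 4))            # N, H, W, C
nchw = np.transpose(nhwc, (0, 3, 1, 2))  # perm taken from the diagram above
print(nchw.shape)                        # (1, 4, 8, 8), i.e. N, C, H, W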
Example #4
    def parse(self):
        logger.debug("Parsing %s...", self.type)
        op = self.tflite
        opcode = self.model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()
        assert (opcode in self.TypeMapping)

        assert (op.InputsLength() == 4)
        assert (op.OutputsLength() == 1)

        # input
        ilayout = Layout('NHWC', 'NCHW')
        it = self.parseInput(2, ilayout)

        # weight
        wlayout = Layout('CHWM', 'MCHW')
        wt = self.parseInput(1, wlayout)

        # bias
        self.parseInput(3, is_bias=True)

        # shape
        osi = self.tflite.Inputs(0)
        out_shape = self.TFactory.get(osi, None, False)
        out_shape.parse()

        # output
        olayout = Layout('NHWC', 'NCHW')
        ot = self.parseOutput(0, olayout)

        assert (collections.Counter(ot.shape) == collections.Counter(
            out_shape.data))

        # options
        op_opt = op.BuiltinOptions()
        option = tflite.TransposeConvOptions()
        option.Init(op_opt.Bytes, op_opt.Pos)

        self.attrs['dilations'] = [1, 1]
        self.attrs['group'] = 1
        self.attrs['kernel_shape'] = wt.shape[1:3]
        self.attrs['strides'] = [option.StrideH(), option.StrideW()]
        # XXX Not enabled, as ONNXRuntime has a limitation inferring pads for non-1 dilations
        # self.attrs['auto_pad'] = PaddingMapping[option.Padding()]
        self.attrs['pads'] = computePaddingSize(option.Padding(),
                                                it.shape[1:3],
                                                self.attrs['kernel_shape'],
                                                self.attrs['strides'],
                                                self.attrs['dilations'])

        # handleFusedActivation(self, option, ot)

        self.setParsed()
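The Counter comparison above only verifies that the ONNX output shape and the shape tensor stored in the TFLite model contain the same dimension values, ignoring their order. A standalone illustration with hypothetical values:

import collections

onnx_output_shape = [1, 16, 28, 28]    # hypothetical NCHW-ordered shape
tflite_shape_tensor = [1, 28, 28, 16]  # hypothetical NHWC-ordered shape tensor
print(collections.Counter(onnx_output_shape) ==
      collections.Counter(tflite_shape_tensor))  # True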
Example #5
    def parse(self):
        logger.debug("Parsing %s...", self.type)
        op = self.tflite
        opcode = self.model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()
        assert (opcode in self.TypeMapping)

        assert (op.InputsLength() == 3), "TFLite Conv always has bias"
        assert (op.OutputsLength() == 1)

        # input
        ilayout = Layout('NHWC', 'NCHW')
        it = self.parseInput(0, ilayout)

        # weight
        if self.isDepthwise:
            wlayout = Layout('CHWM', 'MCHW')
        else:
            wlayout = Layout('OHWI', 'OIHW')
        wt = self.parseInput(1, wlayout)

        # bias
        self.parseInput(2, is_bias=True)

        # output
        olayout = Layout('NHWC', 'NCHW')
        ot = self.parseOutput(0, olayout)

        # options
        op_opt = op.BuiltinOptions()
        if self.isDepthwise:
            option = tflite.DepthwiseConv2DOptions()
        else:
            option = tflite.Conv2DOptions()
        option.Init(op_opt.Bytes, op_opt.Pos)

        self.attrs['dilations'] = [
            option.DilationHFactor(),
            option.DilationWFactor()
        ]
        self.attrs['group'] = wt.shape[3] if self.isDepthwise else 1
        self.attrs['kernel_shape'] = wt.shape[1:3]
        self.attrs['strides'] = [option.StrideH(), option.StrideW()]
        # XXX Not enabled, as ONNXRuntime has a limitation inferring pads for non-1 dilations
        # self.attrs['auto_pad'] = PaddingMapping[option.Padding()]
        if self.isDepthwise:
            assert (option.DepthMultiplier() == 1)
        self.attrs['pads'] = computePaddingSize(option.Padding(),
                                                it.shape[1:3],
                                                self.attrs['kernel_shape'],
                                                self.attrs['strides'],
                                                self.attrs['dilations'])

        handleFusedActivation(self, option, ot)

        self.setParsed()
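The pads attribute comes from computePaddingSize, which is not shown here. As a rough reference, the TFLite-style SAME padding rule for a single spatial dimension can be sketched as follows (a hypothetical helper; the project's function may differ in details):

import math

def same_pads_1d(in_size, kernel, stride, dilation=1):
    # Effective kernel extent once dilation is applied.
    effective_kernel = (kernel - 1) * dilation + 1
    out_size = math.ceil(in_size / stride)
    total_pad = max((out_size - 1) * stride + effective_kernel - in_size, 0)
    # ONNX pads are split into (begin, end) per spatial dimension.
    return total_pad // 2, total_pad - total_pad // 2

print(same_pads_1d(in_size=7, kernel=3, stride=2))  # (1, 1)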
Example #6
    def preserveOutputSpatialSemantic(self):
        # https://github.com/jackwish/tflite2onnx/issues/28
        # An example of inserting `Transpose` after `Reshape`:
        #  ---------
        # | Reshape |
        #  ---------
        #      |
        #      |  to_transpose (newly created tensor)
        #      |  (e.g. NHWC)
        #   -------
        #  | Trans |  e.g. perm: (0, 3, 1, 2)
        #   -------
        #      |
        #      |  transposed (original `Reshape` output)
        #      |  (e.g. NCHW)
        #    ------
        #   | Conv |
        #    ------

        assert (self.status.parsed)
        transposed = self.outputs[0]

        to_transpose_name = 'TFLITE2ONNX_ToTranspose_%s' % transposed.name
        to_transpose = self.TFactory.getWithRef(transposed, to_transpose_name,
                                                True)

        # Construct a layout from the original output of `Reshape`
        layout = Layout(transposed.layout.target, transposed.layout.source)
        to_transpose.shape = layout.transform(transposed.shape)
        to_transpose.setParsed()

        # Construct the additional transpose after `Reshape`
        trans = Transpose(self.TFactory, -1)
        trans.attrs['perm'] = transposed.layout.perm

        trans.inputs.append(to_transpose)
        transposed.replaceProducer(self, trans)
        self.replaceOutput(transposed, to_transpose)

        trans.outputs.append(transposed)
        to_transpose.addProducer(self)
        to_transpose.addConsumer(trans)
        trans.setParsed()
        # Rename the new `Transpose` operator to avoid a name conflict with 'Reshape'
        trans.name = 'TFLITE2ONNX_Transpose_%s' % transposed.name

        self.post.append(trans)
Example #7
    def preserveInputSpatialSemantic(self):
        # https://github.com/jackwish/tflite2onnx/issues/28
        # An example of inserting `Transpose` before `Reshape`:
        #    ------
        #   | Conv |
        #    ------
        #      |
        #      |  to_transpose (original input of `Reshape`)
        #      |  (e.g. NCHW)
        #   -------
        #  | Trans |  e.g. perm: (0, 2, 3, 1)
        #   -------
        #      |
        #      |  transposed (newly created tensor)
        #      |  (e.g. NHWC)
        #   ---------
        #  | Reshape |
        #   ---------

        assert (self.status.parsed)
        to_transpose = self.inputs[0]

        transposed_name = 'TFLITE2ONNX_Transposed_%s' % to_transpose.name
        transposed = self.TFactory.getWithRef(to_transpose, transposed_name,
                                              True)

        # Construct the layout from the original input of `Reshape`
        layout = Layout(to_transpose.layout.target, to_transpose.layout.source)
        transposed.shape = layout.transform(to_transpose.shape)
        transposed.setParsed()

        # Construct the additional transpose before `Reshape`
        trans = Transpose(self.TFactory, -1)
        trans.attrs['perm'] = layout.perm

        trans.inputs.append(to_transpose)
        to_transpose.replaceConsumer(self, trans)
        self.replaceInput(to_transpose, transposed)

        trans.outputs.append(transposed)
        transposed.addProducer(trans)
        transposed.addConsumer(self)
        trans.setParsed()

        self.pre.append(trans)
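A quick check (again with throwaway NumPy data) that the perm in this diagram, (0, 2, 3, 1), is the inverse of the (0, 3, 1, 2) used when transposing after Reshape:

import numpy as np

nchw = np.zeros((1, 4, 8, 8))                  # N, C, H, W
nhwc = np.transpose(nchw, (0, 2, 3, 1))        # back to N, H, W, C
print(nhwc.shape)                              # (1, 8, 8, 4)
print(np.transpose(nhwc, (0, 3, 1, 2)).shape)  # (1, 4, 8, 8), restored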
Example #8
    def convert(self, explicit_layouts):
        logger.debug("Converting...")

        logger.debug("Handling data layout...")
        for op in self.ops:
            for t in op.inputs + op.outputs:
                if t.name in explicit_layouts:
                    assert (t.layout is None)
                    layouts = explicit_layouts[t.name]
                    assert (len(layouts) == 2)
                    t.layout = Layout(layouts[0], layouts[1])
        self._propagateLayout()
        self._collectOpAndTensor()

        logger.debug("Translating quantization semantic...")
        for t in self.value_info | self.initializer:
            deqt = handleQuantizationTensor(self.TFactory, t)
            for i, o in enumerate(self.outputs):
                if o == t:
                    self.outputs[i] = deqt
        self._collectOpAndTensor()

        logger.debug("Graph:\n%s", str(self))

        self.validate()
        for op in self.op_all:
            op.convert()

        logger.debug("Making ONNX...")
        onodes = [n.onnx for n in self.op_all]
        oinputs = [t.onnx for t in self.inputs]
        ooutputs = [t.onnx for t in self.outputs]
        initializer = [t.onnx for t in self.initializer]
        value_info = [t.onnx for t in self.value_info]

        self.onnx = helper.make_graph(onodes,
                                      'pre-alpha',
                                      oinputs,
                                      ooutputs,
                                      initializer=initializer,
                                      value_info=value_info)
        self.setConverted()
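The explicit_layouts argument consumed at the top of convert() maps a tensor name to a pair of layout strings. A hypothetical example of what a caller might pass (the tensor names are made up):

explicit_layouts = {
    'input_image': ('NHWC', 'NCHW'),    # (source layout, target layout)
    'conv_0_output': ('NHWC', 'NCHW'),
}
# Each entry becomes t.layout = Layout(layouts[0], layouts[1]) for the
# matching tensor, as done in the loop above.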
Example #9
    def parse(self):
        logger.debug("Parsing %s...", self.type)
        op = self.tflite
        opcode = self.model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()

        assert (opcode in self.TypeMapping)
        assert (op.InputsLength() == 3)
        assert (op.OutputsLength() == 1)

        # oshape
        osi = op.Inputs(0)
        oshape = self.TFactory.getData(self.model, self.graph, osi, 'int32')

        # X
        ilayout = Layout('NHWC', 'NCHW')
        self.parseInput(2, ilayout)

        # weight
        wlayout = Layout('OHWI', 'IOHW')
        wt = self.parseInput(1, wlayout)

        # FIXME: we don't have a model containing bias.

        # output
        olayout = Layout('NHWC', 'NCHW')
        ot = self.parseOutput(0, olayout)
        assert ((ot.shape == oshape).all())

        # options
        op_opt = op.BuiltinOptions()
        option = tflite.TransposeConvOptions()
        option.Init(op_opt.Bytes, op_opt.Pos)

        self.attrs['kernel_shape'] = wt.shape[1:3]
        self.attrs['strides'] = [option.StrideH(), option.StrideW()]
        oslayout = Layout('NHWC', 'NCHW')
        self.attrs['output_shape'] = oslayout.transform(oshape)
        self.setParsed()
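The final output_shape handling mirrors the layout handling elsewhere: the shape tensor read from the model is in NHWC order and is permuted into NCHW order. A small illustration with hypothetical values:

oshape_nhwc = [1, 28, 28, 16]                   # as stored by TFLite
perm = ['NHWC'.index(axis) for axis in 'NCHW']  # [0, 3, 1, 2]
print([oshape_nhwc[i] for i in perm])           # [1, 16, 28, 28]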