# Code example #1
    def convert_ops(self, sess):
        """Dispatch every TF graph op to its registered converter, then
        refresh output shapes and materialize constant tensors."""
        converters = self._op_converters
        for graph_op in self._tf_graph.get_operations():
            mace_check(
                graph_op.type in converters,
                "Mace does not support tensorflow op type %s yet"
                % graph_op.type)
            converters[graph_op.type](graph_op)
        self.update_output_shapes(sess)
        self.convert_tensors()
# Code example #2
    def convert_subpixel(self, tf_op):
        """Fuse the previously recorded subpixel sub-graph (bounded by
        self._subpixel_Ops["input"]/["output"]) into a single Subpixel op."""
        op = self._mace_net_def.op.add()

        # Name the fused op after the sub-graph's leading scope segment.
        slash_pos = tf_op.name.find('/')
        op.name = tf_op.name[0:slash_pos] + '/Subpixel'
        op.type = MaceOp.Subpixel.name

        in_op = self._subpixel_Ops["input"]
        out_op = self._subpixel_Ops["output"]

        op.input.extend([in_op.inputs[0].name])
        op.output.extend([out.name for out in out_op.outputs])

        in_dims = self.infer_tensor_shape_old(in_op.inputs[0])
        out_dims = self.infer_tensor_shape_old(out_op.outputs[0])

        # Pixel-shuffle: channel count shrinks by scale^2, so the upscale
        # factor is the square root of the channel ratio (NHWC dim 3).
        scale = math.sqrt(in_dims[3] / out_dims[3])

        # NOTE: "subpiexl" spelling comes from the project keyword table.
        scale_arg = op.arg.add()
        scale_arg.name = MaceKeyword.mace_subpiexl_scale
        scale_arg.i = int(scale)

        for out in out_op.outputs:
            self.infer_tensor_shape(op.output_shape.add(), out)

        data_type_arg = op.arg.add()
        data_type_arg.name = 'T'

        try:
            dtype = in_op.get_attr('T')
            if dtype == tf.int32:
                data_type_arg.i = tensorrt_pb2.DT_INT32
            elif dtype == tf.float32:
                data_type_arg.i = self._option.data_type
            else:
                mace_check(False, "data type %s not supported" % dtype)
        except ValueError:
            # No 'T' attribute; fall back to 'SrcT', then to the configured
            # default data type.
            try:
                dtype = in_op.get_attr('SrcT')
                if dtype == tf.int32 or dtype == tf.bool:
                    data_type_arg.i = tensorrt_pb2.DT_INT32
                elif dtype == tf.float32:
                    data_type_arg.i = self._option.data_type
                else:
                    mace_check(False, "data type %s not supported" % dtype)
            except ValueError:
                data_type_arg.i = self._option.data_type

        ConverterUtil.add_data_format_arg(op, DataFormat.NHWC)
# Code example #3
    def convert_cast(self, tf_op):
        """Convert a TF Cast op, mapping its 'DstT' attribute to the MACE
        output type (defaulting to the configured data type if absent)."""
        op = self.convert_general_op(tf_op)
        op.type = MaceOp.Cast.name

        try:
            dst = tf_op.get_attr('DstT')
        except ValueError:
            # Cast without an explicit destination type: use the default.
            op.output_type.extend([self._option.data_type])
        else:
            if dst == tf.int32:
                op.output_type.extend([tensorrt_pb2.DT_INT32])
            elif dst == tf.float32:
                op.output_type.extend([self._option.data_type])
            elif dst == tf.uint8:
                op.output_type.extend([tensorrt_pb2.DT_UINT8])
            else:
                mace_check(False, "data type %s not supported" % dst)
# Code example #4
    def convert_tensors(self):
        """Copy the value of every Const op (not listed in _skip_tensor)
        into the MACE net def as a weight tensor."""
        for const_op in self._tf_graph.get_operations():
            if const_op.type != TFOpType.Const.name:
                continue
            out_name = const_op.outputs[0].name
            if out_name in self._skip_tensor:
                continue

            tensor = self._mace_net_def.tensors.add()
            tensor.name = out_name
            # eval() requires an active TF session (set up by the caller).
            value = const_op.outputs[0].eval()
            tensor.dims.extend(list(value.shape))

            tf_dt = const_op.get_attr('dtype')
            if tf_dt == tf.float32:
                tensor.data_type = tensorrt_pb2.DT_FLOAT
                tensor.float_data.extend(value.astype(np.float32).flat)
            elif tf_dt == tf.int32:
                tensor.data_type = tensorrt_pb2.DT_INT32
                tensor.int32_data.extend(value.astype(np.int32).flat)
            else:
                mace_check(False,
                           "Not supported tensor type: %s" % tf_dt.name)
# Code example #5
    def convert_conv2d(self, tf_op):
        """Convert TF Conv2D / DepthwiseConv2dNative / Conv2DBackpropInput.

        Copies padding and stride attributes for all variants; dilation for
        forward convs only.  For deconv (Conv2DBackpropInput) an
        output-shape arg is added and the inputs are reordered from TF's
        (output_shape, filter, input) to MACE's (input, filter, output_shape).
        """
        op = self.convert_general_op(tf_op)
        if tf_op.type == TFOpType.DepthwiseConv2dNative.name:
            op.type = MaceOp.DepthwiseConv2d.name
        elif tf_op.type == TFOpType.Conv2DBackpropInput.name:
            op.type = MaceOp.Deconv2D.name
        else:
            op.type = MaceOp.Conv2D.name

        padding_arg = op.arg.add()
        padding_arg.name = MaceKeyword.mace_padding_str
        padding_arg.i = self.padding_mode[tf_op.get_attr(tf_padding_str)].value
        strides_arg = op.arg.add()
        strides_arg.name = MaceKeyword.mace_strides_str
        # NHWC layout: the strides/dilations attrs are [1, h, w, 1];
        # MACE keeps only [h, w].
        strides_arg.ints.extend(tf_op.get_attr(tf_strides_str)[1:3])
        if op.type != MaceOp.Deconv2D.name:
            dilation_arg = op.arg.add()
            dilation_arg.name = MaceKeyword.mace_dilations_str
            try:
                dilation_val = tf_op.get_attr(tf_dilations_str)[1:3]
            except ValueError:
                # Older graphs may lack the dilations attr; default to 1x1.
                dilation_val = [1, 1]
            dilation_arg.ints.extend(dilation_val)
        else:
            mace_check(
                len(tf_op.inputs) >= 3, "deconv should have (>=) 3 inputs.")
            output_shape_arg = op.arg.add()
            output_shape_arg.name = MaceKeyword.mace_output_shape_str
            if tf_op.inputs[0].op.type == TFOpType.Const.name:
                output_shape_value = \
                    tf_op.inputs[0].eval().astype(np.int32).flat
                output_shape_arg.ints.extend(output_shape_value)
            else:
                # Dynamic output shape: leave the arg empty.  (Fix: the
                # original extended with `{}` — an empty dict that only
                # worked by accident because iterating it yields no keys.)
                output_shape_arg.ints.extend([])
            del op.input[:]
            op.input.extend([
                tf_op.inputs[2].name, tf_op.inputs[1].name,
                tf_op.inputs[0].name
            ])
# Code example #6
    def convert_general_op(self, tf_op):
        """Create a MACE op mirroring tf_op's name, type, inputs, outputs
        and data type, and tag it as NHWC.  Returns the new op."""
        op = self._mace_net_def.op.add()
        op.name = tf_op.name
        op.type = tf_op.type
        op.input.extend([inp.name for inp in tf_op.inputs])
        op.output.extend([out.name for out in tf_op.outputs])
        for out in tf_op.outputs:
            self.infer_tensor_shape(op.output_shape.add(), out)

        data_type_arg = op.arg.add()
        data_type_arg.name = 'T'
        # Resolve the data type from the 'T' attr, falling back to 'SrcT',
        # falling back to the configured default.
        data_type_arg.i = self._option.data_type
        for attr_name in ('T', 'SrcT'):
            try:
                dtype = tf_op.get_attr(attr_name)
            except ValueError:
                continue  # attribute absent; try the next candidate
            if dtype == tf.int32:
                data_type_arg.i = tensorrt_pb2.DT_INT32
            elif dtype == tf.float32:
                data_type_arg.i = self._option.data_type
            elif dtype == tf.uint8:
                data_type_arg.i = tensorrt_pb2.DT_UINT8
            elif attr_name == 'SrcT' and dtype == tf.bool:
                # bool is accepted only via 'SrcT', as in the original logic.
                data_type_arg.i = tensorrt_pb2.DT_INT32
            else:
                mace_check(False, "data type %s not supported" % dtype)
            break

        ConverterUtil.add_data_format_arg(op, DataFormat.NHWC)

        return op