Code Example #1
File: pool_layers.py  Project: jwj04ok/ONNX_Convertor
  def generate(self):

      node_output_detail = self.tflite_interpreter._get_tensor_details(self.op.Outputs(0))
      node_input_detail = self.tflite_interpreter._get_tensor_details(self.op.Inputs(0))

      kernel_shape = [self.tflite_avgpool_parser.FilterHeight(), self.tflite_avgpool_parser.FilterWidth()]
      strides_len = [self.tflite_avgpool_parser.StrideH(), self.tflite_avgpool_parser.StrideW()]

      padding_stradegy = 'NONE' 
      if self.tflite_avgpool_parser.Padding() is Padding.SAME:
          padding_stradegy = 'SAME' 
      elif self.tflite_avgpool_parser.Padding() is Padding.VALID:
          padding_stradegy = 'VALID' 

      input_feature_map_shape = node_input_detail['shape']

      avg_pool_name = self.node_name
      avg_pool_node = helper.make_node(
          'AveragePool',
          inputs=self.input_nodes_name,
          outputs=[avg_pool_name],
          kernel_shape=kernel_shape,
          strides=strides_len,
          pads=tflite_utils.getPadding(input_feature_map_shape, kernel_shape, strides_len, None, padding_stradegy),
          name=avg_pool_name
      )
      out_shape_info = helper.make_tensor_value_info(
          avg_pool_name,
          TensorProto.FLOAT,
          tflite_utils.tflite2onnx_shape_map(node_output_detail['shape'].tolist())
      )

      # update tables
      self.value_infos.append(out_shape_info)
      self.node_list.append(avg_pool_node)

      # Generate quantization info for the input and output tensors
      output_quantization_info = node_output_detail.get("quantization_parameters", {})
      output_quantization_info["dtype"] = str(node_output_detail["dtype"]).split(".")[1].split("'")[0]
      input_quantization_info = node_input_detail.get("quantization_parameters", {})
      input_quantization_info["dtype"] = str(node_input_detail["dtype"]).split(".")[1].split("'")[0]
      quantization_info = {}
      quantization_info[self.input_nodes_name[0]] = input_quantization_info
      quantization_info[avg_pool_name] = output_quantization_info

      return self.node_list, self.value_infos, self.weight_node_list, quantization_info
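
The dtype bookkeeping above works by string-splitting the repr of the NumPy type object stored under the tensor detail's "dtype" key. A minimal standalone sketch of that trick (the variable names here are illustrative, and np.dtype(...).name is shown only as an equivalent cross-check, not as something the converter itself uses):

import numpy as np

# tensor details store NumPy type objects, e.g. <class 'numpy.uint8'>
dtype_obj = np.uint8

# the chained split used in generate() pulls the bare name out of the class repr
name_via_split = str(dtype_obj).split(".")[1].split("'")[0]   # 'uint8'

# np.dtype(...).name yields the same string more directly
name_via_dtype = np.dtype(dtype_obj).name                     # 'uint8'

assert name_via_split == name_via_dtype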
Code Example #2
File: pool_layers.py  Project: kneron/ONNX_Convertor
    def generate(self):

        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        node_input_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(0))

        kernel_shape = [
            self.tflite_avgpool_parser.FilterHeight(),
            self.tflite_avgpool_parser.FilterWidth()
        ]
        strides_len = [
            self.tflite_avgpool_parser.StrideH(),
            self.tflite_avgpool_parser.StrideW()
        ]

        padding_stradegy = 'NONE'
        if self.tflite_avgpool_parser.Padding() is Padding.SAME:
            padding_stradegy = 'SAME'
        elif self.tflite_avgpool_parser.Padding() is Padding.VALID:
            padding_stradegy = 'VALID'

        input_feature_map_shape = node_input_detail['shape']

        avg_pool_name = self.node_name
        avg_pool_node = helper.make_node(
            'AveragePool',
            inputs=self.input_nodes_name,
            outputs=[avg_pool_name],
            kernel_shape=kernel_shape,
            strides=strides_len,
            pads=tflite_utils.getPadding(input_feature_map_shape, kernel_shape,
                                         strides_len, None, padding_stradegy),
            name=avg_pool_name)
        out_shape_info = helper.make_tensor_value_info(
            avg_pool_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))

        # update tables
        self.value_infos.append(out_shape_info)
        self.node_list.append(avg_pool_node)

        return self.node_list, self.value_infos, self.weight_node_list
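
The pads attribute is delegated to tflite_utils.getPadding, whose implementation is not shown here. As a rough reference, the sketch below (a hypothetical helper, not the project's code) shows the standard TensorFlow-style 'SAME' padding arithmetic for a single spatial dimension, which a helper like getPadding presumably applies to both H and W before emitting ONNX-style [top, left, bottom, right] pads:

import math

def same_padding_1d(in_size, kernel, stride, dilation=1):
    # effective kernel extent once dilation is taken into account
    effective_kernel = (kernel - 1) * dilation + 1
    # 'SAME' keeps out_size = ceil(in_size / stride)
    out_size = math.ceil(in_size / stride)
    pad_total = max((out_size - 1) * stride + effective_kernel - in_size, 0)
    pad_begin = pad_total // 2          # TFLite/TF put the extra pixel at the end
    pad_end = pad_total - pad_begin
    return pad_begin, pad_end

# e.g. a 224-wide input with a 3-wide kernel and stride 2 needs (0, 1) padding
print(same_padding_1d(224, 3, 2))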
Code Example #3
    def generate(self):
        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        node_input_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(0))

        weights_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(1))
        bias_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(2))

        weights_array = self.tflite_interpreter.get_tensor(
            weights_node_info['index'])
        bias_array = self.tflite_interpreter.get_tensor(
            bias_node_info['index'])

        kernel_shape = [weights_array.shape[1], weights_array.shape[2]]

        strides_len = [
            self.tflite_conv_parser.StrideH(),
            self.tflite_conv_parser.StrideW()
        ]
        dilation_factor = [
            self.tflite_conv_parser.DilationHFactor(),
            self.tflite_conv_parser.DilationWFactor()
        ]

        padding_stradegy = 'NONE'
        if self.tflite_conv_parser.Padding() is Padding.SAME:
            padding_stradegy = 'SAME'
        elif self.tflite_conv_parser.Padding() is Padding.VALID:
            padding_stradegy = 'VALID'

        input_feature_map_shape = node_input_detail['shape']

        # transpose because the weight layout differs between TFLite (OHWI) and ONNX (OIHW)
        weights_array = np.transpose(weights_array, (0, 3, 1, 2))

        # make weight onnx node
        weight_onnx_node_name = self.node_name + "_weight"
        weight_onnx_node = onnx.helper.make_tensor(
            weight_onnx_node_name, TensorProto.FLOAT, weights_array.shape,
            weights_array.flatten().tolist())

        # make bias onnx node
        bias_onnx_node_name = self.node_name + "_bias"
        bias_onnx_node = onnx.helper.make_tensor(bias_onnx_node_name,
                                                 TensorProto.FLOAT,
                                                 bias_array.shape,
                                                 bias_array.flatten().tolist())

        # make conv onnx node
        previous_onnx_node_names = self.input_nodes_name.copy()
        previous_onnx_node_names.extend(
            [weight_onnx_node_name, bias_onnx_node_name])
        conv_onnx_node = onnx.helper.make_node(
            'Conv',
            inputs=previous_onnx_node_names,
            outputs=[self.node_name],
            kernel_shape=kernel_shape,
            strides=strides_len,
            pads=tflite_utils.getPadding(input_feature_map_shape, kernel_shape,
                                         strides_len, dilation_factor,
                                         padding_stradegy),
            dilations=dilation_factor,
            name=self.node_name,
            group=1)

        # original layer output
        out_shape_info = onnx.helper.make_tensor_value_info(
            self.node_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))
        self.value_infos.append(out_shape_info)

        # add weight, bias node
        self.weight_node_list.append(weight_onnx_node)
        self.weight_node_list.append(bias_onnx_node)
        self.node_list.append(conv_onnx_node)

        # change output node's input_name
        for o_n in self.output_nodes:
            for idx, o_n_i_n in enumerate(o_n.input_nodes_name):
                if o_n_i_n == self.node_name:
                    o_n.input_nodes_name[idx] = self.node_list[-1].name

        return self.node_list, self.value_infos, self.weight_node_list
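
The transpose step above is worth making explicit: TFLite stores 2-D convolution kernels as [out_channels, kernel_h, kernel_w, in_channels] (OHWI), while ONNX Conv expects [out_channels, in_channels, kernel_h, kernel_w] (OIHW). A tiny illustrative check with a dummy kernel (the shapes are made up):

import numpy as np

tflite_weights = np.zeros((16, 3, 3, 8))              # OHWI: 16 filters, 3x3, 8 input channels
onnx_weights = np.transpose(tflite_weights, (0, 3, 1, 2))
print(onnx_weights.shape)                             # (16, 8, 3, 3), i.e. OIHW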
Code Example #4
    def generate(self):
        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        node_input_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(0))

        weights_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(1))
        bias_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(2))

        weights_array = self.tflite_interpreter.get_tensor(
            weights_node_info['index'])
        bias_array = self.tflite_interpreter.get_tensor(
            bias_node_info['index'])

        kernel_shape = [weights_array.shape[1], weights_array.shape[2]]

        strides_len = [
            self.tflite_conv_parser.StrideH(),
            self.tflite_conv_parser.StrideW()
        ]
        dilation_factor = [
            self.tflite_conv_parser.DilationHFactor(),
            self.tflite_conv_parser.DilationWFactor()
        ]

        # Generate quantization info and reverse-quantize (dequantize) the weights and bias
        output_quantization_info = node_output_detail.get(
            "quantization_parameters", {})
        output_quantization_info["dtype"] = str(
            node_output_detail["dtype"]).split(".")[1].split("'")[0]
        input_quantization_info = node_input_detail.get(
            "quantization_parameters", {})
        input_quantization_info["dtype"] = str(
            node_input_detail["dtype"]).split(".")[1].split("'")[0]
        weight_quantization_info = weights_node_info.get(
            "quantization_parameters", {})
        weight_quantization_info["dtype"] = str(
            weights_node_info["dtype"]).split(".")[1].split("'")[0]
        bias_quantization_info = bias_node_info.get("quantization_parameters",
                                                    {})
        bias_quantization_info["dtype"] = str(
            bias_node_info["dtype"]).split(".")[1].split("'")[0]

        #   input_quantization_info_clean = utils.get_quantization_info_in_array(input_quantization_info)
        #   output_quantization_info_clean = utils.get_quantization_info_in_array(output_quantization_info)
        #   weight_quantization_info_clean = utils.get_quantization_info_in_array(weight_quantization_info)
        #   bias_quantization_info_clean = utils.get_quantization_info_in_array(bias_quantization_info)
        # Nest the weight and bias quantization info under the input entry
        input_quantization_info["weight"] = weight_quantization_info
        input_quantization_info["bias"] = bias_quantization_info

        weights_array = np.array(weights_array, dtype=np.dtype("f4"))
        if "scales" in weight_quantization_info and len(
                weight_quantization_info["scales"]) > 0:
            weights_array = (weights_array -
                             weight_quantization_info["zero_points"][0]
                             ) * weight_quantization_info["scales"][0]
        bias_array = np.array(bias_array, dtype=np.dtype("f4"))
        if "scales" in bias_quantization_info and len(
                bias_quantization_info["scales"]) > 0:
            bias_array = (bias_array - bias_quantization_info["zero_points"][0]
                          ) * bias_quantization_info["scales"][0]
            bias_quantization_info["min"] = [float(min(bias_array))]
            bias_quantization_info["max"] = [float(max(bias_array))]

        padding_stradegy = 'NONE'
        if self.tflite_conv_parser.Padding() is Padding.SAME:
            padding_stradegy = 'SAME'
        elif self.tflite_conv_parser.Padding() is Padding.VALID:
            padding_stradegy = 'VALID'

        input_feature_map_shape = node_input_detail['shape']

        # transpose because the weight layout differs between TFLite (OHWI) and ONNX (OIHW)
        weights_array = np.transpose(weights_array, (0, 3, 1, 2))

        # make weight onnx node
        weight_onnx_node_name = self.node_name + "_weight"
        weight_onnx_node = onnx.helper.make_tensor(
            weight_onnx_node_name, TensorProto.FLOAT, weights_array.shape,
            weights_array.flatten().tolist())

        # make bias onnx node
        bias_onnx_node_name = self.node_name + "_bias"
        bias_onnx_node = onnx.helper.make_tensor(bias_onnx_node_name,
                                                 TensorProto.FLOAT,
                                                 bias_array.shape,
                                                 bias_array.flatten().tolist())

        # make conv onnx node
        previous_onnx_node_names = self.input_nodes_name.copy()
        previous_onnx_node_names.extend(
            [weight_onnx_node_name, bias_onnx_node_name])
        conv_onnx_node = onnx.helper.make_node(
            'Conv',
            inputs=previous_onnx_node_names,
            outputs=[self.node_name],
            kernel_shape=kernel_shape,
            strides=strides_len,
            pads=tflite_utils.getPadding(input_feature_map_shape, kernel_shape,
                                         strides_len, dilation_factor,
                                         padding_stradegy),
            dilations=dilation_factor,
            name=self.node_name,
            group=1)

        # original layer output
        out_shape_info = onnx.helper.make_tensor_value_info(
            self.node_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))
        self.value_infos.append(out_shape_info)

        # add weight, bias node
        self.weight_node_list.append(weight_onnx_node)
        self.weight_node_list.append(bias_onnx_node)
        self.node_list.append(conv_onnx_node)

        # change output node's input_name
        for o_n in self.output_nodes:
            for idx, o_n_i_n in enumerate(o_n.input_nodes_name):
                if o_n_i_n == self.node_name:
                    o_n.input_nodes_name[idx] = self.node_list[-1].name

        quantization_info = {}
        quantization_info[weight_onnx_node_name] = weight_quantization_info
        quantization_info[bias_onnx_node_name] = bias_quantization_info
        quantization_info[
            previous_onnx_node_names[0]] = input_quantization_info
        quantization_info[self.node_name] = output_quantization_info

        return self.node_list, self.value_infos, self.weight_node_list, quantization_info
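
The weight and bias handling above applies the standard affine dequantization real_value = (quantized_value - zero_point) * scale, using the first (per-tensor) scale and zero point. A minimal sketch with made-up numbers, independent of the converter classes:

import numpy as np

quantized = np.array([0, 128, 255], dtype=np.uint8)
scale, zero_point = 0.02, 128                         # illustrative per-tensor parameters

dequantized = (quantized.astype(np.float32) - zero_point) * scale
print(dequantized)                                    # approximately [-2.56  0.    2.54]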