Example #1
    def generate(self):
        clip_name = self.node_name

        six = np.array([6.0])
        zero = np.array([0.0])
        # ONNX Clip only accepts scalar (shapeless) tensors for its min/max inputs
        value_max_node = tflite_utils.create_constant_node(
            clip_name + '_max_6', [], six)
        value_min_node = tflite_utils.create_constant_node(
            clip_name + '_min_0', [], zero)

        prev_node_names = self.input_nodes_name.copy()
        prev_node_names.append(value_min_node.name)
        prev_node_names.append(value_max_node.name)

        clip_node = helper.make_node('Clip',
                                     inputs=prev_node_names,
                                     outputs=[clip_name],
                                     name=clip_name)

        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        out_shape_info = helper.make_tensor_value_info(
            clip_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))

        self.value_infos.append(out_shape_info)
        self.node_list.append(value_min_node)
        self.node_list.append(value_max_node)
        self.node_list.append(clip_node)

        return self.node_list, self.value_infos, self.weight_node_list, {}
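
The pattern above maps TFLite's ReLU6 onto an ONNX Clip node whose min/max bounds arrive as extra inputs. Below is a minimal standalone sketch (independent of this converter, using only onnx and numpy) that builds the same Clip-with-scalar-bounds construction and validates it with the ONNX checker:

import numpy as np
import onnx
from onnx import TensorProto, helper, numpy_helper

x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])

# Clip (opset 11+) takes min/max as optional inputs; they must be scalar
# (rank-0) tensors, which is why the converter creates shapeless constants.
t_min = numpy_helper.from_array(np.array(0.0, dtype=np.float32), name='clip_min_0')
t_max = numpy_helper.from_array(np.array(6.0, dtype=np.float32), name='clip_max_6')

clip = helper.make_node('Clip', inputs=['x', 'clip_min_0', 'clip_max_6'],
                        outputs=['y'], name='relu6_as_clip')
graph = helper.make_graph([clip], 'relu6_demo', [x], [y],
                          initializer=[t_min, t_max])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 11)])
onnx.checker.check_model(model)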
Example #2
    def generate(self):
        relu_node = helper.make_node("Relu",
                                     name=self.node_name,
                                     inputs=self.input_nodes_name,
                                     outputs=[self.node_name])

        # original layer output
        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        out_shape_info = onnx.helper.make_tensor_value_info(
            self.node_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))

        self.value_infos.append(out_shape_info)
        self.node_list.append(relu_node)

        # Collect quantization info for the layer output
        output_quantization_info = node_output_detail.get(
            "quantization_parameters", {})
        output_quantization_info["dtype"] = str(
            node_output_detail["dtype"]).split(".")[1].split("'")[0]
        quantization_info = {}
        quantization_info[self.node_name] = output_quantization_info

        return self.node_list, self.value_infos, self.weight_node_list, quantization_info
Example #3
    def generate(self):
        slope_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(1))
        slope_array = self.tflite_interpreter.get_tensor(
            slope_node_info['index'])
        slope_array = np.transpose(slope_array, (2, 0, 1))

        # make slope onnx node
        slope_onnx_node_name = self.node_name + "_slope"
        slope_onnx_node = onnx.helper.make_tensor(
            slope_onnx_node_name, TensorProto.FLOAT, slope_array.shape,
            slope_array.flatten().tolist())
        self.weight_node_list.append(slope_onnx_node)

        previous_onnx_node_names = self.input_nodes_name.copy()
        previous_onnx_node_names.extend([slope_onnx_node_name])
        prelu_node = onnx.helper.make_node('PRelu',
                                           inputs=previous_onnx_node_names,
                                           outputs=[self.node_name],
                                           name=self.node_name)
        self.node_list.append(prelu_node)

        # original layer output
        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        out_shape_info = onnx.helper.make_tensor_value_info(
            self.node_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))
        self.value_infos.append(out_shape_info)

        return self.node_list, self.value_infos, self.weight_node_list
Example #4
def set_end_node(onnx_end_node, onnx_end_node_shape):

    out_value_info_name = "out_" + onnx_end_node.name
    out_value_info = helper.make_tensor_value_info(
        out_value_info_name, TensorProto.FLOAT,
        tflite_utils.tflite2onnx_shape_map(onnx_end_node_shape))

    # change output
    onnx_end_node.output[:] = [out_value_info_name]

    return out_value_info
Example #5
  def generate(self):

      node_output_detail = self.tflite_interpreter._get_tensor_details(self.op.Outputs(0))
      node_input_detail = self.tflite_interpreter._get_tensor_details(self.op.Inputs(0))

      kernel_shape = [self.tflite_avgpool_parser.FilterHeight(),
                      self.tflite_avgpool_parser.FilterWidth()]
      strides_len = [self.tflite_avgpool_parser.StrideH(),
                     self.tflite_avgpool_parser.StrideW()]

      padding_stradegy = 'NONE' 
      if self.tflite_avgpool_parser.Padding() is Padding.SAME:
          padding_stradegy = 'SAME' 
      elif self.tflite_avgpool_parser.Padding() is Padding.VALID:
          padding_stradegy = 'VALID' 

      input_feature_map_shape = node_input_detail['shape']

      avg_pool_name = self.node_name
      avg_pool_node = helper.make_node(
          'AveragePool',
          inputs=self.input_nodes_name,
          outputs=[avg_pool_name],
          kernel_shape=kernel_shape,
          strides=strides_len,
          pads=tflite_utils.getPadding(input_feature_map_shape, kernel_shape, strides_len, None, padding_stradegy),
          name=avg_pool_name
      )
      out_shape_info = helper.make_tensor_value_info(
          avg_pool_name,
          TensorProto.FLOAT,
          tflite_utils.tflite2onnx_shape_map(node_output_detail['shape'].tolist())
      )

      # update tables
      self.value_infos.append(out_shape_info)
      self.node_list.append(avg_pool_node)

      # Collect quantization info for the layer input and output
      output_quantization_info = node_output_detail.get("quantization_parameters", {})
      output_quantization_info["dtype"] = str(node_output_detail["dtype"]).split(".")[1].split("'")[0]
      input_quantization_info = node_input_detail.get("quantization_parameters", {})
      input_quantization_info["dtype"] = str(node_input_detail["dtype"]).split(".")[1].split("'")[0]
      quantization_info = {}
      quantization_info[self.input_nodes_name[0]] = input_quantization_info
      quantization_info[avg_pool_name] = output_quantization_info

      return self.node_list, self.value_infos, self.weight_node_list, quantization_info
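
The pads argument above comes from tflite_utils.getPadding, which is not shown in these examples. As a rough sketch of the arithmetic such a helper presumably performs for the SAME case (standard TensorFlow convention; the function name below is illustrative, not from the source):

import math

def same_padding_2d(in_h, in_w, k_h, k_w, s_h, s_w):
    # TensorFlow SAME padding: output size is ceil(input / stride); the total
    # padding is split between both sides, with the extra pixel on the
    # bottom/right. Returned in ONNX order [top, left, bottom, right].
    pad_h = max((math.ceil(in_h / s_h) - 1) * s_h + k_h - in_h, 0)
    pad_w = max((math.ceil(in_w / s_w) - 1) * s_w + k_w - in_w, 0)
    return [pad_h // 2, pad_w // 2, pad_h - pad_h // 2, pad_w - pad_w // 2]

print(same_padding_2d(7, 7, 2, 2, 2, 2))   # [0, 0, 1, 1]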
Example #6
    def generate(self):
        relu_name = self.node_name
        relu_node = helper.make_node("Relu",
                                     name=relu_name,
                                     inputs=self.input_nodes_name,
                                     outputs=[relu_name])

        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        out_shape_info = onnx.helper.make_tensor_value_info(
            relu_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))

        self.value_infos.append(out_shape_info)
        self.node_list.append(relu_node)

        return self.node_list, self.value_infos, self.weight_node_list, {}
Example #7
    def generate(self):

        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        node_input_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(0))

        kernel_shape = [
            self.tflite_avgpool_parser.FilterHeight(),
            self.tflite_avgpool_parser.FilterWidth()
        ]
        strides_len = [
            self.tflite_avgpool_parser.StrideH(),
            self.tflite_avgpool_parser.StrideW()
        ]

        padding_stradegy = 'NONE'
        if self.tflite_avgpool_parser.Padding() is Padding.SAME:
            padding_stradegy = 'SAME'
        elif self.tflite_avgpool_parser.Padding() is Padding.VALID:
            padding_stradegy = 'VALID'

        input_feature_map_shape = node_input_detail['shape']

        avg_pool_name = self.node_name
        avg_pool_node = helper.make_node(
            'AveragePool',
            inputs=self.input_nodes_name,
            outputs=[avg_pool_name],
            kernel_shape=kernel_shape,
            strides=strides_len,
            pads=tflite_utils.getPadding(input_feature_map_shape, kernel_shape,
                                         strides_len, None, padding_stradegy),
            name=avg_pool_name)
        out_shape_info = helper.make_tensor_value_info(
            avg_pool_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))

        # update tables
        self.value_infos.append(out_shape_info)
        self.node_list.append(avg_pool_node)

        return self.node_list, self.value_infos, self.weight_node_list
Example #8
    def generate(self):
        clip_name = self.node_name

        six = np.array([6.0])
        zero = np.array([0.0])
        # ONNX Clip only accepts scalar (shapeless) tensors for its min/max inputs
        value_max_node = tflite_utils.create_constant_node(
            clip_name + '_max_6', [], six)
        value_min_node = tflite_utils.create_constant_node(
            clip_name + '_min_0', [], zero)

        prev_node_names = self.input_nodes_name.copy()
        prev_node_names.append(value_min_node.name)
        prev_node_names.append(value_max_node.name)

        clip_node = onnx.helper.make_node('Clip',
                                          inputs=prev_node_names,
                                          outputs=[clip_name],
                                          name=clip_name)

        # original layer output
        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        out_shape_info = onnx.helper.make_tensor_value_info(
            clip_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))

        self.value_infos.append(out_shape_info)
        self.node_list.append(value_min_node)
        self.node_list.append(value_max_node)
        self.node_list.append(clip_node)

        # Collect quantization info for the layer output
        output_quantization_info = node_output_detail.get(
            "quantization_parameters", {})
        output_quantization_info["dtype"] = str(
            node_output_detail["dtype"]).split(".")[1].split("'")[0]
        quantization_info = {}
        quantization_info[self.node_name] = output_quantization_info

        return self.node_list, self.value_infos, self.weight_node_list, quantization_info
Example #9
    def generate(self):
        slope_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(1))
        slope_array = self.tflite_interpreter.get_tensor(
            slope_node_info['index'])
        slope_array = np.transpose(slope_array, (2, 0, 1))

        # make slope onnx node
        slope_onnx_node_name = self.node_name + "_slope"
        slope_onnx_node = onnx.helper.make_tensor(
            slope_onnx_node_name, TensorProto.FLOAT, slope_array.shape,
            slope_array.flatten().tolist())
        self.weight_node_list.append(slope_onnx_node)

        previous_onnx_node_names = self.input_nodes_name.copy()
        previous_onnx_node_names.extend([slope_onnx_node_name])
        prelu_node = onnx.helper.make_node('PRelu',
                                           inputs=previous_onnx_node_names,
                                           outputs=[self.node_name],
                                           name=self.node_name)
        self.node_list.append(prelu_node)

        # original layer output
        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        out_shape_info = onnx.helper.make_tensor_value_info(
            self.node_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))
        self.value_infos.append(out_shape_info)

        # Collect quantization info for the layer output
        output_quantization_info = node_output_detail.get(
            "quantization_parameters", {})
        output_quantization_info["dtype"] = str(
            node_output_detail["dtype"]).split(".")[1].split("'")[0]
        quantization_info = {}
        quantization_info[self.node_name] = output_quantization_info

        return self.node_list, self.value_infos, self.weight_node_list, quantization_info
Example #10
def main(model_path,
         model_save_path=None,
         add_transpose_for_channel_last_first_issue=True,
         bottom_nodes_name=None):

    onnx_weight_node_list = []
    output_tensor_value_info = []
    onnx_node_list = []
    inner_node_shape_value_info = []

    # parse tensor information through the TFLite interpreter (the interpreter cannot parse operator information in our target TensorFlow version, 1.15)
    interpreter = tf.lite.Interpreter(model_path)
    interpreter.allocate_tensors()

    # get model input info (assume there is only one input)
    input_details = interpreter.get_input_details()
    model_input_name = input_details[0]['name']
    input_tensor_value_info = None

    # generate tree
    tree_graph = Tree(model_path=model_path,
                      bottom_nodes_name=bottom_nodes_name,
                      defused=True)

    # get sequential node name
    sequential_keys = tree_graph.get_sequential_nodes_key()

    # get tree node in the form of {node_name: op_node_obj}
    tree_dict = tree_graph.get_nodes()

    #############################
    # build head transpose node #
    #############################
    for h_node in tree_graph.get_head_nodes():
        # transpose for channel last to channel first
        if add_transpose_for_channel_last_first_issue is True:
            logging.getLogger('tflite2onnx').debug(
                "generating transpose node for channel last first issue: " +
                h_node.node_name)
            input_tensor_value_info = helper.make_tensor_value_info(
                model_input_name, TensorProto.FLOAT,
                h_node.node_input_shape.tolist())
            h_transpose_node = build_head_transpose_node_for_channel_last_2_channel_first(
                input_tensor_value_info.name, h_node.node_name)

            onnx_node_list.append(h_transpose_node)
            h_node.input_nodes_name = [h_transpose_node.name]
        else:
            input_tensor_value_info = helper.make_tensor_value_info(
                model_input_name, TensorProto.FLOAT,
                tflite_utils.tflite2onnx_shape_map(
                    h_node.node_input_shape.tolist()))
            h_node.input_nodes_name = [input_tensor_value_info.name]

    ############################
    # build model node by node #
    ############################
    dumped_quantization_info = {}
    for key in sequential_keys:
        logging.getLogger('tflite2onnx').debug("generating: " + key)
        nodes, val, weight, quantization_info = tree_dict[key].generate()

        if (len(val) != 0) and (tree_dict[key].is_bottom_node is False):
            inner_node_shape_value_info.extend(val)
        if len(weight) != 0:
            onnx_weight_node_list.extend(weight)
        if len(nodes) != 0:
            onnx_node_list.extend(nodes)
        if len(quantization_info) != 0:
            merge_quantization_info(dumped_quantization_info,
                                    quantization_info)

    if check_quantization(interpreter.get_tensor_details()):
        # derive the JSON path from the .onnx save path (assumes model_save_path ends with ".onnx")
        json_save_path = model_save_path[:-5] + "_user_config.json"
        with open(json_save_path, "w") as f:
            print(json_save_path)
            json.dump(dumped_quantization_info, f, indent=1)
            print("New Qunatized information saved")

    # sometimes there are sub-nodes inside one tree node, so we need to find the last one
    b_nodes = list(tree_graph.get_bottom_nodes())

    ###############################
    # build bottom transpose node #
    ###############################
    for b_node in b_nodes:

        out_value_info = None
        if add_transpose_for_channel_last_first_issue is True:
            logging.getLogger('tflite2onnx').debug(
                "generating transpose node for channel last first issue: " +
                b_node.node_name)
            out_value_info, transpose_node = build_button_transpose_node_for_channel_first_2_channel_last(
                b_node.node_list[-1], b_node.node_output_shape.tolist())

            if transpose_node is not None:
                onnx_node_list.append(transpose_node)
        else:
            out_value_info = set_end_node(b_node.node_list[-1],
                                          b_node.node_output_shape.tolist())
        output_tensor_value_info.append(out_value_info)

    input_init = [input_tensor_value_info]
    input_init.extend(onnx_weight_node_list)
    onnx_inputs = tflite_utils.make_kneron_valid_onnx_input(input_init)

    graph_cnn = helper.make_graph(onnx_node_list,
                                  'cnn_test',
                                  onnx_inputs,
                                  output_tensor_value_info,
                                  onnx_weight_node_list,
                                  value_info=inner_node_shape_value_info)

    cnn_model = helper.make_model(graph_cnn, producer_name='Kneron')
    cnn_model.opset_import[0].version = 11

    # add generated time to model metadata
    helper.set_model_props(
        cnn_model, {
            'Generated Time':
            datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S") + " (UTC+0)"
        })

    cnn_model = onnx.utils.polish_model(cnn_model)

    # save
    if model_save_path is not None:
        onnx.save(cnn_model, model_save_path)
    return cnn_model
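
A hypothetical invocation of main (file names are illustrative and not taken from the source):

if __name__ == '__main__':
    # Convert a TFLite model to ONNX, adding NHWC->NCHW / NCHW->NHWC
    # transpose nodes at the head and bottom of the graph, and save it.
    # 'mobilenet_v2.tflite' / 'mobilenet_v2.onnx' are placeholder paths.
    onnx_model = main('mobilenet_v2.tflite',
                      model_save_path='mobilenet_v2.onnx',
                      add_transpose_for_channel_last_first_issue=True)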
Example #11
    def generate(self):

        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(0))
        node_input_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(2))

        output_shape_value = self.tflite_interpreter.get_tensor(
            node_output_detail['index'])

        weights_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(1))
        weights_array = self.tflite_interpreter.get_tensor(
            weights_node_info['index'])

        kernel_shape = [weights_array.shape[1], weights_array.shape[2]]

        strides_len = [
            self.tflite_tconv_parser.StrideH(),
            self.tflite_tconv_parser.StrideW()
        ]

        padding_stradegy = 'NONE'
        if self.tflite_tconv_parser.Padding() is Padding.SAME:
            padding_stradegy = 'SAME'
        elif self.tflite_tconv_parser.Padding() is Padding.VALID:
            padding_stradegy = 'VALID'

        input_feature_map_shape = node_input_detail['shape']

        # transpose because the weight layout differs between TFLite and ONNX
        weights_array = np.transpose(weights_array, (3, 0, 1, 2))

        # make weight onnx node
        weight_onnx_node_name = self.node_name + "_weight"
        weight_onnx_node = onnx.helper.make_tensor(
            weight_onnx_node_name, TensorProto.FLOAT, weights_array.shape,
            weights_array.flatten().tolist())

        # make conv onnx node
        previous_onnx_node_names = self.input_nodes_name.copy()
        previous_onnx_node_names.extend([weight_onnx_node_name])
        tconv_onnx_node = onnx.helper.make_node(
            'ConvTranspose',
            inputs=previous_onnx_node_names,
            outputs=[self.node_name],
            kernel_shape=kernel_shape,
            strides=strides_len,

            # TODO: calculate padding for transpose conv
            #pads = tflite_utils.getPadding(input_feature_map_shape, kernel_shape, strides_len, padding_stradegy),
            name=self.node_name,
            group=1)

        # original layer output
        out_shape_info = onnx.helper.make_tensor_value_info(
            self.node_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(output_shape_value.tolist()))
        self.value_infos.append(out_shape_info)

        # add weight, bias node
        self.weight_node_list.append(weight_onnx_node)
        self.node_list.append(tconv_onnx_node)

        return self.node_list, self.value_infos, self.weight_node_list
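
Note the different permutation here compared with the regular convolution examples: if the TFLite TransposeConv filter is laid out as [out_channels, kernel_h, kernel_w, in_channels], the (3, 0, 1, 2) transpose produces the [in_channels, out_channels, kernel_h, kernel_w] layout that ONNX ConvTranspose expects. A small standalone check (shapes are arbitrary):

import numpy as np

tflite_filter = np.zeros((16, 3, 3, 8), dtype=np.float32)    # O, H, W, I
onnx_filter = np.transpose(tflite_filter, (3, 0, 1, 2))      # I, O, H, W
assert onnx_filter.shape == (8, 16, 3, 3)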
Example #12
    def generate(self):
        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        node_input_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(0))

        # create scale constant node
        tensor_input_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(1))

        # NHWC input: shape[1:3] is [height, width]; the size tensor holds [new_height, new_width]
        source_height, source_width = node_input_detail['shape'].tolist()[1:3]
        target_height, target_width = self.tflite_interpreter.get_tensor(
            tensor_input_detail['index']).tolist()

        source_size = np.array([1.0, 1.0, source_height, source_width],
                               dtype=np.float64)
        target_size = np.array([1.0, 1.0, target_height, target_width],
                               dtype=np.float64)

        scale_val = target_size / source_size
        scale_constant_node = tflite_utils.create_constant_node(
            self.node_name + '_scales', scale_val.shape, scale_val)

        constant_info = onnx.helper.make_tensor_value_info(
            name=scale_constant_node.name,
            elem_type=TensorProto.FLOAT,
            shape=scale_val.shape)

        self.node_list.append(scale_constant_node)
        self.value_infos.append(constant_info)

        # create roi constant node
        roi_constant_node = tflite_utils.create_constant_node(
            self.node_name + 'resize_roi', [], np.array([-1],
                                                        dtype=np.float32))
        self.node_list.append(roi_constant_node)

        previous_onnx_node_names = self.input_nodes_name.copy()
        previous_onnx_node_names.extend(
            [roi_constant_node.name, scale_constant_node.name])
        resize_nearest_neighbor_node = onnx.helper.make_node(
            op_type='Resize',
            inputs=previous_onnx_node_names,
            outputs=[self.node_name],
            name=self.node_name,
            mode='linear',
            coordinate_transformation_mode='align_corners'
            if self.tflite_resize_bilinear_parser.AlignCorners() == True else
            'half_pixel')

        resize_nearest_neighbor_info = onnx.helper.make_tensor_value_info(
            name=self.node_name,
            elem_type=TensorProto.FLOAT,
            shape=tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))

        # update tables
        self.node_list.append(resize_nearest_neighbor_node)
        self.value_infos.append(resize_nearest_neighbor_info)

        return self.node_list, self.value_infos, self.weight_node_list
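
A quick standalone check of the scale computation above: resizing a 16x16 feature map to 32x32 should yield a Resize 'scales' input of [1, 1, 2, 2] in NCHW order.

import numpy as np

source_size = np.array([1.0, 1.0, 16.0, 16.0])   # N, C, H, W
target_size = np.array([1.0, 1.0, 32.0, 32.0])
print(target_size / source_size)                 # [1. 1. 2. 2.]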
Example #13
    def generate(self):
        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        node_input_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(0))

        weights_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(1))
        bias_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(2))

        weights_array = self.tflite_interpreter.get_tensor(
            weights_node_info['index'])
        bias_array = self.tflite_interpreter.get_tensor(
            bias_node_info['index'])

        kernel_shape = [weights_array.shape[1], weights_array.shape[2]]

        strides_len = [
            self.tflite_conv_parser.StrideH(),
            self.tflite_conv_parser.StrideW()
        ]
        dilation_factor = [
            self.tflite_conv_parser.DilationHFactor(),
            self.tflite_conv_parser.DilationWFactor()
        ]

        padding_stradegy = 'NONE'
        if self.tflite_conv_parser.Padding() is Padding.SAME:
            padding_stradegy = 'SAME'
        elif self.tflite_conv_parser.Padding() is Padding.VALID:
            padding_stradegy = 'VALID'

        input_feature_map_shape = node_input_detail['shape']

        # transpose because the weight layout differs between TFLite and ONNX
        weights_array = np.transpose(weights_array, (0, 3, 1, 2))

        # make weight onnx node
        weight_onnx_node_name = self.node_name + "_weight"
        weight_onnx_node = onnx.helper.make_tensor(
            weight_onnx_node_name, TensorProto.FLOAT, weights_array.shape,
            weights_array.flatten().tolist())

        # make bias onnx node
        bias_onnx_node_name = self.node_name + "_bias"
        bias_onnx_node = onnx.helper.make_tensor(bias_onnx_node_name,
                                                 TensorProto.FLOAT,
                                                 bias_array.shape,
                                                 bias_array.flatten().tolist())

        # make conv onnx node
        previous_onnx_node_names = self.input_nodes_name.copy()
        previous_onnx_node_names.extend(
            [weight_onnx_node_name, bias_onnx_node_name])
        conv_onnx_node = onnx.helper.make_node(
            'Conv',
            inputs=previous_onnx_node_names,
            outputs=[self.node_name],
            kernel_shape=kernel_shape,
            strides=strides_len,
            pads=tflite_utils.getPadding(input_feature_map_shape, kernel_shape,
                                         strides_len, dilation_factor,
                                         padding_stradegy),
            dilations=dilation_factor,
            name=self.node_name,
            group=1)

        # original layer output
        out_shape_info = onnx.helper.make_tensor_value_info(
            self.node_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))
        self.value_infos.append(out_shape_info)

        # add weight, bias node
        self.weight_node_list.append(weight_onnx_node)
        self.weight_node_list.append(bias_onnx_node)
        self.node_list.append(conv_onnx_node)

        # change output node's input_name
        for o_n in self.output_nodes:
            for idx, o_n_i_n in enumerate(o_n.input_nodes_name):
                if o_n_i_n == self.node_name:
                    o_n.input_nodes_name[idx] = self.node_list[-1].name

        return self.node_list, self.value_infos, self.weight_node_list
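
A standalone illustration of the weight transpose above: TFLite stores Conv2D kernels as [out_channels, kernel_h, kernel_w, in_channels] (OHWI), while ONNX Conv expects [out_channels, in_channels, kernel_h, kernel_w] (OIHW), hence the (0, 3, 1, 2) permutation. The shapes below are arbitrary.

import numpy as np

tflite_kernel = np.zeros((16, 3, 3, 8), dtype=np.float32)   # O, H, W, I
onnx_kernel = np.transpose(tflite_kernel, (0, 3, 1, 2))     # O, I, H, W
assert onnx_kernel.shape == (16, 8, 3, 3)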
Example #14
    def generate(self):

        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(0))
        node_input_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(2))

        output_shape_value = self.tflite_interpreter.get_tensor(
            node_output_detail['index'])

        weights_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(1))
        weights_array = self.tflite_interpreter.get_tensor(
            weights_node_info['index'])

        kernel_shape = [weights_array.shape[1], weights_array.shape[2]]

        strides_len = [
            self.tflite_tconv_parser.StrideH(),
            self.tflite_tconv_parser.StrideW()
        ]

        # Generate quantization info and reverse the quantization of the weights
        output_quantization_info = node_output_detail.get(
            "quantization_parameters", {})
        output_quantization_info["dtype"] = str(
            node_output_detail["dtype"]).split(".")[1].split("'")[0]
        input_quantization_info = node_input_detail.get(
            "quantization_parameters", {})
        input_quantization_info["dtype"] = str(
            node_input_detail["dtype"]).split(".")[1].split("'")[0]
        weight_quantization_info = weights_node_info.get(
            "quantization_parameters", {})
        weight_quantization_info["dtype"] = str(
            weights_node_info["dtype"]).split(".")[1].split("'")[0]
        weights_array = np.array(weights_array, dtype=np.dtype("f4"))
        if "scales" in weight_quantization_info and len(
                weight_quantization_info["scales"]) > 0:
            weights_array = (weights_array -
                             weight_quantization_info["zero_points"][0]
                             ) * weight_quantization_info["scales"][0]
        # nest the weight quantization info under the input entry
        input_quantization_info["weight"] = weight_quantization_info

        padding_stradegy = 'NONE'
        if self.tflite_tconv_parser.Padding() is Padding.SAME:
            padding_stradegy = 'SAME'
        elif self.tflite_tconv_parser.Padding() is Padding.VALID:
            padding_stradegy = 'VALID'

        input_feature_map_shape = node_input_detail['shape']

        # transpose because the weight layout differs between TFLite and ONNX
        weights_array = np.transpose(weights_array, (3, 0, 1, 2))

        # make weight onnx node
        weight_onnx_node_name = self.node_name + "_weight"
        weight_onnx_node = onnx.helper.make_tensor(
            weight_onnx_node_name, TensorProto.FLOAT, weights_array.shape,
            weights_array.flatten().tolist())

        # make conv onnx node
        previous_onnx_node_names = self.input_nodes_name.copy()
        previous_onnx_node_names.extend([weight_onnx_node_name])
        tconv_onnx_node = onnx.helper.make_node(
            'ConvTranspose',
            inputs=previous_onnx_node_names,
            outputs=[self.node_name],
            kernel_shape=kernel_shape,
            strides=strides_len,

            # TODO: calculate padding for transpose conv
            #pads = tflite_utils.getPadding(input_feature_map_shape, kernel_shape, strides_len, padding_stradegy),
            name=self.node_name,
            group=1)

        # original layer output
        out_shape_info = onnx.helper.make_tensor_value_info(
            self.node_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(output_shape_value.tolist()))
        self.value_infos.append(out_shape_info)

        # add weight, bias node
        self.weight_node_list.append(weight_onnx_node)
        self.node_list.append(tconv_onnx_node)

        quantization_info = {}
        quantization_info[weight_onnx_node_name] = weight_quantization_info
        quantization_info[
            previous_onnx_node_names[0]] = input_quantization_info
        quantization_info[self.node_name] = output_quantization_info

        return self.node_list, self.value_infos, self.weight_node_list, quantization_info
Example #15
    def generate(self):
        node_output_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Outputs(0))
        node_input_detail = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(0))

        weights_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(1))
        bias_node_info = self.tflite_interpreter._get_tensor_details(
            self.op.Inputs(2))

        weights_array = self.tflite_interpreter.get_tensor(
            weights_node_info['index'])
        bias_array = self.tflite_interpreter.get_tensor(
            bias_node_info['index'])

        kernel_shape = [weights_array.shape[1], weights_array.shape[2]]

        strides_len = [
            self.tflite_conv_parser.StrideH(),
            self.tflite_conv_parser.StrideW()
        ]
        dilation_factor = [
            self.tflite_conv_parser.DilationHFactor(),
            self.tflite_conv_parser.DilationWFactor()
        ]

        # Generate quantization info and reverse the quantization of the weights and bias
        output_quantization_info = node_output_detail.get(
            "quantization_parameters", {})
        output_quantization_info["dtype"] = str(
            node_output_detail["dtype"]).split(".")[1].split("'")[0]
        input_quantization_info = node_input_detail.get(
            "quantization_parameters", {})
        input_quantization_info["dtype"] = str(
            node_input_detail["dtype"]).split(".")[1].split("'")[0]
        weight_quantization_info = weights_node_info.get(
            "quantization_parameters", {})
        weight_quantization_info["dtype"] = str(
            weights_node_info["dtype"]).split(".")[1].split("'")[0]
        bias_quantization_info = bias_node_info.get("quantization_parameters",
                                                    {})
        bias_quantization_info["dtype"] = str(
            bias_node_info["dtype"]).split(".")[1].split("'")[0]

        #   input_quantization_info_clean = utils.get_quantization_info_in_array(input_quantization_info)
        #   output_quantization_info_clean = utils.get_quantization_info_in_array(output_quantization_info)
        #   weight_quantization_info_clean = utils.get_quantization_info_in_array(weight_quantization_info)
        #   bias_quantization_info_clean = utils.get_quantization_info_in_array(bias_quantization_info)
        # nest the weight and bias quantization info under the input entry
        input_quantization_info["weight"] = weight_quantization_info
        input_quantization_info["bias"] = bias_quantization_info

        weights_array = np.array(weights_array, dtype=np.dtype("f4"))
        if "scales" in weight_quantization_info and len(
                weight_quantization_info["scales"]) > 0:
            weights_array = (weights_array -
                             weight_quantization_info["zero_points"][0]
                             ) * weight_quantization_info["scales"][0]
        bias_array = np.array(bias_array, dtype=np.dtype("f4"))
        if "scales" in bias_quantization_info and len(
                bias_quantization_info["scales"]) > 0:
            bias_array = (bias_array - bias_quantization_info["zero_points"][0]
                          ) * bias_quantization_info["scales"][0]
            bias_quantization_info["min"] = [float(min(bias_array))]
            bias_quantization_info["max"] = [float(max(bias_array))]

        padding_stradegy = 'NONE'
        if self.tflite_conv_parser.Padding() is Padding.SAME:
            padding_stradegy = 'SAME'
        elif self.tflite_conv_parser.Padding() is Padding.VALID:
            padding_stradegy = 'VALID'

        input_feature_map_shape = node_input_detail['shape']

        # transpose because the weight layout differs between TFLite and ONNX
        weights_array = np.transpose(weights_array, (0, 3, 1, 2))

        # make weight onnx node
        weight_onnx_node_name = self.node_name + "_weight"
        weight_onnx_node = onnx.helper.make_tensor(
            weight_onnx_node_name, TensorProto.FLOAT, weights_array.shape,
            weights_array.flatten().tolist())

        # make bias onnx node
        bias_onnx_node_name = self.node_name + "_bias"
        bias_onnx_node = onnx.helper.make_tensor(bias_onnx_node_name,
                                                 TensorProto.FLOAT,
                                                 bias_array.shape,
                                                 bias_array.flatten().tolist())

        # make conv onnx node
        previous_onnx_node_names = self.input_nodes_name.copy()
        previous_onnx_node_names.extend(
            [weight_onnx_node_name, bias_onnx_node_name])
        conv_onnx_node = onnx.helper.make_node(
            'Conv',
            inputs=previous_onnx_node_names,
            outputs=[self.node_name],
            kernel_shape=kernel_shape,
            strides=strides_len,
            pads=tflite_utils.getPadding(input_feature_map_shape, kernel_shape,
                                         strides_len, dilation_factor,
                                         padding_stradegy),
            dilations=dilation_factor,
            name=self.node_name,
            group=1)

        # original layer output
        out_shape_info = onnx.helper.make_tensor_value_info(
            self.node_name, TensorProto.FLOAT,
            tflite_utils.tflite2onnx_shape_map(
                node_output_detail['shape'].tolist()))
        self.value_infos.append(out_shape_info)

        # add weight, bias node
        self.weight_node_list.append(weight_onnx_node)
        self.weight_node_list.append(bias_onnx_node)
        self.node_list.append(conv_onnx_node)

        # change output node's input_name
        for o_n in self.output_nodes:
            for idx, o_n_i_n in enumerate(o_n.input_nodes_name):
                if o_n_i_n == self.node_name:
                    o_n.input_nodes_name[idx] = self.node_list[-1].name

        quantization_info = {}
        quantization_info[weight_onnx_node_name] = weight_quantization_info
        quantization_info[bias_onnx_node_name] = bias_quantization_info
        quantization_info[
            previous_onnx_node_names[0]] = input_quantization_info
        quantization_info[self.node_name] = output_quantization_info

        return self.node_list, self.value_infos, self.weight_node_list, quantization_info
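
The reverse quantization applied to the weights and bias above is the usual affine dequantization, real_value = (quantized_value - zero_point) * scale, with per-tensor parameters taken from the TFLite tensor details. A small standalone sketch with made-up numbers:

import numpy as np

q_weights = np.array([[130, 120], [128, 140]], dtype=np.uint8)
zero_point, scale = 128, 0.02

real_weights = (q_weights.astype(np.float32) - zero_point) * scale
print(real_weights)   # roughly [[0.04, -0.16], [0.0, 0.24]]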