Example #1
    def _custom_getter(cls,
                       getter,
                       name,
                       node=None,
                       tensor_dict=None,
                       is_bidirectional=None,
                       *args,
                       **kwargs):
        names = name.split("/")
        if is_bidirectional:
            if "fw" in names:
                index = 0
            elif "bw" in names:
                index = 1
            else:
                raise RuntimeError(
                    "Cannot get {} for bidirectional: "
                    "neither fw nor bw is in the name scope.".format(names[-1]))

        if names[-1] == "kernel":
            weight_var = tensor_dict[get_variable_name(node,
                                                       cls.weight_var_name)]
            if is_bidirectional:
                w = tf.split(tensor_dict[node.inputs[1]], 2)[index]
                r = tf.split(tensor_dict[node.inputs[2]], 2)[index]
            else:
                w = tensor_dict[node.inputs[1]]
                r = tensor_dict[node.inputs[2]]
            new_w = tf.transpose(tf.squeeze(w))
            new_r = tf.transpose(tf.squeeze(r))
            weight_var.assign(tf.concat([new_w, new_r], 0))
            return weight_var
        if names[-1] == "bias":
            b_shape = node.attrs["hidden_size"]
            bias_var = tensor_dict[get_variable_name(node, cls.bias_var_name)]
            if len(node.inputs) >= 4:
                if is_bidirectional:
                    b = tf.split(tensor_dict[node.inputs[3]], 2)[index]
                else:
                    b = tensor_dict[node.inputs[3]]
                w_b, r_b = tf.split(tf.squeeze(b), 2)
                w_b = tf.transpose(w_b)
                r_b = tf.transpose(r_b)
                bias_var.assign(tf.add(w_b, r_b))
            else:
                bias_var.assign(tf.zeros([b_shape], tf.float32))
            return bias_var
        return getter(name, *args, **kwargs)
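
The getter above rebuilds the TensorFlow RNN kernel from the ONNX `W` and `R` inputs. Below is a minimal standalone sketch of that squeeze/transpose/concat step; the shapes are illustrative, not taken from the handler:

    import tensorflow as tf

    # Illustrative shapes: one direction, hidden_size=3, input_size=4.
    hidden_size, input_size = 3, 4
    w = tf.random.normal([1, hidden_size, input_size])   # ONNX W
    r = tf.random.normal([1, hidden_size, hidden_size])  # ONNX R

    # Drop the num_directions axis and transpose to TF's [in, out] layout.
    new_w = tf.transpose(tf.squeeze(w, axis=0))  # [input_size, hidden_size]
    new_r = tf.transpose(tf.squeeze(r, axis=0))  # [hidden_size, hidden_size]

    # TF stacks the input and recurrent weights along axis 0 in one kernel.
    kernel = tf.concat([new_w, new_r], 0)
    print(kernel.shape)  # (7, 3): [input_size + hidden_size, hidden_size]
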
Example #2

 def _create_handlers_variables(self, graph, vars_dict):
   if self.handlers:
     handlers = self.backend._get_handlers(self.opset)
     for node in graph.node:
       handler = handlers[node.domain].get(
           node.op_type, None) if node.domain in handlers else None
       if handler and bool(
           handler.get_req_vars_template(node, self.initializer_dict)):
         for v_name, v_template in handler.get_req_vars_template(
             node, self.initializer_dict).items():
           v_init, v_shape = v_template
           v_name = get_variable_name(node, v_name)
            if v_name in vars_dict:
              # found duplicate variable name due to non-unique node name
              exception.NONUNIQUE_NODE_NAME_EXCEPT()
           vars_dict[v_name] = tf.Variable(v_init,
                                           dtype=v_init.dtype,
                                           shape=v_shape,
                                           name=v_name)
       if node.op_type in ['Loop', 'Scan']:
         onnx_node = OnnxNode(node)
         body = onnx_node.attrs["body"]
         vars_dict = self._create_handlers_variables(body, vars_dict)
       elif node.op_type == 'If':
         onnx_node = OnnxNode(node)
         then_branch = onnx_node.attrs['then_branch']
         vars_dict = self._create_handlers_variables(then_branch, vars_dict)
         else_branch = onnx_node.attrs['else_branch']
         vars_dict = self._create_handlers_variables(else_branch, vars_dict)
   return vars_dict
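
Each handler variable is built from an `(initial_value, shape)` template. A minimal sketch of that creation step, using a hypothetical template dict in the same format the loop above consumes (the real values come from `handler.get_req_vars_template(node, initializer_dict)`; a `None` dimension lets the variable be reassigned with a different length later):

    import tensorflow as tf

    # Hypothetical {var_name: (initial_value, shape)} template.
    req_vars_template = {
        "bias": (tf.zeros([4], tf.float32), tf.TensorShape([4])),
        "result": (tf.zeros([1, 3], tf.int64), tf.TensorShape([None, 3])),
    }

    vars_dict = {}
    for v_name, (v_init, v_shape) in req_vars_template.items():
        vars_dict[v_name] = tf.Variable(
            v_init, dtype=v_init.dtype, shape=v_shape, name=v_name)

    print({k: v.shape for k, v in vars_dict.items()})
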
Example #3

 def _create_handlers_variables(self, vars_dict):
   if self.handlers:
     handler = self.handlers[self.node.domain].get(
         self.node.op_type,
         None) if self.node.domain in self.handlers else None
     if handler and bool(
         handler.get_req_vars_template(self.node, self.node.attrs)):
       for v_name, v_template in handler.get_req_vars_template(
           self.node, self.node.attrs).items():
         v_init, v_shape = v_template
         v_name = get_variable_name(self.node, v_name)
         vars_dict[v_name] = tf.Variable(v_init,
                                         dtype=v_init.dtype,
                                         shape=v_shape,
                                         name=v_name)
   return vars_dict
Example #4

 def create_variables(cls, handlers, node, init_dict, var_dict,
                      callback_func):
     """ Create variable base on variable template return in
 get_req_vars_template.
 :param handlers: all backend handlers
 :param node: OnnxNode object.
 :param var_dict: variable dictionary for the model so far
 :param callback_func: the callback function
 :return: updated variable dictionary.
 """
     if bool(cls.get_req_vars_template(node, init_dict)):
         for v_name, v_template in cls.get_req_vars_template(
                 node, init_dict).items():
             v_init, v_shape = v_template
             v_name = get_variable_name(node, v_name)
              if v_name in var_dict:
                  # found duplicate variable name due to non-unique node name
                  exception.NONUNIQUE_NODE_NAME_EXCEPT()
             var_dict[v_name] = tf.Variable(v_init,
                                            dtype=v_init.dtype,
                                            shape=v_shape,
                                            name=v_name)
     return var_dict
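
The duplicate-name check relies on `get_variable_name` producing a distinct key per node. A plausible sketch of such a helper, assuming it scopes the variable by op type and node name (the real helper in the codebase may differ in detail):

    def get_variable_name(node, var_name):
        # Prefix with the op type, and suffix with the node name when one
        # is set, so two nodes of the same type do not collide. (Sketch
        # only; assumed implementation.)
        v_name = node.op_type.lower() + '_' + var_name
        return v_name + '_' + node.name.lower() if node.name else v_name
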
Example #5
    def _custom_getter(cls,
                       getter,
                       name,
                       node=None,
                       tensor_dict=None,
                       is_bidirectional=None,
                       *args,
                       **kwargs):
        names = name.split("/")
        if is_bidirectional:
            if "fw" in names:
                index = 0
            elif "bw" in names:
                index = 1
            else:
                raise RuntimeError(
                    "Cannot get {} for bidirectional: "
                    "neither fw nor bw is in the name scope.".format(names[-1]))

        if names[-1] == "kernel":
            weight_variable = tensor_dict[get_variable_name(
                node, cls.weight_var_name)]
            # onnx W[iofc], R[iofc]
            if is_bidirectional:
                w = tf.split(tensor_dict[node.inputs[1]], 2)[index]
                r = tf.split(tensor_dict[node.inputs[2]], 2)[index]
            else:
                w = tensor_dict[node.inputs[1]]
                r = tensor_dict[node.inputs[2]]
            w_i, w_o, w_f, w_c = tf.split(tf.squeeze(w), 4)
            r_i, r_o, r_f, r_c = tf.split(tf.squeeze(r), 4)
            new_w = tf.transpose(tf.concat([w_i, w_c, w_f, w_o], 0))
            new_r = tf.transpose(tf.concat([r_i, r_c, r_f, r_o], 0))
            kernel = tf.concat([new_w, new_r], 0)
            weight_variable.assign(kernel)
            return weight_variable

        if names[-1] == "bias":
            bias_variable = tensor_dict[get_variable_name(
                node, cls.bias_var_name)]
            if len(node.inputs) >= 4:
                # onnx Wb[iofc], Rb[iofc]
                if is_bidirectional:
                    b = tf.split(tensor_dict[node.inputs[3]], 2)[index]
                else:
                    b = tensor_dict[node.inputs[3]]
                w_b, r_b = tf.split(tf.squeeze(b), 2)
                w_b_i, w_b_o, w_b_f, w_b_c = tf.split(w_b, 4)
                r_b_i, r_b_o, r_b_f, r_b_c = tf.split(r_b, 4)
                w_b = tf.transpose(tf.concat([w_b_i, w_b_c, w_b_f, w_b_o], 0))
                r_b = tf.transpose(tf.concat([r_b_i, r_b_c, r_b_f, r_b_o], 0))
                bias_variable.assign(tf.add(w_b, r_b))

            return bias_variable

        # Only use_peepholes is True,
        # will try to get w_f_diag, w_i_diag, w_o_diag
        # onnx P[iof]
        if names[-1] in ["w_f_diag", "w_i_diag", "w_o_diag"]:
            if is_bidirectional:
                p = tf.split(tensor_dict[node.inputs[7]], 2)[index]
            else:
                p = tensor_dict[node.inputs[7]]
            if names[-1] == "w_f_diag":
                w_f_variable = tensor_dict[get_variable_name(
                    node, cls.peephole_weight_forget_var_name)]
                w_f_variable.assign(tf.split(p, 3, axis=1)[2])
                return w_f_variable
            if names[-1] == "w_i_diag":
                w_i_variable = tensor_dict[get_variable_name(
                    node, cls.peephole_weight_input_var_name)]
                w_i_variable.assign(tf.split(p, 3, axis=1)[0])
                return w_i_variable
            if names[-1] == "w_o_diag":
                w_o_variable = tensor_dict[get_variable_name(
                    node, cls.peephole_weight_output_var_name)]
                w_o_variable.assign(tf.split(p, 3, axis=1)[1])
                return w_o_variable
        return getter(name, *args, **kwargs)
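
Example #5 reorders the LSTM gates: ONNX stores `W` and `R` with gates in `i, o, f, c` order, while the kernel assembled above uses `i, c, f, o`, matching the `i, j, f, o` (input, candidate, forget, output) layout of the TF 1.x LSTMCell kernel. A standalone sketch with illustrative shapes:

    import tensorflow as tf

    hidden_size, input_size = 2, 3
    # ONNX LSTM W: [num_directions, 4*hidden_size, input_size], gates i,o,f,c.
    w = tf.random.normal([1, 4 * hidden_size, input_size])

    w_i, w_o, w_f, w_c = tf.split(tf.squeeze(w, axis=0), 4)
    # Reorder to i, c, f, o and transpose to TF's [in, out] layout.
    new_w = tf.transpose(tf.concat([w_i, w_c, w_f, w_o], 0))
    print(new_w.shape)  # (3, 8): [input_size, 4 * hidden_size]
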
Example #6
    def _common(cls, node, **kwargs):
        tensor_dict = kwargs["tensor_dict"]
        boxes = tensor_dict[node.inputs[0]]
        scores = tensor_dict[node.inputs[1]]
        # In the ONNX spec max_output_boxes_per_class is int64, but the
        # max_output_size argument of tf.image.non_max_suppression must be
        # tf.int32, so cast this input to tf.int32
        max_output_boxes_per_class = tf.cast(
            tensor_dict[node.inputs[2]], tf.int32) if (
                len(node.inputs) > 2
                and node.inputs[2] != "") else tf.constant(0, tf.int32)
        # make sure max_output_boxes_per_class is a scalar, not a 1-D one-element tensor
        max_output_boxes_per_class = tf.squeeze(
            max_output_boxes_per_class) if len(
                max_output_boxes_per_class.shape
            ) == 1 else max_output_boxes_per_class
        iou_threshold = tensor_dict[node.inputs[3]] if (
            len(node.inputs) > 3 and node.inputs[3] != "") else tf.constant(
                0, tf.float32)
        # make sure iou_threshold is a scalar, not a 1-D one-element tensor
        iou_threshold = tf.squeeze(iou_threshold) if len(
            iou_threshold.shape) == 1 else iou_threshold
        score_threshold = tensor_dict[node.inputs[4]] if (
            len(node.inputs) > 4 and node.inputs[4] != "") else tf.constant(
                float('-inf'))
        # make sure score_threshold is a scalar, not a 1-D one-element tensor
        score_threshold = tf.squeeze(score_threshold) if len(
            score_threshold.shape) == 1 else score_threshold
        center_point_box = node.attrs.get("center_point_box", 0)

        if center_point_box == 1:
            boxes_t = tf.transpose(boxes, perm=[0, 2, 1])
            x_centers = tf.slice(boxes_t, [0, 0, 0], [-1, 1, -1])
            y_centers = tf.slice(boxes_t, [0, 1, 0], [-1, 1, -1])
            widths = tf.slice(boxes_t, [0, 2, 0], [-1, 1, -1])
            heights = tf.slice(boxes_t, [0, 3, 0], [-1, 1, -1])
            y1 = tf.subtract(y_centers, tf.divide(heights, 2))
            x1 = tf.subtract(x_centers, tf.divide(widths, 2))
            y2 = tf.add(y_centers, tf.divide(heights, 2))
            x2 = tf.add(x_centers, tf.divide(widths, 2))
            boxes_t = tf.concat([y1, x1, y2, x2], 1)
            boxes = tf.transpose(boxes_t, perm=[0, 2, 1])

        def create_nodes(boxes, scores, max_output_boxes_per_class,
                         iou_threshold, score_threshold, result):
            # get number of batches in boxes
            num_batches = tf_shape(boxes)[0]
            for batch_i in tf.range(num_batches):
                # get boxes in batch_i only
                tf_boxes = tf.squeeze(tf.gather(boxes, [batch_i]), axis=0)
                # get scores of all classes in batch_i only
                batch_i_scores = tf.squeeze(tf.gather(scores, [batch_i]),
                                            axis=0)
                # get number of classes in batch_i only
                num_classes = tf_shape(batch_i_scores)[0]
                for class_j in tf.range(num_classes):
                    # get scores in class_j for batch_i only
                    tf_scores = tf.squeeze(tf.gather(batch_i_scores,
                                                     [class_j]),
                                           axis=0)
                    # get the selected boxes indices
                    selected_indices = tf.image.non_max_suppression(
                        tf_boxes, tf_scores, max_output_boxes_per_class,
                        iou_threshold, score_threshold)
                    # add batch and class information into the indices
                    output = tf.transpose(
                        [tf.cast(selected_indices, dtype=tf.int64)])
                    paddings = tf.constant([[0, 0], [1, 0]])
                    output = tf.pad(output,
                                    paddings,
                                    constant_values=tf.cast(class_j,
                                                            dtype=tf.int64))
                    output = tf.pad(output,
                                    paddings,
                                    constant_values=tf.cast(batch_i,
                                                            dtype=tf.int64))
                    # tf.function will auto-convert "result" from a variable
                    # to a placeholder, so there is no need to use assign here
                    result = output if tf.equal(batch_i, 0) and tf.equal(
                        class_j, 0) else tf.concat([result, output], 0)

            return result

        result = tensor_dict[get_variable_name(node, cls.var_name)]
        return [
            create_nodes(boxes, scores, max_output_boxes_per_class,
                         iou_threshold, score_threshold, result)
        ]
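
When `center_point_box == 1`, the handler converts `[x_center, y_center, width, height]` boxes into the `[y1, x1, y2, x2]` corner format it feeds to tf.image.non_max_suppression. The same arithmetic in a tiny NumPy sketch (values made up):

    import numpy as np

    # One batch, one box in [x_center, y_center, width, height] format.
    boxes = np.array([[[10., 20., 4., 6.]]], dtype=np.float32)

    x_c, y_c, w, h = np.split(boxes, 4, axis=-1)
    corners = np.concatenate(
        [y_c - h / 2, x_c - w / 2, y_c + h / 2, x_c + w / 2], axis=-1)
    print(corners[0, 0])  # [17.  8. 23. 12.]
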
Example #7
 def _custom_getter(cls,
                    getter,
                    name,
                    node=None,
                    tensor_dict=None,
                    is_bidirectional=None,
                    *args,
                    **kwargs):
     names = name.split("/")
     if is_bidirectional:
         if "fw" in names:
             index = 0
         elif "bw" in names:
             index = 1
         else:
              raise RuntimeError(
                  "Cannot get {} for bidirectional: "
                  "neither fw nor bw is in the name scope.".format(names[-1]))
     if names[-1] == "kernel":
         # onnx W[zrh], R[zrh]
         if is_bidirectional:
             w = tf.split(tensor_dict[node.inputs[1]], 2)[index]
             r = tf.split(tensor_dict[node.inputs[2]], 2)[index]
         else:
             w = tensor_dict[node.inputs[1]]
             r = tensor_dict[node.inputs[2]]
         w_z, w_r, w_h = tf.split(tf.squeeze(w), 3)
         r_z, r_r, r_h = tf.split(tf.squeeze(r), 3)
         if names[-2] == "gates":
             weight_var = tensor_dict[get_variable_name(
                 node, cls.weight_gates_var_name)]
             new_w = tf.transpose(tf.concat([w_r, w_z], 0))
             new_r = tf.transpose(tf.concat([r_r, r_z], 0))
         elif names[-2] == "candidate":
             weight_var = tensor_dict[get_variable_name(
                 node, cls.weight_candidate_var_name)]
             new_w = tf.transpose(w_h)
             new_r = tf.transpose(r_h)
          else:
              # unrecognized kernel scope: the None placeholders used here in
              # the original would break tf.concat, so return the stored
              # variable without reassembling it from the ONNX inputs
              weight_var = tensor_dict[get_variable_name(
                  node, cls.weight_other_var_name)]
              return weight_var
          weight_var.assign(tf.concat([new_w, new_r], 0))
          return weight_var
     if names[-1] == "bias":
         if names[-2] == "gates":
             bias_var = tensor_dict[get_variable_name(
                 node, cls.bias_gates_var_name)]
         elif names[-2] == "candidate":
             bias_var = tensor_dict[get_variable_name(
                 node, cls.bias_candidate_var_name)]
         if len(node.inputs) >= 4:
             # onnx Wb[zrh], Rb[zrh]
             if is_bidirectional:
                 b = tf.split(tensor_dict[node.inputs[3]], 2)[index]
             else:
                 b = tensor_dict[node.inputs[3]]
             w_b, r_b = tf.split(tf.squeeze(b), 2)
             w_b_z, w_b_r, w_b_h = tf.split(w_b, 3)
             r_b_z, r_b_r, r_b_h = tf.split(r_b, 3)
             if names[-2] == "gates":
                 w_b = tf.transpose(tf.concat([w_b_r, w_b_z], 0))
                 r_b = tf.transpose(tf.concat([r_b_r, r_b_z], 0))
             elif names[-2] == "candidate":
                 w_b = tf.transpose(w_b_h)
                 r_b = tf.transpose(r_b_h)
             bias_var.assign(tf.add(w_b, r_b))
         else:
             bias_var.assign(
                 tf.zeros([node.attrs["hidden_size"]], tf.float32))
         return bias_var
     return getter(name, *args, **kwargs)
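
Example #7 splits the ONNX `z, r, h` gate layout and regroups it into the two kernels the TF GRU cell uses: one for the `r`/`z` gates and one for the `h` candidate. A standalone sketch of that regrouping with illustrative shapes:

    import tensorflow as tf

    hidden_size, input_size = 2, 3
    # ONNX GRU W: [num_directions, 3*hidden_size, input_size], gates z, r, h.
    w = tf.random.normal([1, 3 * hidden_size, input_size])
    r = tf.random.normal([1, 3 * hidden_size, hidden_size])

    w_z, w_r, w_h = tf.split(tf.squeeze(w, axis=0), 3)
    r_z, r_r, r_h = tf.split(tf.squeeze(r, axis=0), 3)

    # The gates kernel wants r then z; the candidate kernel takes h alone.
    gate_kernel = tf.concat([tf.transpose(tf.concat([w_r, w_z], 0)),
                             tf.transpose(tf.concat([r_r, r_z], 0))], 0)
    candidate_kernel = tf.concat([tf.transpose(w_h), tf.transpose(r_h)], 0)
    print(gate_kernel.shape, candidate_kernel.shape)  # (5, 4) (5, 2)
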