Example #1
File: graph.py Project: xyuan/oneflow-1
    def __init__(
        self,
        nodes,
        model_save_dir,
        output_shapes=None,
        dtypes=None,
        opset=None,
        extra_opset=None,
    ):
        """Create Graph.
        Args:
            nodes: list of Node()
            output_shapes: dict of oneflow output shapes
            dtypes: dict of oneflow dtype
            input_maps: map (node_name, key) to value_names
        """
        self._nodes = []
        self._nodes_by_name = {}
        self._output_to_node_name = {}
        self.shapes = {}

        self._dtypes = dtypes

        self._model_save_dir = model_save_dir
        self._output_shapes = output_shapes
        self._opset = FindOpset(opset)

        if extra_opset is not None:
            util.MakeSure(isinstance(extra_opset, list), "invalid extra_opset")
        self._extra_opset = extra_opset

        self._order_sensitive_inputs = []
        self.outputs = []

        self.parent_graph = None
        self.contained_graphs = {}  # {node_name: {node_attribute_name: Graph}}

        ops = [Node(node, self) for node in nodes]
        self.ResetNodes(ops)

        for op in ops:
            if op.is_graph_output():
                self.AddGraphOutput(op.input[0])

        # add identity node after each output, in case it is renamed during conversion.
        for o in self.outputs:
            n = self.get_node_by_output_in_current_graph(o)
            new_output_name = id_util.UniqueStr(n.name + "_raw_output")
            n_shapes = n.output_shapes
            n_dtypes = n.output_dtypes
            body_graphs = n.graph.contained_graphs.pop(n.name, None)
            self.RemoveNode(n.name)

            new_outputs = [
                output if output != o else new_output_name
                for output in n.output
            ]
            # domain should be passed to new node
            new_node = self.MakeNode(
                n.type,
                n.input,
                outputs=new_outputs,
                attr=n.attr,
                name=n.name,
                skip_conversion=n._skip_conversion,
                dtypes=n_dtypes,
                shapes=n_shapes,
                domain=n.domain,
            )

            if body_graphs:
                for attr_name, body_graph in body_graphs.items():
                    body_graph.parent_graph = self
                    new_node.set_body_graph_as_attr(attr_name, body_graph)

            self.ReplaceAllInputs(self.get_nodes(), o, new_output_name)
            self.MakeNode(
                "Identity",
                [new_output_name],
                outputs=[o],
                op_name_scope=n.name + "_" + "graph_outputs",
            )
            self.CopyShape(new_output_name, o)
            self.CopyDtype(new_output_name, o)
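
The loop above rewires every graph output through an Identity node: the producing node's output tensor is renamed, and an Identity re-exposes the original output name, so later rewrites can rename the producer's output freely. A minimal standalone sketch of that idea, using a hypothetical dict-based node representation rather than the oneflow_onnx API:

def protect_outputs(nodes, graph_outputs):
    """Sketch: rename each graph output's producer tensor and alias it back via Identity."""
    identities = []
    for o in graph_outputs:
        producer = next(n for n in nodes if o in n["outputs"])
        raw = producer["name"] + "_raw_output_0"  # renamed producer output
        producer["outputs"] = [raw if t == o else t for t in producer["outputs"]]
        # A full implementation would also rewrite consumers of `o` to read `raw`,
        # as ReplaceAllInputs does above.
        identities.append(
            {"name": producer["name"] + "_graph_outputs", "op": "Identity",
             "inputs": [raw], "outputs": [o]}
        )
    return nodes + identities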
Example #2
File: graph.py Project: xyuan/oneflow-1
    def _GraphCheck(self):
        util.MakeSure(self.graph is not None,
                      "Node %s not belonging any graph", self.name)
Example #3
File: graph.py Project: zyg11/oneflow
    def TopologicalSort(self, ops):
        """Topological sort of graph."""
        # sort by name so the traversal order is deterministic
        ops.sort(key=lambda op: op.name)

        def _push_stack(stack, node, in_stack):
            stack.append(node)
            if node in in_stack:
                raise ValueError("Graph has cycles.")
            in_stack[node] = True

        def _get_unvisited_child(g, node, not_visited):
            for child in g[node]:
                if child in not_visited:
                    return child
            return -1

        n = len(ops)
        g = [[] for _ in range(n)]
        op_name_to_index = {}
        for i, op in enumerate(ops):
            op_name_to_index[op.name] = i

        for i, op in enumerate(ops):
            all_input = set(op.input_tensor_names)
            implicit_inputs = op.get_implicit_inputs()
            all_input |= set(implicit_inputs)
            # remove those empty inputs
            all_input = list(filter(lambda a: a != "", all_input))
            for inp in sorted(all_input):
                j = self.get_node_by_output(inp)
                util.MakeSure(
                    j is not None, "Cannot find node with output {}".format(inp)
                )
                if self.parent_graph and j.name not in op_name_to_index:
                    # there might be some outer-scoped inputs for an inner Graph.
                    pass
                else:
                    g[op_name_to_index[j.name]].append(i)

        # label for each op. highest = sink nodes.
        label = [-1 for _ in range(n)]
        stack = []
        in_stack = dict()
        not_visited = dict.fromkeys(range(n))
        label_counter = n - 1

        while not_visited:
            node = list(not_visited.keys())[0]
            _push_stack(stack, node, in_stack)
            while stack:
                node = _get_unvisited_child(g, stack[-1], not_visited)
                if node != -1:
                    _push_stack(stack, node, in_stack)
                else:
                    node = stack.pop()
                    in_stack.pop(node)
                    not_visited.pop(node)
                    label[node] = label_counter
                    label_counter -= 1

        ret = [x for _, x in sorted(zip(label, ops))]
        self.ResetNodes(ret)
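
The labeling scheme above is an iterative depth-first search: a node receives its label when it is popped (finished), counting down from n - 1, so sinks get the highest labels and sorting by label yields a topological order. A simplified standalone version on a plain adjacency list (illustrative only, not the Graph API):

def topological_order(g):
    """g: adjacency list, g[i] = children of node i. Returns node ids in topological order."""
    n = len(g)
    label = [-1] * n
    not_visited = dict.fromkeys(range(n))
    counter = n - 1
    while not_visited:
        start = next(iter(not_visited))
        stack, in_stack = [start], {start: True}
        while stack:
            child = next((c for c in g[stack[-1]] if c in not_visited), -1)
            if child != -1:
                if child in in_stack:
                    raise ValueError("Graph has cycles.")
                stack.append(child)
                in_stack[child] = True
            else:
                node = stack.pop()
                in_stack.pop(node)
                not_visited.pop(node)
                label[node] = counter
                counter -= 1
    return [i for _, i in sorted(zip(label, range(n)))]

# topological_order([[1, 2], [2], []]) == [0, 1, 2]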
Example #4
File: math.py Project: zhouyuegit/oneflow
def _MakeMinOrMaxOp(ctx,
                    op_type,
                    inputs,
                    outputs,
                    output_shapes=None,
                    output_dtypes=None):
    # cast any input whose dtype is not in supported_dtypes to float so Min/Max can handle it
    supported_dtypes = [
        onnx_pb.TensorProto.FLOAT,
        onnx_pb.TensorProto.FLOAT16,
        onnx_pb.TensorProto.DOUBLE,
    ]
    target_dtype = onnx_pb.TensorProto.FLOAT
    need_cast = False
    cast_inputs = []
    for inp in inputs:
        dtype = ctx.get_dtype(inp)
        util.MakeSure(dtype is not None, "dtype of {} is None".format(inp))
        if dtype not in supported_dtypes:
            cast_inp = ctx.MakeNode("Cast", [inp], attr={"to": target_dtype})
            cast_inputs.append(cast_inp.output_tensor_names[0])
            need_cast = True
        else:
            cast_inputs.append(inp)
    node = ctx.MakeNode(op_type, cast_inputs, shapes=output_shapes)
    actual_outputs = node.output_tensor_names
    if need_cast:
        origin_dtype = ctx.get_dtype(inputs[0])
        if output_dtypes is not None:
            origin_dtype = output_dtypes[0]
        ctx.set_dtype(node.output_tensor_names[0], target_dtype)
        cast_name = id_util.UniqueStr(node.name)
        cast_node = ctx.InsertNewNodeOnOutput("Cast",
                                              node.output_tensor_names[0],
                                              name=cast_name,
                                              to=origin_dtype)
        ctx.set_dtype(cast_node.output_tensor_names[0], origin_dtype)
        ctx.CopyShape(node.output_tensor_names[0],
                      cast_node.output_tensor_names[0])
        actual_outputs = cast_node.output_tensor_names
    ctx.MakeNode(
        "Identity",
        actual_outputs,
        outputs=outputs,
        shapes=output_shapes,
        dtypes=output_dtypes,
    )

    # ONNX Min/Max do not broadcast before opset 8.
    # Work around it by rewriting, e.g., y = min(x1, add(x2, sub(x1, x1)))
    # where x1, x2 are the inputs and x2 is a scalar: sub(x1, x1) yields a
    # zero tensor shaped like x1, adding x2 broadcasts it to that shape, and
    # the broadcast result feeds Min/Max.
    shapeo = ctx.get_shape(node.output_tensor_names[0])
    needs_broadcast_op = []
    has_correct_shape = []
    if ctx.opset < 8:
        for i, input_name in enumerate(node.input_tensor_names):
            if ctx.get_shape(input_name) != shapeo:
                needs_broadcast_op.append(i)
            else:
                has_correct_shape.append(input_name)
    if needs_broadcast_op:
        has_correct_shape = has_correct_shape[0]
        for i in needs_broadcast_op:
            input_node = node.input_nodes[i]
            # get a tensor with zeros (since there is no Fill op as of opset8)
            sub_node = ctx.MakeNode(
                "Sub",
                [has_correct_shape, has_correct_shape],
                op_name_scope=input_node.name,
            )
            # use add as 'broadcast' op
            add_node = ctx.MakeNode(
                "Add",
                [
                    input_node.output_tensor_names[0],
                    sub_node.output_tensor_names[0]
                ],
                op_name_scope=input_node.name,
            )
            node.input_tensor_names[i] = add_node.output_tensor_names[0]
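
A quick numeric check of the pre-opset-8 rewrite described in the comments above, with NumPy standing in for the ONNX Sub/Add/Min ops (illustration only):

import numpy as np

x1 = np.array([[1.0, 5.0], [3.0, -2.0]], dtype=np.float32)
x2 = np.float32(2.0)               # scalar input that needs broadcasting
zeros_like_x1 = x1 - x1            # what the Sub node produces
broadcast_x2 = x2 + zeros_like_x1  # what the Add node produces: x2 expanded to x1's shape
assert np.array_equal(np.minimum(x1, broadcast_x2), np.minimum(x1, x2))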
Example #5
File: graph.py Project: zyg11/oneflow
    def MakeNode(
        self,
        op_type,
        inputs,
        attr=None,
        output_count=1,
        outputs=None,
        skip_conversion=True,
        op_name_scope=None,
        name=None,
        shapes=None,
        dtypes=None,
        domain=constants.ONNX_DOMAIN,
        infer_shape_dtype=True,
    ):
        """Make a new onnx node in the graph"""
        if attr is None:
            attr = {}
        if shapes is None:
            shapes = []
        if dtypes is None:
            dtypes = []

        if name is None:
            name = id_util.UniqueStr(op_type)

        if op_name_scope:
            name = "_".join([op_name_scope, name])

        logger.debug("Making node: Name=%s, OP=%s", name, op_type)

        if outputs is None:
            outputs = [name + ":" + str(i) for i in range(output_count)]

        output_count = len(outputs)
        onnx_attrs = []
        for v in attr.values():
            # raw AttributeProto values must not be passed through attr
            assert not isinstance(v, AttributeProto)

        n = self.get_node_by_name(name)
        util.MakeSure(n is None, "name %s already exists in node: \n%s", name, n)
        for o in outputs:
            n = self.get_node_by_output_in_current_graph(o)
            util.MakeSure(
                n is None, "output tensor named %s already exists in node: \n%s", o, n
            )

        onnx_node = helper.make_node(
            op_type, inputs, outputs, name=name, domain=domain, **attr
        )

        if op_type in ["If", "Loop", "Scan"]:
            # ops that contain inner graphs must not be skipped during conversion
            skip_conversion = False

        node = Node(onnx_node, self, skip_conversion=skip_conversion)
        if onnx_attrs:
            _ = [node.set_attrs_onnx(a) for a in onnx_attrs]

        if shapes:
            util.MakeSure(
                len(shapes) == output_count,
                "output shape count %s not equal to output count %s",
                len(shapes),
                output_count,
            )
            for i in range(output_count):
                self.set_shape(node.output_tensor_names[i], shapes[i])

        if dtypes:
            util.MakeSure(
                len(dtypes) == output_count,
                "output dtypes count %s not equal to output count %s",
                len(dtypes),
                output_count,
            )
            for i in range(output_count):
                self.set_dtype(node.output_tensor_names[i], dtypes[i])

        if (not shapes or not dtypes) and infer_shape_dtype:
            self.UpdateNodeShapeDtype(node, override=False)

        logger.debug("Made node: %s\n%s", node.name, node.summary)
        self._nodes.append(node)
        return node
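
For reference, the call patterns visible in Example #4 suggest usage along these lines; the graph object `g` and the tensor names are placeholders, not taken from a real model:

cast = g.MakeNode("Cast", ["x:0"], attr={"to": onnx_pb.TensorProto.FLOAT})
add = g.MakeNode(
    "Add",
    [cast.output_tensor_names[0], "y:0"],
    shapes=[[2, 3]],                     # one shape per output
    dtypes=[onnx_pb.TensorProto.FLOAT],  # one dtype per output
    op_name_scope="demo",                # prepended to the generated node name
)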