Example #1
def from_updates(grads_and_vars, optimizer, name=None):
    """Create a graph from the updates."""
    name = 'Graph' if name is None else name
    execute_ws = workspace.get_workspace()
    graph_def = dragon_pb2.GraphDef(name=name)
    GraphLib._add_updates(graph_def, grads_and_vars, optimizer)
    GraphLib._add_device(graph_def)
    graph_def.name = execute_ws.create_graph(graph_def)
    return GraphExec(graph_def, execute_ws)
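A hypothetical call site for `from_updates` might look like the following sketch. Only the `GraphLib.from_updates` signature is taken from the snippet above; the tensor constructors and the `dragon.optimizers.SGD` name are assumptions for illustration.

```python
# Minimal sketch (hypothetical): build a graph that applies optimizer
# updates to one (gradient, variable) pair.
import dragon

weight = dragon.Tensor((3, 3), dtype='float32')  # variable to update (assumed ctor)
grad = dragon.Tensor((3, 3), dtype='float32')    # its gradient
optimizer = dragon.optimizers.SGD(lr=0.01)       # any optimizer object (assumed name)

graph = GraphLib.from_updates([(grad, weight)], optimizer, name='TrainStep')
```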
Example #2
def from_outputs(outputs, name=None):
    """Create a graph from the output tensors."""
    outputs = nest.flatten(outputs)
    name = 'Graph' if name is None else name
    execute_ws = workspace.get_workspace()
    graph_def = dragon_pb2.GraphDef(name=name)
    GraphLib._add_outputs(graph_def, outputs)
    GraphLib._add_grads(graph_def, outputs)
    GraphLib._add_device(graph_def)
    GraphLib._add_optimization(graph_def)
    graph_def.name = execute_ws.create_graph(graph_def)
    return GraphExec(graph_def, execute_ws)
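`from_outputs` can be sketched the same way: hand it the symbolic results and it compiles the ops needed to produce them. The tensor construction below is again an assumption; only the `GraphLib.from_outputs` call follows the snippet.

```python
# Hypothetical sketch: compile the ops that produce `y` into a graph.
import dragon

a = dragon.Tensor((2, 3), dtype='float32')  # assumed constructor
b = dragon.Tensor((2, 3), dtype='float32')
y = a + b  # symbolic output; nest.flatten() also accepts a single tensor

graph = GraphLib.from_outputs([y], name='AddGraph')
```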
Example #3
def from_onnx(model, name=None):
    """Create a graph from the onnx model."""
    execute_ws = workspace.get_workspace()
    graph_str = execute_ws._impl.PrepareONNXModel(model)
    graph_def = dragon_pb2.GraphDef()
    graph_def.ParseFromString(graph_str)
    graph_def.name = 'Graph' if name is None else name
    GraphLib._add_device(graph_def)
    GraphLib._add_optimization(graph_def)
    for input in graph_def.input:
        execute_ws.create_tensor(input)
    graph_def.name = execute_ws.create_graph(graph_def)
    return GraphExec(graph_def, execute_ws)
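A plausible call site for `from_onnx`, assuming `model` is the serialized model itself (the snippet passes it straight to `PrepareONNXModel`, so whether it expects bytes or a file path is an assumption here):

```python
# Hypothetical sketch: read a serialized ONNX model and build a graph.
with open('model.onnx', 'rb') as f:  # placeholder file name
    model_bytes = f.read()

graph = GraphLib.from_onnx(model_bytes, name='ONNXGraph')
```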
Example #4
    def __init__(self, model, device, **kwargs):
        """Create a ``BackendRep``.

        Parameters
        ----------
        model : str
            The path to the onnx model file.
        device : onnx.Device
            The executing device.

        """
        if not isinstance(device, Device):
            device = Device(device)
        graph_str = workspace.get_workspace().PrepareONNXModel(model)
        graph_def = dragon_pb2.GraphDef()
        graph_def.ParseFromString(graph_str)
        if device.type == DeviceType.CPU:
            device_type, device_index = 'cpu', 0
        elif device.type == DeviceType.CUDA:
            device_type, device_index = 'cuda', device.device_id
        else:
            raise ValueError('Unsupported device type: ' + str(device.type))
        with context.device(device_type, device_index):
            self._function = function_lib.Function(name='ONNXGraph') \
                                         .import_from(graph_def)
        self._input_dict = collections.OrderedDict([
            (impl.name,
             EagerTensor(impl=impl,
                         device=device_spec.DeviceSpec(device_type,
                                                       device_index)))
            for impl in self._function.inputs
        ])
        self._output_dict = collections.OrderedDict([
            (impl.name,
             EagerTensor(impl=impl,
                         device=device_spec.DeviceSpec(device_type,
                                                       device_index)))
            for impl in self._function.outputs
        ])
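Constructing the backend representation could then look like the sketch below. Since the constructor wraps plain values in `onnx.Device`, a device string such as `'CUDA:0'` should be accepted; the file name is a placeholder.

```python
# Hypothetical usage: the constructor parses 'CUDA:0' into a Device.
rep = BackendRep('model.onnx', 'CUDA:0')
print(list(rep._input_dict.keys()))   # graph input names (private attrs
print(list(rep._output_dict.keys()))  # shown in the snippet above)
```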
Example #5
def export(
    inputs,
    outputs,
    f,
    input_names=None,
    output_names=None,
    input_shapes=None,
    opset_version=None,
    verbose=False,
    enable_onnx_checker=True,
):
    """Export the recorded graph to an onnx model.

    Enter into the record mode to export operators into an onnx model:

    ```python
    import dragon
    x = dragon.constant([1, 2, 3])
    with dragon.onnx.record():
        y = x * x
    dragon.onnx.export(inputs=[x], outputs=[y], f='model.onnx')
    ```

    Parameters
    ----------
    inputs : Union[Sequence, Dict]
        The model inputs.
    outputs : Union[Sequence, Dict]
        The model outputs.
    f : str
        The filename for exporting the model.
    input_names : Sequence[str], optional
        The names for the inputs.
    output_names : Sequence[str], optional
        The names for the outputs.
    input_shapes : Union[Sequence, Dict], optional
        The optional rewritten shapes for the inputs.
    opset_version : int, optional
        The version of the operator set.
    verbose : bool, optional, default=False
        Whether to print the debug string of the graph.
    enable_onnx_checker : bool, optional, default=True
        Whether to check if the model is valid.

    """
    # Process the inputs.
    if isinstance(inputs, dict):
        if input_names is not None:
            raise ValueError(
                'Expected the input names from <inputs>.\n'
                'You should set <input_names> to None.')
        inputs, input_names = list(inputs.values()), list(inputs.keys())
    else:
        inputs = nest.flatten(inputs)

    # Process the outputs.
    if isinstance(outputs, dict):
        if output_names is not None:
            raise ValueError(
                'Expected the output names from <outputs>.\n'
                'You should set <output_names> to None.')
        outputs, output_names = list(outputs.values()), list(outputs.keys())
    else:
        outputs = nest.flatten(outputs)

    if eager_context.executing_eagerly():
        op_defs = []
        tape = backprop.get_default_tape()
        if tape is None:
            raise RuntimeError('Please enter the recording scope with ``onnx.frontend.record()``.')
        for op_def in tape._defs:
            op_defs.append(dragon_pb2.OperatorDef())
            op_defs[-1].ParseFromString(op_def.SerializeAs())
        graph_def = dragon_pb2.GraphDef(op=op_defs)
    else:
        symbolic_outputs = []
        for output in outputs:
            if types.is_symbolic_tensor(output):
                symbolic_outputs.append(output)
        graph_func = function_lib.create_function(outputs=symbolic_outputs)
        graph_func.callback()
        graph_def = graph_func.graph_def
        graph_def.name = ''

    # Add inputs and outputs.
    for i, input in enumerate(inputs):
        if hasattr(input, 'id'):
            graph_def.input.extend([input.id])
        elif input_names is not None:
            graph_def.input.extend([input_names[i]])

    for i, output in enumerate(outputs):
        if hasattr(output, 'id'):
            graph_def.output.extend([output.id])
        elif output_names is not None:
            graph_def.output.extend([output_names[i]])

    # Make value info from inputs and outputs.
    value_names = graph_def.input[:] + graph_def.output[:]
    value_info = dict([(k, (helper.tensor_type(v.dtype), v.shape))
                       for k, v in zip(value_names, inputs + outputs)])

    # Extract the constants from inputs and outputs.
    constants = collections.OrderedDict()
    for k, v in zip(value_names, inputs + outputs):
        if isinstance(v, numpy.ndarray):
            constants[k] = v

    # Export.
    model = graph_def_to_onnx_model(
        graph_def=graph_def,
        input_names=input_names,
        output_names=output_names,
        input_shapes=input_shapes,
        constants=constants,
        value_info=value_info,
        opset_version=opset_version,
        workspace=workspace_util.get_workspace(),
        verbose=verbose,
        enable_onnx_checker=enable_onnx_checker,
    )
    serialization.save_bytes(serialization.serialize_proto(model), f)
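As the docstring notes, passing dicts supplies the names directly, in which case `input_names`/`output_names` must stay `None`. A short sketch of that form, reusing only the API shown above:

```python
# Sketch of the dict form (values are placeholders).
import dragon

x = dragon.constant([1., 2., 3.])
with dragon.onnx.record():
    y = x * x
# The dict keys become the input/output names, so the explicit
# <input_names>/<output_names> arguments are left as None.
dragon.onnx.export(inputs={'x': x}, outputs={'y': y}, f='square.onnx')
```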
Example #6
def __init__(self, name=None):
    self.callback = None
    self.graph_def = dragon_pb2.GraphDef()
    self.graph_def.name = name if name else 'Graph'
    self.graph_name = None  # Determined after creating the graph
    self.inputs, self.outputs = None, None
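The constructor only stages an empty `GraphDef`; the `from_*` factories above fill in the rest. A tiny sketch, where the class name `Function` is an assumption matching the `function_lib.Function` reference in Example #4:

```python
# Hypothetical sketch: the wrapper starts out empty.
fn = Function(name='MyGraph')
assert fn.graph_def.name == 'MyGraph'
assert fn.graph_name is None  # assigned once the graph is created
assert fn.inputs is None and fn.outputs is None
```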
Example #7
def export(
    model,
    args,
    f,
    input_names=None,
    output_names=None,
    input_shapes=None,
    opset_version=None,
    verbose=False,
    enable_onnx_checker=True,
):
    """Export the recorded graph to an onnx model.

    The outputs will be obtained by calling ``model(*args)``;
    both tensors and numpy arrays are allowed:

    ```python
    import numpy as np
    from dragon.vm import torch
    class MyModule(torch.nn.Module):
        def __init__(self):
            super(MyModule, self).__init__()
            self.fc = torch.nn.Linear(3, 3)

        def forward(self, x):
            y = self.fc(x)
            return y, np.ones((2, 3))

    m = MyModule()
    x = torch.zeros(2, 3)
    torch.onnx.export(
        m,
        args=(x,),
        f='my_module.onnx',
        input_names=('x',),
        output_names=('y', 'ones'),
    )
    ```

    You can either specify the ``input_names``, or pass a *dict*
    as ``args``. In the same way, ``model`` may return a *dict*
    to specify the ``output_names``:

    ```python
    import numpy as np
    from dragon.vm import torch
    class MyModule(torch.nn.Module):
        def __init__(self):
            super(MyModule, self).__init__()
            self.fc = torch.nn.Linear(3, 3)

        def forward(self, inputs):
            y = self.fc(inputs['x'])
            return {'y': y, 'ones': np.ones((2, 3))}

    m = MyModule()
    x = torch.zeros(2, 3)
    torch.onnx.export(
        m,
        args={'x': x},
        f='my_module.onnx',
    )
    ```

    Also note that if a numpy array is given or returned, its name
    is required; otherwise, ONNX cannot export the value because it
    lacks an *id*.

    Parameters
    ----------
    model : dragon.vm.torch.nn.Module
        The module to export.
    args : Union[Sequence, Dict]
        The model inputs.
    f : str
        The filename for exporting the model.
    input_names : Sequence[str], optional
        The names for the inputs.
    output_names : Sequence[str], optional
        The names for the outputs.
    input_shapes : Union[Sequence, Dict], optional
        The optional rewritten shapes for the inputs.
    opset_version : int, optional
        The version of the operator set.
    verbose : bool, optional, default=False
        Whether to print the debug string of the graph.
    enable_onnx_checker : bool, optional, default=True
        Whether to check if the model is valid.

    """
    # Process the inputs.
    if isinstance(args, dict):
        if input_names is not None:
            raise ValueError('Expected the input names from <args>.\n'
                             'You should set <input_names> to None.')
        inputs, input_names, args = \
            list(args.values()), list(args.keys()), [args]
    else:
        inputs = args = nest.flatten(args)

    # Run the model to get the outputs.
    execute_ws = workspace.Workspace()
    execute_ws.merge_from(workspace.get_workspace())
    with execute_ws.as_default():
        with tapes.Tape() as model_tape:
            model_tape._exporting = True
            outputs = model(*args)

    # Process the outputs.
    if isinstance(outputs, dict):
        if output_names is not None:
            raise ValueError('Expected the output names from <outputs>.\n'
                             'You should set <output_names> to None.')
        outputs, output_names = list(outputs.values()), list(outputs.keys())
    else:
        outputs = nest.flatten(outputs)

    # Make graph def.
    ops_def, graph_def = [], dragon_pb2.GraphDef()

    # Add inputs and outputs.
    for i, input in enumerate(inputs):
        if hasattr(input, 'id'):
            graph_def.input.extend([input.id])
        elif input_names is not None:
            graph_def.input.extend([input_names[i]])

    for i, output in enumerate(outputs):
        if hasattr(output, 'id'):
            graph_def.output.extend([output.id])
        elif output_names is not None:
            graph_def.output.extend([output_names[i]])

    # Add operators.
    for op_def in model_tape.get_elements():
        ops_def.append(dragon_pb2.OperatorDef())
        ops_def[-1].ParseFromString(op_def.SerializeAs())
    graph_def.op.extend(ops_def)

    # Make value info from inputs and outputs.
    value_names = graph_def.input[:] + graph_def.output[:]
    value_info = dict([(k, (helper.tensor_type(v.dtype), v.shape))
                       for k, v in zip(value_names, inputs + outputs)])

    # Extract the constants from inputs and outputs.
    constants = collections.OrderedDict()
    for k, v in zip(value_names, inputs + outputs):
        if isinstance(v, numpy.ndarray):
            constants[k] = v

    # Export.
    with execute_ws.as_default():
        model = graph_def_to_onnx_model(
            graph_def=graph_def,
            input_names=input_names,
            output_names=output_names,
            input_shapes=input_shapes,
            constants=constants,
            value_info=value_info,
            opset_version=opset_version,
            workspace=execute_ws,
            verbose=verbose,
            enable_onnx_checker=enable_onnx_checker,
        )
        serialization.save_bytes(serialization.serialize_proto(model), f)
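After the export, the written file is a regular ONNX model, so it can be verified with the `onnx` package (assuming the docstring example above wrote `'my_module.onnx'`):

```python
# Follow-up sketch: validate the exported file with the onnx package.
import onnx

model = onnx.load('my_module.onnx')  # path from the docstring example
onnx.checker.check_model(model)      # raises if the model is invalid
print([i.name for i in model.graph.input])   # e.g. ['x']
print([o.name for o in model.graph.output])  # e.g. ['y', 'ones']
```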