Example #1
def _build_graphs(self, *args, **kwargs):
    attributes = self._attribute_cache[workspace.get_workspace()]
    input_signature = self._spec.input_signature
    args, kwargs = self._spec.separate_inputs(*args, **kwargs)
    # Replace concrete tensor arguments with symbolic placeholders
    # so that running the function records a graph instead of
    # executing the operators eagerly.
    inputs = []
    for i in range(self._spec.num_inputs):
        input_spec = None
        if input_signature is not None:
            input_spec = input_signature[i]
        if not isinstance(args[i], Tensor) and input_spec is None:
            # Non-tensor arguments without a spec are passed through as-is.
            inputs.append(args[i])
            continue
        name = 'Input_%d' % (i + 1)
        shape = getattr(args[i], 'shape', None)
        dtype = getattr(args[i], 'dtype', None)
        if input_spec is not None:
            shape, dtype = input_spec.shape, input_spec.dtype
        inputs.append(Tensor(shape, dtype, name=name, symbolic=True))
    # Trace the wrapped function under graph mode to collect symbolic outputs.
    with eager_context.graph_mode():
        outputs = self._run_function(*inputs, **kwargs)
    # Separate tensor outputs (which define a graph) from other return values.
    graph_outputs, dummies, graphs = [], [], []
    for output in nest.flatten(outputs):
        if isinstance(output, Tensor):
            graph_outputs.append(output)
        else:
            dummies.append(output)
    if len(graph_outputs) > 0:
        graphs.append(GraphLib.from_outputs(graph_outputs))
    for obj in dummies:
        if isinstance(obj, GraphExec):
            graphs.append(obj)
    # Cache the traced inputs, outputs and graphs for reuse on later calls.
    attributes['inputs'] = inputs
    attributes['outputs'] = outputs
    attributes['graphs'] = graphs
    return graphs
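
For intuition, here is a minimal, framework-free sketch of the tracing idea behind `_build_graphs`: concrete arguments are swapped for symbolic placeholders that carry only shape/dtype, the wrapped function is run once on them, and the operations it performs are recorded as a graph. The `SymbolicTensor` and `build_graph` names below are illustrative and not part of Dragon.

```python
import numpy


class SymbolicTensor:
    """Placeholder carrying only shape/dtype metadata, no data."""

    def __init__(self, shape, dtype, name):
        self.shape, self.dtype, self.name = shape, dtype, name
        self.ops = []  # operations recorded while tracing

    def __mul__(self, other):
        # Record the multiply instead of computing it.
        out = SymbolicTensor(self.shape, self.dtype, 'Mul:0')
        out.ops = self.ops + [('Mul', self.name, getattr(other, 'name', other))]
        return out


def build_graph(func, *concrete_args):
    """Re-run ``func`` on placeholders and return the recorded operations."""
    placeholders = [
        SymbolicTensor(getattr(a, 'shape', None), getattr(a, 'dtype', None),
                       'Input_%d' % (i + 1))
        for i, a in enumerate(concrete_args)
    ]
    output = func(*placeholders)
    return placeholders, output, output.ops


inputs, y, ops = build_graph(lambda x: x * x, numpy.ones((2, 3), 'float32'))
print(ops)  # [('Mul', 'Input_1', 'Input_1')]
```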
Example #2
def export(
    inputs,
    outputs,
    f,
    input_names=None,
    output_names=None,
    input_shapes=None,
    opset_version=None,
    verbose=False,
    enable_onnx_checker=True,
):
    """Export the recorded graph to an onnx model.

    Enter the record mode to export operators into an onnx model:

    ```python
    x = dragon.constant([1, 2, 3])
    with dragon.onnx.record():
        y = x * x
    dragon.onnx.export(inputs=[x], outputs=[y], f='model.onnx')
    ```

    Parameters
    ----------
    inputs : Union[Sequence, Dict]
        The model inputs.
    outputs : Union[Sequence, Dict]
        The model outputs.
    f : str
        The filename for exporting the model.
    input_names : Sequence[str], optional
        The names for the inputs.
    output_names : Sequence[str], optional
        The names for the outputs.
    input_shapes : Union[Sequence, Dict], optional
        The optional rewrites for the input shapes.
    opset_version : int, optional
        The version of the operator set.
    verbose : bool, optional, default=False
        Whether to print the debug string of the graph.
    enable_onnx_checker : bool, optional, default=True
        Whether to check if the model is valid.

    """
    # Process the inputs.
    if isinstance(inputs, dict):
        if input_names is not None:
            raise ValueError('Expected the input names from <inputs>.\n'
                             'You should set the <input_names> to None.')
        inputs, input_names = list(inputs.values()), list(inputs.keys())
    else:
        inputs = nest.flatten(inputs)

    # Process the outputs.
    if isinstance(outputs, dict):
        if output_names is not None:
            raise ValueError('Expected the output names from <outputs>.\n'
                             'You should set the <output_names> to None.')
        outputs, output_names = list(outputs.values()), list(outputs.keys())
    else:
        outputs = nest.flatten(outputs)

    if eager_context.executing_eagerly():
        op_defs = []
        graph_tape = tapes.get_tape()
        if not hasattr(graph_tape, '_exporting'):
            raise RuntimeError('Please enter with ``onnx.frontend.record()``.')
        for op_def in graph_tape.get_elements():
            op_defs.append(dragon_pb2.OperatorDef())
            op_defs[-1].ParseFromString(op_def.SerializeAs())
        graph_def = dragon_pb2.GraphDef(op=op_defs)
    else:
        output_symbols = []
        for output in outputs:
            if types.is_tensor(output) and not output._is_variable:
                output_symbols.append(output)
        graph = GraphLib.from_outputs(output_symbols)
        graph.run()
        graph_def = graph._def
        graph_def.name = ''

    # Add inputs and outputs.
    for i, input in enumerate(inputs):
        if hasattr(input, 'id'):
            graph_def.input.extend([input.id])
        elif input_names is not None:
            graph_def.input.extend([input_names[i]])

    for i, output in enumerate(outputs):
        if hasattr(output, 'id'):
            graph_def.output.extend([output.id])
        elif output_names is not None:
            graph_def.output.extend([output_names[i]])

    # Make value info from inputs and outputs.
    value_names = graph_def.input[:] + graph_def.output[:]
    value_info = dict([(k, (helper.tensor_type(v.dtype), v.shape))
                       for k, v in zip(value_names, inputs + outputs)])

    # Extract the constants from inputs and outputs.
    constants = collections.OrderedDict()
    for k, v in zip(value_names, inputs + outputs):
        if isinstance(v, numpy.ndarray):
            constants[k] = v

    # Export.
    model = graph_def_to_onnx_model(
        graph_def=graph_def,
        input_names=input_names,
        output_names=output_names,
        input_shapes=input_shapes,
        constants=constants,
        value_info=value_info,
        opset_version=opset_version,
        workspace=workspace_util.get_workspace(),
        verbose=verbose,
        enable_onnx_checker=enable_onnx_checker,
    )
    serialization.save_bytes(serialization.serialize_proto(model), f)
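
A hedged usage sketch for the dict form of `inputs`/`outputs` handled above: the dict keys supply the exported tensor names, so `input_names`/`output_names` must be left as `None`. The tensor setup mirrors the docstring example; the output path and opset version are illustrative assumptions, not values required by the API.

```python
import dragon

x = dragon.constant([1, 2, 3])
with dragon.onnx.record():
    y = x * x

# Dict keys ('data', 'prob') supply the input/output names,
# so input_names/output_names stay None here.
dragon.onnx.export(
    inputs={'data': x},
    outputs={'prob': y},
    f='model.onnx',     # illustrative output path
    opset_version=11,   # assumed available opset
    verbose=True,
)
```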