Example #1
    def _build_graphs(self, *args, **kwargs):
        """Build the computation graphs for the given inputs."""
        attributes = self._attribute_cache[workspace.get_workspace()]
        input_signature = self._spec.input_signature
        args, kwargs = self._spec.separate_inputs(*args, **kwargs)
        # Replace tensor arguments with symbolic placeholders,
        # preferring the shape and dtype from the input signature.
        inputs = []
        for i in range(self._spec.num_inputs):
            input_spec = None
            if input_signature is not None:
                input_spec = input_signature[i]
            if not isinstance(args[i], Tensor) and input_spec is None:
                inputs.append(args[i])
                continue
            name = 'Input_%d' % (i + 1)
            shape = getattr(args[i], 'shape', None)
            dtype = getattr(args[i], 'dtype', None)
            if input_spec is not None:
                shape, dtype = input_spec.shape, input_spec.dtype
            inputs.append(Tensor(shape, dtype, name=name, symbolic=True))
        # Trace the function body in graph mode.
        with eager_context.graph_mode():
            outputs = self._run_function(*inputs, **kwargs)
        # Split the flattened outputs into symbolic tensors and the rest;
        # non-tensor outputs may carry pre-built graph executables.
        graph_outputs, dummies, graphs = [], [], []
        for output in nest.flatten(outputs):
            if isinstance(output, Tensor):
                graph_outputs.append(output)
            else:
                dummies.append(output)
        if graph_outputs:
            graphs.append(GraphLib.from_outputs(graph_outputs))
        for obj in dummies:
            if isinstance(obj, GraphExec):
                graphs.append(obj)
        # Cache the trace so subsequent calls can reuse it.
        attributes['inputs'] = inputs
        attributes['outputs'] = outputs
        attributes['graphs'] = graphs
        return graphs
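
For context, here is a minimal sketch of how a traced function like this is typically driven from user code, assuming the public ``dragon.function`` decorator routes its first concrete call through ``_build_graphs`` (the decorator and constructors below follow the public Dragon API, but treat the exact routing as an assumption):

```python
import dragon

# Hypothetical call site: the first invocation with concrete tensors
# triggers the trace above; later calls reuse the cached graphs.
@dragon.function
def double_plus_one(x):
    return x * 2.0 + 1.0

x = dragon.constant([1.0, 2.0, 3.0], dtype='float32')
y = double_plus_one(x)  # Builds, caches, and runs the graph.
```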
Example #2
    def __init__(self, model, device, **kwargs):
        """Create a ``BackendRep``.

        Parameters
        ----------
        model : str
            The path to the ONNX model file.
        device : onnx.Device
            The executing device.

        """
        if not isinstance(device, Device):
            device = Device(device)
        execute_ws = workspace.get_workspace()
        # Map the ONNX device onto a Dragon device context.
        if device.type == DeviceType.CPU:
            device_type, device_index = 'cpu', 0
        elif device.type == DeviceType.CUDA:
            device_type, device_index = 'cuda', device.device_id
        else:
            raise ValueError('Unsupported device type: %s' % device.type)
        with context.device(device_type, device_index):
            self._context = GraphLib.from_onnx(model)
        # Bind the graph inputs and outputs to workspace tensors.
        self._input_dict = collections.OrderedDict()
        self._output_dict = collections.OrderedDict()
        for input in self._context._def.input:
            impl = execute_ws.get_tensor(input)
            self._input_dict[input] = Tensor(impl=impl)
        for output in self._context._def.output:
            impl = execute_ws.get_tensor(output)
            self._output_dict[output] = Tensor(impl=impl)
        self._output_tuple = namedtupledict('Outputs', self._context._def.output)
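
A minimal usage sketch for the rep above, assuming it is reachable through the standard ONNX backend entry points (the ``prepare`` and ``run`` calls below follow the usual ``onnx.backend`` contract and are assumptions, not shown in the snippet):

```python
import dragon
import numpy as np

# Hypothetical call site: build a rep on CUDA device 0, then feed arrays
# in the order of the graph inputs and read back the named outputs.
rep = dragon.onnx.prepare('model.onnx', device='CUDA:0')
outputs = rep.run([np.ones((1, 3, 224, 224), dtype='float32')])
print(outputs[0].shape)
```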
Example #3
    def apply_gradients(self, grads_and_vars):
        """Apply the gradients on variables.

        Parameters
        ----------
        grads_and_vars : Sequence[Sequence[dragon.Tensor]]
            The sequence of ``(gradient, variable)`` pairs.

        """
        # In graph mode, defer the updates to a graph executable.
        if not context.executing_eagerly():
            return GraphLib.from_updates(grads_and_vars, self)

        # Group variables by their explicit weight decay.
        group_vars = collections.defaultdict(list)
        group_grads = collections.defaultdict(list)
        for grad, var in grads_and_vars:
            if grad is not None:
                weight_decay = getattr(var, '_weight_decay', None)
                if weight_decay is not None:
                    weight_decay = float(weight_decay)
                group_vars[weight_decay].append(var)
                group_grads[weight_decay].append(grad)

        # All-reduce the grads across the process group.
        process_group = distributed.get_group()
        if process_group is not None:
            grads = list(itertools.chain(*group_grads.values()))
            OpLib.execute('Collective', grads, outputs=grads,
                          operation='ALLREDUCE', reduction='MEAN',
                          **process_group.arguments)

        # Apply the updates group by group.
        for weight_decay, vars in group_vars.items():
            grads = group_grads[weight_decay]
            # Skip if the grads are all missing.
            if not grads:
                continue
            OpLib.execute(self._op_type, grads, outputs=vars,
                          name=self._name, weight_decay=weight_decay)
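
A minimal sketch of producing the ``(gradient, variable)`` pairs this method consumes, assuming the public ``dragon.GradientTape`` and ``dragon.optimizers.SGD`` counterparts (the variable and loss are illustrative; treat the exact signatures as assumptions):

```python
import dragon

w = dragon.zeros([4], dtype='float32')
optimizer = dragon.optimizers.SGD(lr=0.01)
with dragon.GradientTape() as tape:
    tape.watch(w)
    loss = dragon.math.sum(w * w)
grads = tape.gradient(loss, [w])
optimizer.apply_gradients(zip(grads, [w]))  # One (grad, var) pair.
```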
Example #4
def export(
    inputs,
    outputs,
    f,
    input_names=None,
    output_names=None,
    input_shapes=None,
    opset_version=None,
    verbose=False,
    enable_onnx_checker=True,
):
    """Export the recorded graph to an onnx model.

    Enter into the record mode to export operators into an onnx model:

    ```python
    x = dragon.constant([1, 2, 3])
    with dragon.onnx.record():
        y = x * x
    dragon.onnx.export(inputs=[x], outputs=[y], f='model.onnx')
    ```

    Parameters
    ----------
    inputs : Union[Sequence, Dict]
        The model inputs.
    outputs : Union[Sequence, Dict]
        The model outputs.
    f : str
        The filename for exporting model.
    input_names : Sequence[str], optional
        The name to the inputs.
    output_names : Sequence[str], optional
        The name to the outputs.
    input_shapes : Union[Sequence, Dict], optional
        The optional rewritten for input shapes.
    opset_version : int, optional
        The version of operator set.
    verbose : bool, optional, default=False
        Whether to print the debug string of graph.
    enable_onnx_checker : bool, optional, default=True
        Whether to check if model is valid.

    """
    # Process the inputs.
    if isinstance(inputs, dict):
        if input_names is not None:
            raise ValueError('Expected the input names from <inputs>.\n'
                             'You should set <input_names> to None.')
        inputs, input_names = list(inputs.values()), list(inputs.keys())
    else:
        inputs = nest.flatten(inputs)

    # Process the outputs.
    if isinstance(outputs, dict):
        if output_names is not None:
            raise ValueError('Expected the output names from <outputs>.\n'
                             'You should set <output_names> to None.')
        outputs, output_names = list(outputs.values()), list(outputs.keys())
    else:
        outputs = nest.flatten(outputs)

    if eager_context.executing_eagerly():
        op_defs = []
        graph_tape = tapes.get_tape()
        if not hasattr(graph_tape, '_exporting'):
            raise RuntimeError('Please enter the record mode with ``dragon.onnx.record()``.')
        for op_def in graph_tape.get_elements():
            op_defs.append(dragon_pb2.OperatorDef())
            op_defs[-1].ParseFromString(op_def.SerializeAs())
        graph_def = dragon_pb2.GraphDef(op=op_defs)
    else:
        output_symbols = []
        for output in outputs:
            if types.is_tensor(output) and not output._is_variable:
                output_symbols.append(output)
        graph = GraphLib.from_outputs(output_symbols)
        graph.run()
        graph_def = graph._def
        graph_def.name = ''

    # Add inputs and outputs.
    for i, input in enumerate(inputs):
        if hasattr(input, 'id'):
            graph_def.input.extend([input.id])
        elif input_names is not None:
            graph_def.input.extend([input_names[i]])

    for i, output in enumerate(outputs):
        if hasattr(output, 'id'):
            graph_def.output.extend([output.id])
        elif output_names is not None:
            graph_def.output.extend([output_names[i]])

    # Make value info from inputs and outputs.
    value_names = graph_def.input[:] + graph_def.output[:]
    value_info = {k: (helper.tensor_type(v.dtype), v.shape)
                  for k, v in zip(value_names, inputs + outputs)}

    # Extract the constants from inputs and outputs.
    constants = collections.OrderedDict()
    for k, v in zip(value_names, inputs + outputs):
        if isinstance(v, numpy.ndarray):
            constants[k] = v

    # Export.
    model = graph_def_to_onnx_model(
        graph_def=graph_def,
        input_names=input_names,
        output_names=output_names,
        input_shapes=input_shapes,
        constants=constants,
        value_info=value_info,
        opset_version=opset_version,
        workspace=workspace_util.get_workspace(),
        verbose=verbose,
        enable_onnx_checker=enable_onnx_checker,
    )
    serialization.save_bytes(serialization.serialize_proto(model), f)
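
Beyond the docstring example, the dict branches above let you name values directly; a minimal sketch (the dict keys become the exported value names, so ``input_names`` and ``output_names`` must stay ``None``):

```python
import dragon

x = dragon.constant([1, 2, 3])
with dragon.onnx.record():
    y = x * x
# Keys 'x' and 'y' name the graph inputs and outputs in the ONNX model.
dragon.onnx.export(inputs={'x': x}, outputs={'y': y}, f='model.onnx')
```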