Code example #1
File: init_ops.py Project: ORG-MARS/dragon
def eye(n, m=None, k=0, dtype='float32', **kwargs):
    r"""Return a tensor constructed as the identity matrix.

    .. math:: \text{out} \leftarrow \text{diag}(1, 1, ..., 1)

    The rows and cols of the matrix are determined by ``n`` and ``m``:

    ```python
    print(dragon.eye(2))     # [[1., 0.], [0., 1.]]
    print(dragon.eye(2, 3))  # [[1., 0., 0.], [0., 1., 0.]]
    ```

    The diagonal to populate is controlled by ``k``:

    * k > 0: Populate upper diagonal

    * k = 0: Populate main diagonal

    * k < 0: Populate lower diagonal

    Parameters
    ----------
    n : Union[int, dragon.Tensor]
        The number of output rows.
    m : Union[int, dragon.Tensor], optional
        The number of output cols.
    k : int, optional, default=0
        The index of the diagonal.
    dtype : str, optional, default='float32'
        The optional data type.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = ArgHelper.parse(locals())
    m = n if m is None else m
    trainable = args.pop('trainable') if 'trainable' in args else False
    op_lib = init_ops_lib.Eye
    if context.executing_eagerly():
        if types.is_tensor(n):
            n = int(n.get_value())
        if types.is_tensor(m):
            m = int(m.get_value())
        return op_lib \
            .instantiate(k=k, ndim=2, dtype=dtype) \
            .apply([n, m], trainable=trainable)
    else:
        args['n'] = args['m'] = None
        if types.is_tensor(n) or types.is_tensor(m):
            n = ops.scalar_to_tensor(n, 'int64')
            m = ops.scalar_to_tensor(m, 'int64')
            args['dims_descs'] = [n.id, m.id]
            args['extra_inputs'] = [n, m]
        else:
            args['dims'] = [n, m]
        return op_lib.blend(**args)
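
The docstring above describes the ``k`` offsets but only shows the default diagonal; below is a minimal usage sketch for the offset diagonals, assuming eager execution (the expected outputs are inferred from the docstring's convention, not taken from a run):

```python
import dragon

# k > 0 shifts the ones to a diagonal above the main one,
# k < 0 shifts them below it.
print(dragon.eye(3, k=1))   # [[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]]
print(dragon.eye(3, k=-1))  # [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]
```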
Code example #2
File: ops.py Project: ORG-MARS/dragon
def remove_binary_scalar(inputs):
    """Remove the scalar for binary ops."""
    if types.is_tensor(inputs[0]):
        inputs[1] = scalar_to_tensor(inputs[1], inputs[0].dtype)
    else:
        inputs[0] = scalar_to_tensor(inputs[0], inputs[1].dtype)
    return inputs
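
A minimal usage sketch for the helper above, assuming eager execution and that ``remove_binary_scalar`` and ``scalar_to_tensor`` live in the same module as shown here (the call site is illustrative, not the library's actual dispatch code):

```python
import dragon

a = dragon.constant([1.0, 2.0], dtype='float32')
# The python number 3 is replaced by a cached float32 scalar tensor,
# matching the dtype of the tensor operand.
inputs = remove_binary_scalar([a, 3])
```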
Code example #3
File: ops.py Project: seetaresearch/dragon
def convert_to_tensor(value, dtype=None, name=None):
    """Convert the given value to a tensor.

    Examples:

    ```python
    x = tf.convert_to_tensor([1, 2])
    y = tf.constant([1, 2])  # Equivalent
    ```

    Parameters
    ----------
    value : Union[number, Sequence, numpy.ndarray]
        The value to convert.
    dtype : str, optional
        The optional data type.
    name : str, optional
        The optional name.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    See Also
    --------
    `tf.constant(...)`_

    """
    if types.is_tensor(value):
        return value
    return constant_op.constant(value, dtype=dtype, name=name)
Code example #4
File: utils.py Project: ORG-MARS/dragon
 def generator(arguments):
     arg = arguments.get(name, None)
     if arg is None:
         return arguments
     if types.is_tensor(arg):
         ArgHelper._convert_to_desc(arguments, name, arg,
                                    as_target)
     return arguments
Code example #5
File: utils.py Project: ORG-MARS/dragon
 def generator(arguments):
     arg = arguments.get(name, None)
     if arg is None:
         return arguments
     key = name_v2 if name_v2 else name
     if name_v2:
         arguments.pop(name)
     if types.is_tensor(arg):
         ArgHelper._convert_to_desc(arguments, key, arg,
                                    as_target)
     else:
         if any([types.is_tensor(ele) for ele in arg]):
             ArgHelper._convert_to_descs(
                 arguments, dtype, key, arg, as_target)
         else:
             arguments[key] = arg
     return arguments
Code example #6
File: constant_ops.py Project: seetaresearch/dragon
def remove_scalars(inputs):
    """Remove the input scalars."""
    if len(inputs) == 2:
        if types.is_tensor(inputs[0]):
            inputs[1] = scalar(inputs[1], inputs[0].dtype)
        else:
            inputs[0] = scalar(inputs[0], inputs[1].dtype)
    return inputs
Code example #7
File: init_ops.py Project: ORG-MARS/dragon
def constant(value, dtype=None, shape=None, name=None):
    r"""Return a tensor initialized from the value.

    Examples:

    ```python
    a = dragon.constant(1)
    b = dragon.constant(1, dtype='float32', shape=[1, 1, 1])
    c = dragon.constant(numpy.ones((2, 3)))
    ```

    Parameters
    ----------
    value : array_like
        The value to initialize from.
    dtype : str, optional
        The optional data type.
    shape : Sequence[int], optional
        The optional tensor shape.
    name : str, optional
        The optional tensor name.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    # Determine the initial value.
    if types.is_tensor(value):
        initial_value = value.get_value()
    else:
        initial_value = value
    # Determine the data type and shape.
    initial_value = numpy.array(initial_value, dtype)
    if not hasattr(value, 'dtype'):
        # Discard the default 64 bit types.
        if initial_value.dtype == numpy.float64:
            initial_value = initial_value.astype(numpy.float32)
        elif initial_value.dtype == numpy.int64:
            initial_value = initial_value.astype(numpy.int32)
    # Determine the shape.
    if shape is not None:
        if initial_value.size == 1:
            # Broadcast with scalar value.
            scalar = initial_value.flatten()[0]
            initial_value = numpy.empty(shape, initial_value.dtype)
            initial_value.fill(scalar)
        else:
            # Reshape.
            initial_value = initial_value.reshape(shape)
    if context.executing_eagerly():
        return EagerTensor(initial_value, name=name)
    else:
        return Tensor.from_value(initial_value, dtype, name)
Code example #8
File: op_schema.py Project: seetaresearch/dragon
 def generator(arguments):
     arg = arguments.get(name, None)
     if arg is None:
         return arguments
     key = name_v2 if name_v2 else name
     if name_v2 is not None:
         arguments[key] = arguments.pop(name)
     if types.is_tensor(arg):
         OpSchema._convert_to_desc(arguments, key, arg,
                                   as_target)
     return arguments
Code example #9
    def feed_tensor(self, tensor, value, dtype=None, enforce_cpu=False):
        """Copy the value to tensor.

        Examples:

        ```python
        # Define a named tensor to feed
        x = dragon.Tensor(name='x')
        dragon.get_workspace().feed_tensor(x, 0)

        # Feed by specifying a tensor name
        # Note that the tensor implementation will be created if it does not exist
        dragon.get_workspace().feed_tensor('y', 1)
        print(dragon.get_workspace().has_tensor('y'))  # True
        ```

        Parameters
        ----------
        tensor : Union[dragon.Tensor, str]
            The tensor to feed.
        value : array_like
            The value to copy.
        dtype : str, optional
            The optional data type.
        enforce_cpu : bool, optional, default=False
            **True** to copy using the cpu context.

        """
        if types.is_tensor(value):
            # Steal the data if value is a tensor
            value = getattr(value, 'get_value')()
        # Determine the data type from argument or value
        if not isinstance(value, numpy.ndarray):
            dtype = 'float32' if dtype is None else dtype
        else:
            dtype = value.dtype if dtype is None else dtype
        if hasattr(tensor, 'dtype') and tensor.dtype is not None:
            if tensor.dtype not in mapping.TENSOR_TYPE_TO_NP_TYPE:
                raise TypeError('Unsupported data type: ' + tensor.dtype)
            dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[tensor.dtype]
        # Determine the copying device option
        if enforce_cpu is True:
            device_option = proto_util.get_device_option('cpu')
        else:
            device_option = proto_util.get_default_device_option()
            if device_option is None:
                device_option = proto_util.get_global_device_option()
        # Copy data to the backend
        self.FeedTensor(
            _stringify_object(tensor),
            numpy.array(value, dtype=dtype, copy=False),
            serialization.serialize_proto(device_option),
        )
Code example #10
File: utils.py Project: ORG-MARS/dragon
 def _convert_to_descs(arguments, dtype, name, arg, as_target=False):
     """Convert the argument to a sequence of descs."""
     if context.executing_eagerly():
         for i, ele in enumerate(arg):
             if types.is_tensor(ele):
                 arg[i] = ele.get_value().tolist()
         arguments[name] = arg
     else:
         descs = []
         for i, ele in enumerate(arg):
             if types.is_tensor(ele):
                 if as_target:
                     if 'extra_inputs' not in arguments:
                         arguments['extra_inputs'] = []
                     arguments['extra_inputs'] += [ele]
                 descs.append(ele.id)
             else:
                 descs.append(Tensor.from_value(ele, dtype, 'DescConst').id)
         if name in arguments:
             arguments.pop(name)
         arguments[name + '_descs'] = descs
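
A hedged illustration of the two branches above, using a hypothetical ``dims`` argument that mixes Python ints and tensors:

```python
import dragon

# Hypothetical argument dict handed to ArgHelper._convert_to_descs.
arguments = {'dims': [2, dragon.constant(3)]}

# Eager mode: tensor elements are read back into Python values, so the
# sequence stays under its original key, e.g. arguments['dims'] == [2, 3].
# Graph mode: every element becomes a tensor desc, the original key is
# dropped, and arguments['dims_descs'] holds the tensor ids instead
# (plus arguments['extra_inputs'] when as_target=True).
```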
Code example #11
File: ops.py Project: ORG-MARS/dragon
def scalar_to_tensor(input, dtype):
    """Return a cached scalar tensor."""
    if types.is_tensor(input):
        return input
    try:
        input = float(input)
    except (TypeError, ValueError):
        raise ValueError('<input> should be a python number, got {}.'.format(
            type(input).__name__))
    name = '/share/scalar/{}/{}'.format(dtype, str(input))
    ws = workspace.get_workspace()
    if not ws.has_tensor(name):
        ws.feed_tensor(name, numpy.array(input, dtype))
    return EagerTensor(impl=ws.GetTensor(name), trainable=False)
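
A brief sketch of the caching behavior: repeated calls with the same value and dtype reuse the workspace entry created on the first call (the cache names follow the format string above; eager execution is assumed):

```python
x = scalar_to_tensor(2, 'float32')  # feeds '/share/scalar/float32/2.0' once
y = scalar_to_tensor(2, 'float32')  # reuses the cached workspace tensor
z = scalar_to_tensor(2, 'int64')    # a different dtype gets its own entry
```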
Code example #12
def where(inputs, **kwargs):
    r"""Select the elements from two branches under the condition.

    .. math::
        \text{out}_{i} =
            \begin{cases}
                \text{input1}_{i}, & \text{ if } \text{condition}_{i} \\
                \text{input2}_{i}, & \text{ otherwise }
            \end{cases}

    Examples:

    ```python
    a = dragon.constant([1, 2, 3])
    b = dragon.constant([3, 2, 1])
    print(dragon.where([a > b, a, b]))  # [3, 2, 3]
    ```

    If only the ``condition`` is given,
    return the coordinates of ``True`` elements:

    ```python
    x = dragon.constant([[True, False, True],
                         [False, True, True]])
    print(dragon.where(x))  # [[0, 0], [0, 2], [1, 1], [1, 2]]
    ```

    Parameters
    ----------
    inputs : Sequence[dragon.Tensor]
        The condition, input1, and input2 tensors.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    See Also
    --------
    `dragon.nonzero(...)`_

    """
    if types.is_tensor(inputs) or len(inputs) == 1:
        return nonzero(inputs, **kwargs)
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute('Where', inputs)
    return OpLib.add('Where', inputs, **kwargs)
Code example #13
File: constant_ops.py Project: seetaresearch/dragon
def scalar(input, dtype):
    """Return a tensor initialized from the scalar."""
    if types.is_tensor(input):
        return input
    try:
        input = float(input)
    except (TypeError, ValueError):
        raise ValueError('<input> should be a python number, got {}.'.format(
            type(input).__name__))
    cached_name = '%s(%s)' % (dtype, input)
    default_ws = workspace.get_workspace()
    impl = default_ws.get_tensor(cached_name)
    if impl is None:
        impl = default_ws.create_tensor(cached_name)
        impl.FromNumpy(numpy.array(input, dtype), True)
    return Tensor((), dtype, impl=impl, symbolic=True)
Code example #14
 def _set_hyper(self, name, value, alias=None):
     """Set the specific hyper parameter."""
     if name not in self._hyper:
         self._hyper[name] = value
     else:
         if types.is_tensor(self._hyper[name]):
             workspace.get_workspace().feed_tensor(
                 self._hyper[name].id,
                 value,
                 dtype='float32',
                 enforce_cpu=True,
             )
         else:
             self._hyper[name] = value
     if alias and name not in self._alias:
         self._alias[name] = '/share/hyper/%s/%s' % (self._op_handle, alias)
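
A hedged sketch of the two paths above; ``optimizer`` is a hypothetical object that exposes ``_set_hyper`` and the ``_create_hypers`` shown in the next example:

```python
optimizer._set_hyper('lr', 0.1)   # stored as a plain float at first
optimizer._create_hypers()        # materializes 'lr' as a float32 tensor
optimizer._set_hyper('lr', 0.01)  # feeds 0.01 into the existing tensor, so
                                  # graphs referencing it see the new value
```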
Code example #15
 def _create_hypers(self):
     if self._hypers_created:
         return
     current_ws = workspace.get_workspace()
     for name, value in sorted(self._hyper.items()):
         if types.is_tensor(value) or callable(value):
             pass
         else:
             self._hyper[name] = \
                 self._create_weight(
                     name,
                     shape=[],
                     dtype=dtypes.float32,
                     trainable=False,
                     initializer=value)
         hyper = self._hyper[name]
         alias = self._alias.get(name, None)
         if alias is not None:
             current_ws.register_alias(hyper, alias)
     self._hypers_created = True
Code example #16
File: native.py Project: seetaresearch/dragon
def export(
    inputs,
    outputs,
    f,
    input_names=None,
    output_names=None,
    input_shapes=None,
    opset_version=None,
    verbose=False,
    enable_onnx_checker=True,
):
    """Export the recorded graph to an onnx model.

    Enter the record mode to export operators into an onnx model:

    ```python
    x = dragon.constant([1, 2, 3])
    with dragon.onnx.record():
        y = x * x
    dragon.onnx.export(inputs=[x], outputs=[y], f='model.onnx')
    ```

    Parameters
    ----------
    inputs : Union[Sequence, Dict]
        The model inputs.
    outputs : Union[Sequence, Dict]
        The model outputs.
    f : str
        The filename for the exported model.
    input_names : Sequence[str], optional
        The names of the inputs.
    output_names : Sequence[str], optional
        The names of the outputs.
    input_shapes : Union[Sequence, Dict], optional
        The optional shape overrides for the inputs.
    opset_version : int, optional
        The version of the operator set.
    verbose : bool, optional, default=False
        Whether to print the debug string of the graph.
    enable_onnx_checker : bool, optional, default=True
        Whether to check if the model is valid.

    """
    # Process the inputs.
    if isinstance(inputs, dict):
        if input_names is not None:
            raise ValueError('Expected the input names from <inputs>.\n'
                             'You should set <input_names> to None.')
        inputs, input_names = list(inputs.values()), list(inputs.keys())
    else:
        inputs = nest.flatten(inputs)

    # Process the outputs.
    if isinstance(outputs, dict):
        if output_names is not None:
            raise ValueError('Expected the output names from <outputs>.\n'
                             'You should set <output_names> to None.')
        outputs, output_names = list(outputs.values()), list(outputs.keys())
    else:
        outputs = nest.flatten(outputs)

    if eager_context.executing_eagerly():
        op_defs = []
        graph_tape = tapes.get_tape()
        if not hasattr(graph_tape, '_exporting'):
            raise RuntimeError('Please enter with ``onnx.frontend.record()``.')
        for op_def in graph_tape.get_elements():
            op_defs.append(dragon_pb2.OperatorDef())
            op_defs[-1].ParseFromString(op_def.SerializeAs())
        graph_def = dragon_pb2.GraphDef(op=op_defs)
    else:
        output_symbols = []
        for output in outputs:
            if types.is_tensor(output) and not output._is_variable:
                output_symbols.append(output)
        graph = GraphLib.from_outputs(output_symbols)
        graph.run()
        graph_def = graph._def
        graph_def.name = ''

    # Add inputs and outputs.
    for i, input in enumerate(inputs):
        if hasattr(input, 'id'):
            graph_def.input.extend([input.id])
        elif input_names is not None:
            graph_def.input.extend([input_names[i]])

    for i, output in enumerate(outputs):
        if hasattr(output, 'id'):
            graph_def.output.extend([output.id])
        elif output_names is not None:
            graph_def.output.extend([output_names[i]])

    # Make value info from inputs and outputs.
    value_names = graph_def.input[:] + graph_def.output[:]
    value_info = dict([(k, (helper.tensor_type(v.dtype), v.shape))
                       for k, v in zip(value_names, inputs + outputs)])

    # Extract the constants from inputs and outputs.
    constants = collections.OrderedDict()
    for k, v in zip(value_names, inputs + outputs):
        if isinstance(v, numpy.ndarray):
            constants[k] = v

    # Export.
    model = graph_def_to_onnx_model(
        graph_def=graph_def,
        input_names=input_names,
        output_names=output_names,
        input_shapes=input_shapes,
        constants=constants,
        value_info=value_info,
        opset_version=opset_version,
        workspace=workspace_util.get_workspace(),
        verbose=verbose,
        enable_onnx_checker=enable_onnx_checker,
    )
    serialization.save_bytes(serialization.serialize_proto(model), f)
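
The docstring only shows the sequence form; here is a minimal sketch of the dict form handled by the branches above, where the dict keys supply the names and ``input_names``/``output_names`` must stay ``None``:

```python
import dragon

x = dragon.constant([1, 2, 3])
with dragon.onnx.record():
    y = x * x
dragon.onnx.export(
    inputs={'x': x},   # keys become the graph input names
    outputs={'y': y},  # keys become the graph output names
    f='model.onnx',
)
```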
Code example #17
File: nn_ops.py Project: seetaresearch/dragon
def _normalize_spatial_args(
    name,
    values,
    num_total_dims,
    num_spatial_dims,
    start_axis,
):
    if name in ('ksize', 'strides', 'dilations'):
        if values is None:
            return [1] * num_total_dims
        else:
            values = nest.flatten(values)
            if len(values) != num_total_dims:
                defaults, n_provides = [1] * num_total_dims, len(values)
                if n_provides != num_spatial_dims:
                    if n_provides == 1:
                        values = values * num_spatial_dims
                    else:
                        raise ValueError(
                            'Expected 1, {} or {} values for <{}>.'.format(
                                num_spatial_dims, num_spatial_dims * 2, name))
                defaults[start_axis:start_axis + len(values)] = values
                return defaults
            return values
    elif name == 'padding':
        if isinstance(values, six.string_types):
            padding, pads = values.upper(), 0
        else:
            padding_tuple = nest.flatten(values)
            padding = 'VALID'
            if len(padding_tuple) == 1:
                pads = padding_tuple[0]
            elif len(padding_tuple) == num_spatial_dims:
                pads = padding_tuple
            elif len(padding_tuple) == (num_spatial_dims * 2):
                pads_l, pads_r = [], []
                for i in range(start_axis, start_axis + num_spatial_dims):
                    pads_l.append(padding_tuple[i * 2])
                    pads_r.append(padding_tuple[i * 2 + 1])
                pads = pads_l + pads_r
            else:
                raise ValueError(
                    'Expected 1, {} or {} values if <padding> is set as explicit pads.'
                    .format(num_spatial_dims, num_spatial_dims * 2))
        return padding, pads
    elif name == 'output_shape':
        if values is not None:
            if types.is_tensor(values):
                values_wide, is_eager = [], types.is_eager_tensor(values)
                for i in range(start_axis, start_axis + num_spatial_dims):
                    values_wide.append(
                        int(values[i]) if is_eager else values[i])
                return values_wide
            else:
                values = nest.flatten(values)
                if len(values) == num_spatial_dims:
                    return values
                elif len(values) == num_total_dims:
                    return values[start_axis:start_axis + num_spatial_dims]
                else:
                    raise ValueError(
                        'Expected {} or {} values for <output_shape>.'.format(
                            num_spatial_dims, num_spatial_dims * 2))
        return values
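
A hedged sketch of what the normalization yields for a 2D, NCHW-style call (``num_total_dims=4``, ``num_spatial_dims=2``, ``start_axis=2``); the expected values are inferred from the branches above, not from a run:

```python
# A single stride is broadcast over the spatial dims and padded with 1s
# for the batch/channel axes.
_normalize_spatial_args('strides', 2, 4, 2, 2)       # -> [1, 1, 2, 2]
# A string padding is upper-cased and paired with zero explicit pads.
_normalize_spatial_args('padding', 'same', 4, 2, 2)  # -> ('SAME', 0)
```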