Example 1
def glorot_uniform(shape, mode='fan_in', scale=3.0, dtype='float32', **kwargs):
    r"""Return a tensor initialized from the glorot uniform distribution.

    .. math::
        \text{out} \sim \mathcal{U}(-\sqrt{\frac{scale}{\text{fan}}},
                                     \sqrt{\frac{scale}{\text{fan}}})

    Parameters
    ----------
    shape : Sequence[Union[int, dragon.Tensor]]
        The tensor shape.
    mode : {'fan_in', 'fan_out', 'fan_avg'}, optional
        The mode to compute fans.
    scale : float, optional, default=3.0
        The scale factor of the distribution.
    dtype : str, optional, default='float32'
        The optional data type.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    args['scale'] = float(scale)
    args['mode'] = mode.lower()
    if context.executing_eagerly():
        return OpLib.execute('GlorotUniform', [],
                             ndim=len(args['dims']),
                             **args)
    return OpLib.add('GlorotUniform', [], **args)
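A minimal usage sketch (assuming the op is exposed as `dragon.random.glorot_uniform`, as the Dragon namespace suggests):

```python
import dragon

# Sample a 3x4 weight tensor; with mode='fan_in' the bound is
# sqrt(scale / fan_in), so scale=3.0 gives the classic Glorot limit.
w = dragon.random.glorot_uniform(shape=(3, 4), mode='fan_in', scale=3.0)
print(w.shape)  # (3, 4)
```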
Example 2
def tile(inputs, repeats, **kwargs):
    """Repeat elements along each axis of input.

    Examples:

    ```python
    x = dragon.constant([[1, 2], [3, 4]])
    print(dragon.tile(x, repeats=(1, 2)))  # [[1, 2, 1, 2], [3, 4, 3, 4]]
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    repeats : Union[Sequence[int], dragon.Tensor]
        The repetition for each axis.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute(
            'Tile', inputs, ndim=len(args['repeats']), repeats=args['repeats'])
    return OpLib.add('Tile', **args)
Example 3
def permutation(limit, dtype='int64', **kwargs):
    r"""Return a tensor with value in the permuted range.

    Set :attr:`limit` to determine a range :math:`[0, \text{limit})`:

    ```python
    x = dragon.random.permutation(4)
    ```

    Parameters
    ----------
    limit : Union[number, dragon.Tensor]
        The end of interval.
    dtype : str, optional, default='int64'
        The optional data type.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    args['dtype'] = args['dtype'].lower()
    if context.executing_eagerly():
        return OpLib.execute('Permutation', [],
                             dtype=args['dtype'],
                             limit=args['limit'])
    return OpLib.add('Permutation', [], **args)
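For reference, a sketch of the expected output (values vary per run):

```python
import dragon

x = dragon.random.permutation(4, dtype='int64')
print(x)  # e.g. [2, 0, 3, 1], some ordering of [0, 4)
```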
Example 4
def truncated_normal(shape, mean=0, std=1, dtype='float32', **kwargs):
    r"""Return a tensor initialized from the truncated normal distribution.

    .. math:: \text{out} \sim \mathcal{TN}(\mu, \sigma^{2},
                                           \mu - 2\sigma, \mu + 2\sigma)

    Parameters
    ----------
    shape : Sequence[Union[int, dragon.Tensor]]
        The tensor shape.
    mean : number, optional, default=0
        The value of :math:`\mu`.
    std : number, optional, default=1
        The value of :math:`\sigma`.
    dtype : str, optional, default='float32'
        The optional data type.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    args['mean'], args['std'] = float(mean), float(std)
    if context.executing_eagerly():
        return OpLib.execute('TruncatedNormal', [],
                             ndim=len(args['dims']),
                             **args)
    return OpLib.add('TruncatedNormal', [], **args)
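A minimal sketch, assuming the op is exposed as `dragon.random.truncated_normal`:

```python
import dragon

# Samples are constrained to [mean - 2 * std, mean + 2 * std],
# i.e. [-2, 2] here, per the truncation bounds above.
x = dragon.random.truncated_normal(shape=(2, 3), mean=0, std=1)
```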
Example 5
def get_config(self, device, **kwargs):
    """Return the execution config."""
    # Build a cache key from the op type, the device and every
    # call argument that is not ignored.
    cache_key = self._op_type + '/' + str(device)
    for k, v in kwargs.items():
        if k not in self._ignore_keys:
            cache_key += '/' + str(v)
    try:
        return self._config_cache[cache_key]
    except KeyError:
        def_args, feed_dict = {}, {}
        def_args_getter = OpSchema.get_args(self._op_type)
        if def_args_getter is not None:
            def_args = def_args_getter(**kwargs)
        device = def_args.pop('device', device)
        check_device = def_args.pop('check_device', True)
        no_grad = def_args.pop('no_grad', False)
        # Replace each '*_desc' argument with a named placeholder
        # and record the tensor to feed at execution time.
        for k, v in def_args.items():
            if k.endswith('_desc') and v:
                name = k.split('_desc')[0]
                feed_dict[name] = v
                def_args[k] = '$NAME/' + name
        op_def = proto_util.make_operator_def(
            op_type=self._op_type,
            name=kwargs.get('name', ''),
            device_option=device.to_proto(False),
            cache_key=cache_key,
            to_impl=True, **def_args)
        config = {'def': op_def,
                  'device': device,
                  'check_device': check_device,
                  'no_grad': no_grad,
                  'feed_dict': feed_dict}
        # Cache the config so identical calls reuse it.
        self._config_cache[cache_key] = config
        return config
Example 6
def channel_norm(inputs,
                 mean,
                 std,
                 axis=-1,
                 dtype='float32',
                 perm=None,
                 **kwargs):
    """Apply the normalization to each channel of input.

    :attr:`axis` can be negative:

    ```python
    m = s = (1., 1., 1.)
    x = dragon.constant([1, 2, 3])
    print(dragon.nn.channel_norm(x, m, s, axis=0))   # [0., 1., 2.]
    print(dragon.nn.channel_norm(x, m, s, axis=-1))  # Equivalent
    ```

    If :attr:`perm` is provided, :attr:`axis` is selected from the output layout:

    ```python
    m, s = (1., 2., 3.), (1., 1., 1.)
    x = dragon.constant([[1, 2, 3]])
    # 3 values are provided to normalize the last axis,
    # which has length 1, so only the first value is taken
    print(dragon.nn.channel_norm(x, m, s, perm=(1, 0)))  # [[0.], [1.], [2.]]
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    mean : Sequence[float], required
        The mean to subtract.
    std : Sequence[float], required
        The standard deviation to divide.
    axis : int, optional, default=-1
        The channel axis.
    dtype : str, optional, default='float32'
        The output data type.
    perm : Sequence[Union[int, dragon.Tensor]], optional
        The output permutation.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute('ChannelNorm',
                             inputs,
                             axis=axis,
                             mean=mean,
                             std=std,
                             dtype=dtype,
                             ndim=len(args['perm']) if perm is not None else 0,
                             perm=args['perm'])
    return OpLib.add('ChannelNorm', **args)
Example 7
def assign(inputs, starts=None, sizes=None, copy=False, **kwargs):
    r"""Assign the value to input.

    .. math:: \text{input}[\text{start}:\text{start} + \text{size}, ...] = \text{value}

    Parameters
    ----------
    inputs : Sequence[dragon.Tensor]
        The input and value tensor.
    starts : Union[Sequence[int], dragon.Tensor], optional
        The start location for each dimension.
    sizes : Union[Sequence[int], dragon.Tensor], optional
        The number of elements from start.
    copy : bool, optional, default=False
        Return a new tensor or call in-place.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    inputs = constant_ops.remove_scalars(inputs)
    if context.executing_eagerly():
        starts = args['starts'] if starts is not None else [0]
        sizes = args['sizes'] if sizes is not None else [-1]
        return OpLib.execute(
            'Assign', inputs, outputs=[None if copy else inputs[0]],
            ndim=len(starts), starts=starts, sizes=sizes)
    return OpLib.add('Assign', **args)
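A minimal sketch of the slicing semantics (assuming `dragon.assign`, `dragon.zeros` and `dragon.constant` are the public aliases):

```python
import dragon

x = dragon.zeros((4,), 'float32')
v = dragon.constant([1., 2.], 'float32')
# Write ``v`` into x[1:3]; with the default copy=False
# the input tensor is updated in place.
y = dragon.assign([x, v], starts=[1], sizes=[2])
print(y)  # [0., 1., 2., 0.]
```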
Example 8
def broadcast(inputs, root=0, group=None, **kwargs):
    """Broadcast the input from root node in a group.

    Parameters
    ----------
    inputs : dragon.Tensor
        The tensor to broadcast.
    root : int, optional, default=0
        The node index in the group.
    group : ProcessGroup, optional
        The communication group.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    if group is None:
        group = distributed.get_group()
    if group is None:
        raise ValueError('<group> is required.')
    coll_args = group.arguments.copy()
    coll_args['root'] = root
    coll_args['operation'] = 'BROADCAST'
    if context.executing_eagerly():
        return OpLib.execute('Collective', inputs, **coll_args)
    kwargs.update(coll_args)
    return OpLib.add('Collective', inputs, **kwargs)
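A hypothetical sketch of typical use under MPI; `new_group`, `get_rank` and `as_default` are assumptions about the distributed module, not confirmed by this snippet:

```python
import dragon
from dragon import distributed

# Launched via mpirun with 2 processes; after the call
# every rank holds rank 0's copy of ``x``.
x = dragon.constant([distributed.get_rank()], 'float32')
group = distributed.new_group(ranks=[0, 1])
with group.as_default():
    x = distributed.broadcast(x, root=0)
```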
Example 9
def random_uniform(shape, low=0, high=1, dtype='float32', **kwargs):
    r"""Return a tensor initialized from the uniform distribution.

    .. math:: \text{out} \sim \mathcal{U}(\alpha, \beta)

    Parameters
    ----------
    shape : Sequence[Union[int, dragon.Tensor]]
        The tensor shape.
    low : number, optional, default=0
        The value of :math:`\alpha`.
    high : number, optional, default=1
        The value of :math:`\beta`.
    dtype : str, optional, default='float32'
        The optional data type.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    args['low'], args['high'] = float(low), float(high)
    if context.executing_eagerly():
        return OpLib.execute('RandomUniform', [],
                             ndim=len(args['dims']),
                             **args)
    return OpLib.add('RandomUniform', [], **args)
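A minimal sketch (assuming the public alias is `dragon.random.uniform`):

```python
import dragon

# Values drawn from U(-1, 1).
x = dragon.random.uniform(shape=(2, 3), low=-1, high=1)
```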
Example 10
def fill(shape, value=0, dtype='float32', **kwargs):
    r"""Return a tensor filled with the scalar value.

    .. math:: \text{out} \leftarrow \text{value}

    Parameters
    ----------
    shape : Sequence[Union[int, dragon.Tensor]]
        The tensor shape.
    value : number, optional, default=0
        The value to fill.
    dtype : str, optional, default='float32'
        The optional data type.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    args['value'] = float(value)
    if context.executing_eagerly():
        return OpLib.execute('Fill', [], ndim=len(args['dims']), **args)
    return OpLib.add('Fill', [], **args)
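A minimal sketch, assuming `dragon.fill` is the public alias:

```python
import dragon

x = dragon.fill(shape=(2, 3), value=7, dtype='int32')
print(x)  # [[7, 7, 7], [7, 7, 7]]
```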
Example 11
def sync_batch_norm(inputs,
                    axis=-1,
                    momentum=0.9,
                    epsilon=1e-5,
                    use_stats=-1,
                    process_group=None,
                    **kwargs):
    r"""Apply the batch normalization with synced statistics.
    `[Ioffe & Szegedy, 2015] <https://arxiv.org/abs/1502.03167>`_.

    The normalization is defined as:

    .. math:: y = \frac{x - \mathrm{E}[x]}
                       {\sqrt{\mathrm{Var}[x] + \epsilon}}
                  * \gamma + \beta

    The running averages of statistics are calculated as:

    .. math:: x_{\text{running}} = \text{momentum} * x_{\text{running}}
                                   + (1 - \text{momentum}) * x_{\text{batch}}

    Parameters
    ----------
    inputs : Sequence[dragon.Tensor]
        The tensor ``x``, ``gamma``, ``beta``, ``mean`` and ``var``.
    axis : int, optional, default=-1
        The channel axis.
    momentum : Union[float, dragon.Tensor], optional
        The value of :math:`\text{momentum}`.
    epsilon : float, optional, default=1e-5
        The value of :math:`\epsilon`.
    use_stats : int, optional, default=-1
        Whether to use estimated statistics or not.
    process_group : ProcessGroup, optional
        The group for communication.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    args['epsilon'] = float(epsilon)
    if process_group is None:
        process_group = distributed.get_group()
    if process_group is None:
        raise ValueError('<process_group> is required.')
    if context.executing_eagerly():
        return OpLib.execute('SyncBatchNorm',
                             inputs,
                             axis=axis,
                             epsilon=args['epsilon'],
                             use_stats=use_stats,
                             momentum=args['momentum'],
                             **process_group.arguments)
    args.pop('process_group')
    args.update(process_group.arguments)
    return OpLib.add('SyncBatchNorm', **args)
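A hedged sketch of the five expected inputs; the `distributed.new_group` call is an assumption, and the op must run under an initialized MPI group:

```python
import dragon
from dragon import distributed

# NCHW input with 3 channels; axis=1 selects the channel axis.
x = dragon.ones((2, 3, 4, 4), 'float32')
gamma, beta = dragon.ones((3,)), dragon.zeros((3,))
mean, var = dragon.zeros((3,)), dragon.ones((3,))
y = dragon.nn.sync_batch_norm(
    [x, gamma, beta, mean, var], axis=1,
    process_group=distributed.new_group(ranks=[0, 1]))
```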
Example 12
def drop_block(inputs,
               ratio=0.1,
               block_size=7,
               data_format='NCHW',
               inplace=False,
               **kwargs):
    r"""Set the blocks over input to zero randomly.
    `[Ghiasi et al., 2018] <https://arxiv.org/abs/1810.12890>`_.

    The **DropBlock** function is defined as:

    .. math::
        \text{DropBlock}(x_{ijk}) =
            x_{ijk} * (r_{ik} \sim \mathcal{B}(1, 1 - \gamma)) \\ \quad \\
        \text{where}\quad \gamma =
            \frac{\text{ratio}}{\text{block\_size}^{n}}
            \frac{\text{feat\_size}^{n}}{(\text{feat\_size} - \text{block\_size} + 1)^n}

    Examples:

    ```python
    x = dragon.ones((1, 3, 5, 5), 'float32')
    print(dragon.nn.drop_block(x, ratio=0.5, block_size=3))
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    ratio : Union[float, dragon.Tensor], optional, default=0.1
        The probability to zero a block.
    block_size : int, optional, default=7
        The spatial block size.
    data_format : str, optional, default='NCHW'
        ``'NCHW'`` or ``'NHWC'``.
    inplace : bool, optional, default=False
        Call in-place or return a new tensor.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute('DropBlock',
                             inputs,
                             outputs=inputs if inplace else [None],
                             block_size=block_size,
                             data_format=data_format,
                             ratio=args['ratio'])
    args.pop('inplace')
    return OpLib.add('DropBlock', **args)
Example 13
def __init__(self, op_type):
    self._op_type = op_type
    self._ignore_keys = {'outputs'}
    def_args = {}
    def_args_getter = OpSchema.get_args(op_type)
    if def_args_getter is not None:
        def_args = def_args_getter()
    # '*_desc' arguments are fed at execution time, so their
    # base names are excluded from the config cache key.
    for k, v in def_args.items():
        if k.endswith('_desc'):
            self._ignore_keys.add(k.split('_desc')[0])
    self._config_cache = {}
Example 14
def batch_norm(inputs,
               axis=-1,
               momentum=0.9,
               epsilon=1e-5,
               use_stats=-1,
               **kwargs):
    r"""Apply the batch normalization.
    `[Ioffe & Szegedy, 2015] <https://arxiv.org/abs/1502.03167>`_.

    The normalization is defined as:

    .. math:: y = \frac{x - \mathrm{E}[x]}
                       {\sqrt{\mathrm{Var}[x] + \epsilon}}
                  * \gamma + \beta

    The running averages of statistics are calculated as:

    .. math:: x_{\text{running}} = \text{momentum} * x_{\text{running}}
                                   + (1 - \text{momentum}) * x_{\text{batch}}

    Parameters
    ----------
    inputs : Sequence[dragon.Tensor]
        The tensor ``x``, ``gamma``, ``beta``, ``mean`` and ``var``.
    axis : int, optional, default=-1
        The channel axis.
    momentum : Union[float, dragon.Tensor], optional
        The value of :math:`\text{momentum}`.
    epsilon : float, optional, default=1e-5
        The value of :math:`\epsilon`.
    use_stats : int, optional, default=-1
        Whether to use estimated statistics or not.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    args['epsilon'] = float(epsilon)
    if context.executing_eagerly():
        return OpLib.execute('BatchNorm',
                             inputs,
                             axis=axis,
                             epsilon=args['epsilon'],
                             use_stats=use_stats,
                             momentum=args['momentum'])
    return OpLib.add('BatchNorm', **args)
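A minimal sketch (assuming the op is exposed as `dragon.nn.batch_norm`; per the `use_stats` description, `0` computes fresh batch statistics and `1` uses the running estimates):

```python
import dragon

# NCHW input with 3 channels; axis=1 is the channel axis.
x = dragon.ones((2, 3, 4, 4), 'float32')
gamma, beta = dragon.ones((3,)), dragon.zeros((3,))
mean, var = dragon.zeros((3,)), dragon.ones((3,))
y = dragon.nn.batch_norm([x, gamma, beta, mean, var],
                         axis=1, use_stats=0)
```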
Example 15
def where(inputs, **kwargs):
    r"""Select the elements from two branches under the condition.

    .. math::
        \text{out}_{i} =
            \begin{cases}
                \text{input1}_{i}, & \text{ if } \text{condition}_{i} \\
                \text{input2}_{i}, & \text{ otherwise }
            \end{cases}

    Examples:

    ```python
    a = dragon.constant([1, 2, 3])
    b = dragon.constant([3, 2, 1])
    print(dragon.where([a > b, a, b]))  # [3, 2, 3]
    ```

    If only the ``condition`` is given,
    return the coordinates of ``True`` elements:

    ```python
    x = dragon.constant([[True, False, True],
                         [False, True, True]])
    print(dragon.where(x))  # [[0, 0], [0, 2], [1, 1], [1, 2]]
    ```

    Parameters
    ----------
    inputs : Sequence[dragon.Tensor]
        The condition, input1 and input2 tensor.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    See Also
    --------
    `dragon.nonzero(...)`_

    """
    if types.is_tensor(inputs) or len(inputs) == 1:
        return nonzero(inputs, **kwargs)
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute('Where', inputs)
    return OpLib.add('Where', inputs, **kwargs)
Example 16
def register(op_type, args_getter=None):
    """Register an operator.

    Parameters
    ----------
    op_type : str
        The operator type.
    args_getter : callable, optional
        The callable to return the arguments.

    """
    def decorated(inner_function):
        return OpSchema.register_args(op_type, inner_function)
    if args_getter is not None:
        return OpSchema.register_args(op_type, args_getter)
    return decorated
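A sketch of both supported call styles (the op names and getters below are hypothetical):

```python
# Plain call: register the getter directly.
OpSchema.register('MyOp', args_getter=lambda **kw: {'alpha': kw.get('alpha', 1.0)})

# Decorator form: ``register`` returns ``decorated``.
@OpSchema.register('MyOp2')
def my_args(**kwargs):
    return {'beta': kwargs.get('beta', 0.0)}
```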
Example 17
def roll(inputs, shift, axis=None, **kwargs):
    """Roll elements along the given axis.

    :attr:`axis` could be negative or ``None``:

    ```python
    x = dragon.constant([[1, 2, 3], [4, 5, 6]])

    # A negative axis is the last-k axis
    print(dragon.roll(x, shift=1, axis=1))  # [[3, 1, 2], [6, 4, 5]]
    print(dragon.roll(x, shift=1, axis=-1))  # Equivalent

    # If axis is None, roll input as a vector
    print(dragon.roll(x, shift=1))  # [[6, 1, 2], [3, 4, 5]]

    # Also, axis could be a sequence of integers
    print(dragon.roll(x, shift=(1, 1), axis=(0, 1)))  # [[6, 4, 5], [3, 1, 2]]
    print(dragon.roll(x, shift=(1, -1), axis=(0, 1)))  # [[5, 6, 4], [2, 3, 1]]
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    shift : Union[int, Sequence[int], dragon.Tensor]
        The rolling offset of each axis.
    axis : Union[int, Sequence[int]], optional
        The axis to roll.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    axes = nest.flatten(axis) if axis is not None else axis
    if isinstance(shift, six.integer_types):
        args['shifts'] = nest.flatten(shift)
    if context.executing_eagerly():
        return OpLib.execute(
            'Roll', inputs, num_shifts=len(args['shifts']),
            shifts=args['shifts'], axes=axes)
    args.pop('axis')
    return OpLib.add('Roll', axes=axes, **args)
Example 18
def slice(inputs, starts, sizes, **kwargs):
    """Select the elements according to the given sections.

    Each section is specified by a pair ``[start, start + size)``:

    ```python
    x = dragon.constant([[[0, 1, 2], [3, 4, 5]]])
    print(dragon.slice(x, [0, 1, 2], [1, 1, 1]))  # [[[5]]]
    print(x[0:1, 1:2, 2:3])  # Equivalent
    ```

    :attr:`sizes` accepts value ``-1`` or ``0``:

    ```python
    x = dragon.constant([[[0, 1, 2], [3, 4, 5]]])
    # Set ``0`` to squeeze dimensions with size 1
    print(dragon.slice(x, [0, 1, 2], [0, 0, 0]))  # 5
    # Set ``-1`` to take all the remaining elements
    print(dragon.slice(x, [0, 0, 0], [-1, -1, -1]))  # [[[0, 1, 2], [3, 4, 5]]]
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    starts : Union[Sequence[int], dragon.Tensor]
        The start location for each dimension.
    sizes : Union[Sequence[int], dragon.Tensor]
        The number of elements sliced from start.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute(
            'Slice', inputs, ndim=len(args['starts']),
            starts=args['starts'], sizes=args['sizes'])
    return OpLib.add('Slice', **args)
Example 19
def reshape(inputs, shape, copy=True, **kwargs):
    """Change the dimensions of input.

    Examples:

    ```python
    # Provide a determined value for each dimension if possible
    x = dragon.ones(shape=(1, 2, 3, 4))
    print(dragon.reshape(x, shape=(6, 4)).shape)  # (6, 4)

    # Set an existing dimension to ``0`` to keep it unchanged
    print(dragon.reshape(x, shape=(0, 0, 12)).shape)  # (1, 2, 12)
    print(dragon.reshape(x, shape=(0, 0, 0, 0)).shape)  # (1, 2, 3, 4)
    print(dragon.reshape(x, shape=(0, 0, 0, 0, 0)).shape)  # Wrong

    # You can also set ``-1`` once to infer the value
    print(dragon.reshape(x, shape=(-1, 4)).shape)  # (6, 4)
    print(dragon.reshape(x, shape=(-1, -1)).shape)  # Wrong
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    shape : Union[Sequence[int], dragon.Tensor]
        The output shape.
    copy : bool, optional, default=True
        Return a new tensor or call in-place.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute(
            'Reshape', inputs, outputs=[None] if copy else inputs,
            ndim=len(args['dims']), dims=args['dims'])
    args.pop('copy')
    return OpLib.add('Reshape', **args)
Example 20
def broadcast_to(inputs, shape, **kwargs):
    """Broadcast input to the given shape.

    The length of ``shape`` may be less or more
    than the number of input dimensions:

    ```python
    a = dragon.constant([[1], [2], [3]])
    # Shape: (3, 1) -> (3, 2)
    print(dragon.broadcast_to(a, shape=(3, 2)))
    print(dragon.broadcast_to(a, shape=(2,)))  # Equivalent

    # Shape: (3,) -> (1, 3) -> (2, 3)
    b = dragon.constant([1, 2, 3])
    print(dragon.broadcast_to(b, shape=(2, 3)))

    # Wrong remapping shape: (3,) -> (6,)
    # Only dimensions with size 1 can be broadcast
    print(dragon.broadcast_to(b, shape=(6,)))
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    shape : Sequence[Union[int, dragon.Tensor]]
        The output shape to broadcast to.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute(
            'Expand', inputs, ndim=len(args['dims']), dims=args['dims'])
    return OpLib.add('Expand', **args)
Example 21
def drop_path(inputs, ratio=0.2, inplace=False, **kwargs):
    r"""Set the examples over the input to zero randomly.
    `[Larsson et al., 2016] <https://arxiv.org/abs/1605.07648>`_.

    The **DropPath** function is defined as:

    .. math:: \text{DropPath}(x_{ij}) = x_{ij} * (r_{i} \sim \mathcal{B}(1, 1 - \text{ratio}))

    Examples:

    ```python
    x = dragon.ones((5, 2), 'float32')
    print(dragon.nn.drop_path(x, ratio=0.5))
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    ratio : Union[float, dragon.Tensor], optional, default=0.2
        The probability to zero an example.
    inplace : bool, optional, default=False
        Call in-place or return a new tensor.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute('DropPath',
                             inputs,
                             outputs=inputs if inplace else [None],
                             ratio=args['ratio'])
    args.pop('inplace')
    return OpLib.add('DropPath', **args)
Example 22
def dropout(inputs, ratio=0.5, inplace=False, **kwargs):
    r"""Set the elements of input to zero randomly.
    `[Srivastava et al., 2014] <http://jmlr.org/papers/v15/srivastava14a.html>`_.

    The **Dropout** function is defined as:

    .. math:: \text{Dropout}(x) = x * (r \sim \mathcal{B}(1, 1 - \text{ratio}))

    Examples:

    ```python
    x = dragon.ones((2, 3), 'float32')
    print(dragon.nn.dropout(x, ratio=0.5))
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    ratio : Union[float, dragon.Tensor], optional, default=0.5
        The probability to zero an element.
    inplace : bool, optional, default=False
        Call in-place or return a new tensor.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute('Dropout',
                             inputs,
                             outputs=inputs if inplace else [None],
                             ratio=args['ratio'])
    args.pop('inplace')
    return OpLib.add('Dropout', **args)
Example 23
def transpose(inputs, perm=None, copy=True, **kwargs):
    """Permute the dimensions of input.

    Examples:

    ```python
    # Provide the permutation for all axes
    x = dragon.ones(shape=(2, 3, 4))
    print(dragon.transpose(x, (0, 2, 1)).shape)  # (2, 4, 3)

    # Otherwise the dimensions are simply reversed
    print(dragon.transpose(x).shape)  # (4, 3, 2)
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    perm : Union[Sequence[int], dragon.Tensor], optional
        The output permutation.
    copy : bool, optional, default=True
        Return a new tensor or call in-place.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute(
            'Transpose', inputs,
            outputs=[None] if copy else inputs,
            ndim=len(args['perm']) if perm is not None else 0,
            perm=args['perm'])
    return OpLib.add('Transpose', **args)
Example 24
def repeat(inputs, axis=None, repeats=1, **kwargs):
    """Repeat the elements along the given axis.

    Examples:

    ```python
    x = dragon.constant([[1, 2], [3, 4]])

    # A negative axis is the last-k axis
    print(dragon.repeat(x, axis=1, repeats=2))  # [[1, 1, 2, 2], [3, 3, 4, 4]]
    print(dragon.repeat(x, axis=-1, repeats=2))  # Equivalent

    # If axis is None, repeat a flattened input
    print(dragon.repeat(x, repeats=2))  # [1, 1, 2, 2, 3, 3, 4, 4]
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    axis : int, optional
        The axis to repeat.
    repeats : Union[int, dragon.Tensor], optional, default=1
        The repeat size.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    args = OpSchema.parse_args(locals())
    if context.executing_eagerly():
        return OpLib.execute(
            'Repeat', inputs, axis=axis, repeats=args['repeats'])
    return OpLib.add('Repeat', **args)
Example 25
def decorated(inner_function):
    return OpSchema.register_args(op_type, inner_function)