Code Example #1
File: graph.py  Project: zwxlib/MegEngine
def add_update(dest: Tensor,
               delta: Tensor,
               *,
               alpha: Union[Tensor, float, int] = 1.0,
               beta: Union[Tensor, float, int] = 1.0,
               bias: Union[Tensor, float, int] = 0.0):
    r"""Inplace modify ``dest`` as follows:

    .. math::
        dest = alpha * dest +  beta * delta + bias

    :param dest: input data that will be inplace modified.
    :param delta: update value that will be added to ``dest``.
    :param alpha: weight ratio of ``dest``. Default: 1.0
    :param beta: weight ratio of ``delta``. Default: 1.0
    :param bias: bias value appended to the result. Default: 0.0
    """

    if isinstance(beta, Tensor) or isinstance(alpha, Tensor):
        delta *= beta
        beta = 1.0
    if isinstance(alpha, Tensor):
        delta += (alpha - 1.0) * dest
        alpha = 1.0
    if isinstance(bias, Tensor):
        delta += bias
        bias = 0.0

    comp_graph = dest._comp_graph or get_default_graph()
    comp_node = dest._comp_node

    if not isinstance(delta, Tensor):
        _delta = mgb.make_immutable(value=delta,
                                    comp_node=comp_node,
                                    comp_graph=comp_graph)
    else:
        _delta = delta._attach(comp_graph)

    _dest = dest._attach(comp_graph)

    # use (dest, delta) as the key, so that the same delta is not added to dest twice in a static graph
    key = (comp_graph._id(), _dest.id, _delta.id)
    if key in _add_update_cache:
        _alpha, _beta, _bias, config = _add_update_cache[key]
        mgb.mgb._mgb.SharedScalar__set(_alpha, alpha)
        mgb.mgb._mgb.SharedScalar__set(_beta, beta)
        mgb.mgb._mgb.SharedScalar__set(_bias, bias)
    else:
        _alpha = mgb.SharedScalar(alpha)
        _beta = mgb.SharedScalar(beta)
        _bias = mgb.SharedScalar(bias)
        config = mgb.helper.gen_config(None, comp_node, None)
        _add_update_cache[key] = (_alpha, _beta, _bias, config)

    u = mgb.mgb._Opr.add_update(_dest, barrier(_delta), _alpha, _beta, _bias,
                                _dummy, config)
    mark_impure(u)

    return Tensor(u)
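
For reference, a minimal usage sketch (not from the project; it assumes a default computing graph is active and that add_update and tensor are importable as in the other examples on this page). It shows how the formula dest = alpha * dest + beta * delta + bias maps onto the keyword arguments, here as an exponential moving average:

import numpy as np
from megengine import tensor

running_mean = tensor(np.zeros((4,), dtype=np.float32))
batch_mean = tensor(np.ones((4,), dtype=np.float32))

# running_mean <- 0.9 * running_mean + 0.1 * batch_mean, modified in place
add_update(running_mean, batch_mean, alpha=0.9, beta=0.1)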
Code Example #2
def scalar(
    value,
    dtype: type = None,
    device: Optional[mgb.CompNode] = None,
    comp_graph: Optional[mgb.CompGraph] = None,
) -> Tensor:
    device, comp_graph = _use_default_if_none(device, comp_graph)
    return Tensor(mgb.make_immutable(device, comp_graph, value, dtype=dtype, name=None))
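
A short usage sketch (hypothetical): with device and comp_graph omitted, _use_default_if_none supplies the defaults, so a plain Python number becomes a 0-dim immutable Tensor. The np.int32 below is an assumption about accepted dtype values:

import numpy as np

x = scalar(3.14)               # default dtype, default device and graph
i = scalar(7, dtype=np.int32)  # dtype fixed explicitly (assumed NumPy dtype)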
Code Example #3
File: nn.py  Project: zwxlib/MegEngine
def one_hot(inp: Tensor, num_classes: int) -> Tensor:
    r"""
    Perform one-hot encoding for the input tensor.

    :param inp: input tensor
    :param num_classes: number of classes, which determines the size of the last dimension of the output tensor

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.arange(1, 4, dtype=np.int32))
        out = F.one_hot(inp, num_classes=4)
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[0 1 0 0]
         [0 0 1 0]
         [0 0 0 1]]

    """
    comp_node, comp_graph = _decide_comp_node_and_comp_graph(inp)

    zeros = mgb.make_immutable(value=0,
                               comp_node=comp_node,
                               comp_graph=comp_graph)
    zeros_symvar = zeros.broadcast(inp.shapeof(), num_classes)

    ones = mgb.make_immutable(value=1,
                              comp_node=comp_node,
                              comp_graph=comp_graph)
    ones_symvar = ones.broadcast(inp.shapeof(), 1)

    return Tensor(
        mgb.opr.indexing_set_one_hot(zeros_symvar,
                                     axis=len(inp.shapeof()),
                                     index=inp,
                                     value=ones_symvar))
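
For intuition, a pure-NumPy sketch (illustration only, not MegEngine code) of what the graph built above computes: a zero tensor of shape inp.shape + (num_classes,) with ones scattered along the new last axis:

import numpy as np

def one_hot_ref(inp, num_classes):
    # zeros of shape inp.shape + (num_classes,), then set out[..., inp] = 1
    out = np.zeros(inp.shape + (num_classes,), dtype=inp.dtype)
    np.put_along_axis(out, inp[..., None], 1, axis=-1)
    return out

print(one_hot_ref(np.arange(1, 4, dtype=np.int32), 4))  # matches the testoutput above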
Code Example #4
File: tensor_factory.py  Project: zwxlib/MegEngine
def scalar(
    value,
    dtype: type = None,
    device: Optional[mgb.CompNode] = None,
    comp_graph: Optional[mgb.CompGraph] = None,
) -> Tensor:
    """
    convert ``value`` to the type of :class:`~.Tensor`.
    """
    device, comp_graph = _use_default_if_none(device, comp_graph)
    return Tensor(
        mgb.make_immutable(device, comp_graph, value, dtype=dtype, name=None))
Code Example #5
File: nn.py  Project: greatlog/MegEngine
def batch_norm2d(
    inp: Tensor,
    running_mean: Tensor,
    running_var: Tensor,
    weight: Optional[Tensor] = None,
    bias: Optional[Tensor] = None,
    training: bool = False,
    momentum: float = 0.9,
    eps: float = 1e-5,
) -> Tensor:
    """Applies batch normalization to the input.

    :param inp: input tensor.
    :param running_mean: tensor to store running mean.
    :param running_var: tensor to store running variance.
    :param weight: scaling tensor in the learnable affine parameters.
        See :math:`\gamma` in :class:`~.BatchNorm2d`
    :param bias: bias tensor in the learnable affine parameters.
        See :math:`\beta` in :class:`~.BatchNorm2d`
    :param training: a boolean value to indicate whether batch norm is performed
        in training mode. Default: ``False``
    :param momentum: the value used for the ``running_mean`` and ``running_var``
        computation.
        Default: 0.9
    :param eps: a value added to the denominator for numerical stability.
        Default: 1e-5.

    Refer to :class:`~.BatchNorm2d` and :class:`~.BatchNorm1d` for more information.
    """

    inp = mgb.opr.mark_no_broadcast_elemwise(inp)
    _channels = inp.imm_shape[1]
    _ndim = len(inp.imm_shape)
    _param_shape = (1, _channels) + (1,) * (_ndim - 2)

    assert _ndim == 4, "only 4D tensor supported"

    if weight is not None:
        weight = weight.reshape(*_param_shape)
    else:
        weight = mgb.make_immutable(*_use_default_if_none(None, None), 1.0).broadcast(
            *_param_shape
        )

    if bias is not None:
        bias = bias.reshape(*_param_shape)
    else:
        bias = mgb.make_immutable(*_use_default_if_none(None, None), 0.0).broadcast(
            *_param_shape
        )

    FwdMode = mgb.opr_param_defs.BN.FwdMode
    fwdmode = FwdMode.TRAINING if training else FwdMode.INFERENCE
    avg_factor = 1 - momentum

    if running_mean is not None and running_var is not None:
        if training:
            inp = barrier(inp)

        output = mgb.opr.batch_norm(
            inp,
            weight,
            bias,
            running_mean,
            running_var,
            param_dim="DIM_1C11",
            fwd_mode=fwdmode,
            epsilon=eps,
            avg_factor=avg_factor,
        )[-1]
        if training:
            mark_impure(output)
    else:
        output = mgb.opr.batch_norm_no_statistic(
            inp,
            weight,
            bias,
            param_dim="DIM_1C11",
            fwd_mode=fwdmode,
            epsilon=eps,
            avg_factor=avg_factor,
        )[-1]

    return output
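
A hedged call sketch (hypothetical shapes): it assumes NCHW input and per-channel (1, C, 1, 1) statistics tensors, consistent with param_dim="DIM_1C11" above. With training=True the running statistics are updated in place with avg_factor = 1 - momentum:

import numpy as np
from megengine import tensor

x = tensor(np.random.randn(2, 3, 8, 8).astype(np.float32))
running_mean = tensor(np.zeros((1, 3, 1, 1), dtype=np.float32))
running_var = tensor(np.ones((1, 3, 1, 1), dtype=np.float32))

y = batch_norm2d(x, running_mean, running_var, training=True, momentum=0.9)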
Code Example #6
def _dummy():
    # Build a zero-valued immutable scalar on the default device and
    # computing graph; cf. the _dummy argument in Code Example #1.
    return mgb.make_immutable(*graph._use_default_if_none(None, None), 0)
Code Example #7
File: nn.py  Project: zymspindrift/MegEngine
def batch_norm2d(
    inp: Tensor,
    running_mean: Tensor,
    running_var: Tensor,
    weight: Optional[Tensor] = None,
    bias: Optional[Tensor] = None,
    training: bool = False,
    momentum: float = 0.9,
    eps: float = 1e-5,
) -> Tensor:
    """Applies batch normalization to the input.

    :type inp: Tensor
    :param inp: The input tensor.
    :type num_features: int
    :param num_features: usually the :math:`C` from an input of size
        :math:`(N, C, H, W)` or the highest ranked dimension of an input with
        less than 4D.
    :type eps: float
    :param eps: a value added to the denominator for numerical stability.
        Default: 1e-5.
    :type momentum: float
    :param momentum: the value used for the `running_mean` and `running_var`
        computation.
        Default: 0.1
    :type affine: bool
    :param affine: a boolean value that when set to ``True``, this module has
        learnable affine parameters. Default: ``True``
    :type track_running_stats: bool
    :param track_running_stats: when set to ``True``, this module tracks the
        running mean and variance. When set to ``False``, this module does not
        track such statistics and always uses batch statistics in both training
        and eval modes. Default: ``True``.

    Refer to :class:`~.BatchNorm2d` and :class:`~.BatchNorm1d` for more information.
    """

    inp = mgb.opr.mark_no_broadcast_elemwise(inp)
    _channels = inp.imm_shape[1]
    _ndim = len(inp.imm_shape)
    _param_shape = (1, _channels) + (1,) * (_ndim - 2)

    assert _ndim == 4, "only 4D tensor supported"

    if weight is not None:
        weight = weight.reshape(*_param_shape)
    else:
        weight = mgb.make_immutable(*_use_default_if_none(None, None), 1.0).broadcast(
            *_param_shape
        )

    if bias is not None:
        bias = bias.reshape(*_param_shape)
    else:
        bias = mgb.make_immutable(*_use_default_if_none(None, None), 0.0).broadcast(
            *_param_shape
        )

    FwdMode = mgb.opr_param_defs.BN.FwdMode
    fwdmode = FwdMode.TRAINING if training else FwdMode.INFERENCE
    avg_factor = 1 - momentum

    if running_mean is not None and running_var is not None:
        if training:
            inp = barrier(inp)

        output = mgb.opr.batch_norm(
            inp,
            weight,
            bias,
            running_mean,
            running_var,
            param_dim="DIM_1C11",
            fwd_mode=fwdmode,
            epsilon=eps,
            avg_factor=avg_factor,
        )[-1]
        if training:
            mark_impure(output)
    else:
        output = mgb.opr.batch_norm_no_statistic(
            inp,
            weight,
            bias,
            param_dim="DIM_1C11",
            fwd_mode=fwdmode,
            epsilon=eps,
            avg_factor=avg_factor,
        )[-1]

    return output
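
Complementing the sketch after Code Example #5, a hypothetical call that exercises the other branch: when running_mean and running_var are both None, the batch_norm_no_statistic path normalizes with batch statistics only:

import numpy as np
from megengine import tensor

x = tensor(np.random.randn(2, 3, 8, 8).astype(np.float32))

# no running statistics given, so the batch_norm_no_statistic branch is taken
y = batch_norm2d(x, None, None, training=True)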