Code example #1
File: hardshrink.py Project: xvr-hlt/addons
def hardshrink(
    x: types.TensorLike, lower: Number = -0.5, upper: Number = 0.5
) -> tf.Tensor:
    """Hard shrink function.

    Computes hard shrink function:
    `x if x < lower or x > upper else 0`.

    Args:
        x: A `Tensor`. Must be one of the following types:
            `float16`, `float32`, `float64`.
        lower: `float`, lower bound for setting values to zero.
        upper: `float`, upper bound for setting values to zero.
    Returns:
        A `Tensor`. Has the same type as `x`.
    """
    x = tf.convert_to_tensor(x)

    if not options.TF_ADDONS_PY_OPS:
        try:
            return _hardshrink_custom_op(x, lower, upper)
        except tf.errors.NotFoundError:
            options.warn_fallback("hardshrink")

    return _hardshrink_py(x, lower, upper)
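The pure-Python fallback `_hardshrink_py` is not reproduced above. As a minimal sketch of the formula in the docstring (the helper name `hardshrink_py` below is hypothetical, not the Addons implementation), the same behavior can be expressed with `tf.where`:

import tensorflow as tf

def hardshrink_py(x, lower=-0.5, upper=0.5):
    # Keep values outside [lower, upper] unchanged; zero out everything inside.
    x = tf.convert_to_tensor(x)
    mask = tf.logical_or(x < lower, x > upper)
    return tf.where(mask, x, tf.zeros_like(x))

# -1.0 and 1.0 lie outside [-0.5, 0.5] and are kept; -0.3 is shrunk to 0.
print(hardshrink_py([-0.3, -1.0, 0.0, 1.0]).numpy())  # [ 0. -1.  0.  1.]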
Code example #2
File: lisht.py Project: xuhao1/addons
def lisht(x: types.TensorLike) -> tf.Tensor:
    r"""LiSHT: Non-Parameteric Linearly Scaled Hyperbolic Tangent Activation Function.

    Computes linearly scaled hyperbolic tangent (LiSHT):

    $$
    \mathrm{lisht}(x) = x \cdot \tanh(x).
    $$

    See [LiSHT: Non-Parametric Linearly Scaled Hyperbolic Tangent Activation Function for Neural Networks](https://arxiv.org/abs/1901.05894).

    Usage:

    >>> x = tf.constant([1.0, 0.0, 1.0])
    >>> tfa.activations.lisht(x)
    <tf.Tensor: shape=(3,), dtype=float32, numpy=array([0.7615942, 0.       , 0.7615942], dtype=float32)>

    Args:
        x: A `Tensor`. Must be one of the following types:
            `float16`, `float32`, `float64`.
    Returns:
        A `Tensor`. Has the same type as `x`.
    """
    x = tf.convert_to_tensor(x)

    if not options.TF_ADDONS_PY_OPS:
        try:
            return _lisht_custom_op(x)
        except tf.errors.NotFoundError:
            options.warn_fallback("lisht")

    return _lisht_py(x)
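The fallback path `_lisht_py` is not shown here; a minimal sketch of the formula above (using a hypothetical name `lisht_py`) is simply:

import tensorflow as tf

def lisht_py(x):
    # LiSHT scales tanh by the input itself: x * tanh(x), so the output is
    # non-negative and symmetric around zero.
    x = tf.convert_to_tensor(x)
    return x * tf.math.tanh(x)

# tanh(1.0) ~= 0.7615942, so both -1.0 and 1.0 map to ~0.7615942.
print(lisht_py([-1.0, 0.0, 1.0]).numpy())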
Code example #3
def gelu(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:
    """Gaussian Error Linear Unit.

    Computes the Gaussian error linear unit:
    `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` or
    `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where X ~ N(0, 1),
    depending on whether approximation is enabled.

    See [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
    and [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805).

    Args:
        x: A `Tensor`. Must be one of the following types:
            `float16`, `float32`, `float64`.
        approximate: bool, whether to enable approximation.
    Returns:
        A `Tensor`. Has the same type as `x`.
    """
    warnings.warn(
        "gelu activation has been migrated to core TensorFlow, "
        "and will be deprecated in Addons 0.12.",
        DeprecationWarning,
    )

    x = tf.convert_to_tensor(x)

    if not options.TF_ADDONS_PY_OPS:
        try:
            return _gelu_custom_op(x, approximate)
        except tf.errors.NotFoundError:
            options.warn_fallback("gelu")

    return _gelu_py(x, approximate)
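The two formulas in the docstring correspond to the exact Gaussian CDF and its tanh approximation. A minimal sketch of that computation (the name `gelu_py` is hypothetical; the actual `_gelu_py` fallback is not shown here) could look like:

import math
import tensorflow as tf

def gelu_py(x, approximate=True):
    # The exact form uses the Gaussian CDF via erf; the approximate form uses
    # the tanh expansion from the GELU paper.
    x = tf.convert_to_tensor(x)
    if approximate:
        inner = math.sqrt(2.0 / math.pi) * (x + 0.044715 * tf.pow(x, 3))
        return 0.5 * x * (1.0 + tf.math.tanh(inner))
    return 0.5 * x * (1.0 + tf.math.erf(x / math.sqrt(2.0)))

x = tf.constant([-1.0, 0.0, 1.0])
print(gelu_py(x, approximate=False).numpy())  # approx. [-0.1587, 0., 0.8413]
print(gelu_py(x, approximate=True).numpy())   # approx. [-0.1588, 0., 0.8412]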
Code example #4
File: mish.py Project: xuhao1/addons
def mish(x: types.TensorLike) -> tf.Tensor:
    r"""Mish: A Self Regularized Non-Monotonic Neural Activation Function.

    Computes mish activation:

    $$
    \mathrm{mish}(x) = x \cdot \tanh(\mathrm{softplus}(x)).
    $$

    See [Mish: A Self Regularized Non-Monotonic Neural Activation Function](https://arxiv.org/abs/1908.08681).

    Usage:

    >>> x = tf.constant([1.0, 0.0, 1.0])
    >>> tfa.activations.mish(x)
    <tf.Tensor: shape=(3,), dtype=float32, numpy=array([0.86509836, 0.        , 0.86509836], dtype=float32)>

    Args:
        x: A `Tensor`. Must be one of the following types:
            `float16`, `float32`, `float64`.
    Returns:
        A `Tensor`. Has the same type as `x`.
    """
    x = tf.convert_to_tensor(x)

    if not options.TF_ADDONS_PY_OPS:
        try:
            return _mish_custom_op(x)
        except tf.errors.NotFoundError:
            options.warn_fallback("mish")

    return _mish_py(x)
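A minimal sketch of the mish formula (assuming the fallback `_mish_py` behaves like this; the name `mish_py` below is hypothetical):

import tensorflow as tf

def mish_py(x):
    # softplus(x) = log(1 + exp(x)); mish gates x by tanh of its softplus.
    x = tf.convert_to_tensor(x)
    return x * tf.math.tanh(tf.math.softplus(x))

# mish(1.0) = tanh(log(1 + e)) ~= 0.865, matching the usage example above.
print(mish_py([-1.0, 0.0, 1.0]).numpy())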
Code example #5
def tanhshrink(x: types.TensorLike) -> tf.Tensor:
    r"""Tanh shrink function.

    Applies the element-wise function:

    $$
    \mathrm{tanhshrink}(x) = x - \tanh(x).
    $$

    Usage:

    >>> x = tf.constant([-1.0, 0.0, 1.0])
    >>> tfa.activations.tanhshrink(x)
    <tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.23840582,  0.        ,  0.23840582], dtype=float32)>

    Args:
        x: A `Tensor`. Must be one of the following types:
            `float16`, `float32`, `float64`.
    Returns:
        A `Tensor`. Has the same type as `x`.
    """
    x = tf.convert_to_tensor(x)

    if not options.TF_ADDONS_PY_OPS:
        try:
            return _tanhshrink_custom_op(x)
        except tf.errors.NotFoundError:
            options.warn_fallback("tanhshrink")

    return _tanhshrink_py(x)
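The formula is simple enough that a one-line sketch (hypothetical `tanhshrink_py`, not the Addons fallback itself) illustrates it:

import tensorflow as tf

def tanhshrink_py(x):
    # Residual of tanh: near 0 the output stays close to 0, and for large |x|
    # it approaches x - sign(x).
    x = tf.convert_to_tensor(x)
    return x - tf.math.tanh(x)

print(tanhshrink_py([-1.0, 0.0, 1.0]).numpy())  # approx. [-0.2384, 0., 0.2384]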
Code example #6
File: gelu.py Project: xuhao1/addons
def gelu(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:
    r"""Gaussian Error Linear Unit.

    Computes the Gaussian error linear unit:

    $$
    \mathrm{gelu}(x) = x \Phi(x),
    $$

    where

    $$
    \Phi(x) = \frac{1}{2} \left[ 1 + \mathrm{erf}(\frac{x}{\sqrt{2}}) \right]
    $$

    when `approximate` is `False`; or

    $$
    \Phi(x) = \frac{1}{2} \left[ 1 + \tanh(\sqrt{\frac{2}{\pi}} \cdot (x + 0.044715 \cdot x^3)) \right]
    $$

    when `approximate` is `True`.

    See [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
    and [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805).

    Usage:

    >>> tfa.options.TF_ADDONS_PY_OPS = True
    >>> x = tf.constant([-1.0, 0.0, 1.0])
    >>> tfa.activations.gelu(x, approximate=False)
    <tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.15865529,  0.        ,  0.8413447 ], dtype=float32)>
    >>> tfa.activations.gelu(x, approximate=True)
    <tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.158808,  0.      ,  0.841192], dtype=float32)>

    Args:
        x: A `Tensor`. Must be one of the following types:
            `float16`, `float32`, `float64`.
        approximate: bool, whether to enable approximation.
    Returns:
        A `Tensor`. Has the same type as `x`.
    """
    warnings.warn(
        "gelu activation has been migrated to core TensorFlow, "
        "and will be deprecated in Addons 0.12.",
        DeprecationWarning,
    )

    x = tf.convert_to_tensor(x)

    if not options.TF_ADDONS_PY_OPS:
        try:
            return _gelu_custom_op(x, approximate)
        except tf.errors.NotFoundError:
            options.warn_fallback("gelu")

    return _gelu_py(x, approximate)
Code example #7
File: tanhshrink.py Project: xvr-hlt/addons
def tanhshrink(x: types.TensorLike) -> tf.Tensor:
    """Applies the element-wise function: x - tanh(x)

    Args:
        x: A `Tensor`. Must be one of the following types:
            `float16`, `float32`, `float64`.
    Returns:
        A `Tensor`. Has the same type as `x`.
    """
    x = tf.convert_to_tensor(x)

    if not options.TF_ADDONS_PY_OPS:
        try:
            return _tanhshrink_custom_op(x)
        except tf.errors.NotFoundError:
            options.warn_fallback("tanhshrink")

    return _tanhshrink_py(x)
Code example #8
File: mish.py Project: xvr-hlt/addons
def mish(x: types.TensorLike) -> tf.Tensor:
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function.

    Computes mish activation: x * tanh(softplus(x))

    See [Mish: A Self Regularized Non-Monotonic Neural Activation Function](https://arxiv.org/abs/1908.08681).

    Args:
        x: A `Tensor`. Must be one of the following types:
            `float16`, `float32`, `float64`.
    Returns:
        A `Tensor`. Has the same type as `x`.
    """
    x = tf.convert_to_tensor(x)

    if not options.TF_ADDONS_PY_OPS:
        try:
            return _mish_custom_op(x)
        except tf.errors.NotFoundError:
            options.warn_fallback("mish")

    return _mish_py(x)
Code example #9
File: softshrink.py Project: xuhao1/addons
def softshrink(
    x: types.TensorLike, lower: Number = -0.5, upper: Number = 0.5
) -> tf.Tensor:
    r"""Soft shrink function.

    Computes soft shrink function:

    $$
    \mathrm{softshrink}(x) =
    \begin{cases}
        x - \mathrm{lower} & \text{if } x < \mathrm{lower} \\
        x - \mathrm{upper} & \text{if } x > \mathrm{upper} \\
        0                  & \text{otherwise}
    \end{cases}.
    $$

    Usage:

    >>> x = tf.constant([-1.0, 0.0, 1.0])
    >>> tfa.activations.softshrink(x)
    <tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.5,  0. ,  0.5], dtype=float32)>

    Args:
        x: A `Tensor`. Must be one of the following types:
            `float16`, `float32`, `float64`.
        lower: `float`, lower bound for setting values to zero.
        upper: `float`, upper bound for setting values to zero.
    Returns:
        A `Tensor`. Has the same type as `x`.
    """
    x = tf.convert_to_tensor(x)

    if not options.TF_ADDONS_PY_OPS:
        try:
            return _softshrink_custom_op(x, lower, upper)
        except tf.errors.NotFoundError:
            options.warn_fallback("softshrink")

    return _softshrink_py(x, lower, upper)
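As a rough illustration of the piecewise formula above (the helper name `softshrink_py` is hypothetical, not the Addons fallback), the three cases can be expressed with two `tf.where` calls:

import tensorflow as tf

def softshrink_py(x, lower=-0.5, upper=0.5):
    # Shift values outside [lower, upper] toward zero and zero out the rest.
    x = tf.convert_to_tensor(x)
    below = tf.where(x < lower, x - lower, tf.zeros_like(x))
    above = tf.where(x > upper, x - upper, tf.zeros_like(x))
    return below + above

# -1.0 shifts by +0.5, 1.0 shifts by -0.5, values inside the band become 0.
print(softshrink_py([-1.0, 0.0, 1.0]).numpy())  # [-0.5  0.   0.5]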
Code example #10
def lisht(x: types.TensorLike) -> tf.Tensor:
    """LiSHT: Non-Parameteric Linearly Scaled Hyperbolic Tangent Activation Function.

    Computes linearly scaled hyperbolic tangent (LiSHT): `x * tanh(x)`

    See [LiSHT: Non-Parametric Linearly Scaled Hyperbolic Tangent Activation Function for Neural Networks](https://arxiv.org/abs/1901.05894).

    Args:
        x: A `Tensor`. Must be one of the following types:
            `float16`, `float32`, `float64`.
    Returns:
        A `Tensor`. Has the same type as `x`.
    """
    x = tf.convert_to_tensor(x)

    if not options.TF_ADDONS_PY_OPS:
        try:
            return _lisht_custom_op(x)
        except tf.errors.NotFoundError:
            options.warn_fallback("lisht")

    return _lisht_py(x)
Code example #11
File: distort_image_ops.py Project: xnchu/addons
def adjust_hsv_in_yiq(
    image: TensorLike,
    delta_hue: Number = 0,
    scale_saturation: Number = 1,
    scale_value: Number = 1,
    name: Optional[str] = None,
) -> tf.Tensor:
    """Adjust hue, saturation, value of an RGB image in YIQ color space.

    This is a convenience method that converts an RGB image to float
    representation, converts it to YIQ, rotates the color around the
    Y channel by delta_hue in radians, scales the chrominance channels
    (I, Q) by scale_saturation, scales all channels (Y, I, Q) by scale_value,
    converts back to RGB, and then back to the original data type.

    `image` is an RGB image. The image hue is adjusted by converting the
    image to YIQ, rotating around the luminance channel (Y) by
    `delta_hue` in radians, multiplying the chrominance channels (I, Q) by
    `scale_saturation`, and multiplying all channels (Y, I, Q) by
    `scale_value`. The image is then converted back to RGB.

    Args:
      image: RGB image or images. Size of the last dimension must be 3.
      delta_hue: `float`, the hue rotation amount, in radians.
      scale_saturation: `float`, factor to multiply the saturation by.
      scale_value: `float`, factor to multiply the value by.
      name: A name for this operation (optional).

    Returns:
      Adjusted image(s), same shape and dtype as `image`.
    """
    with tf.name_scope(name or "adjust_hsv_in_yiq"):
        image = tf.convert_to_tensor(image, name="image")
        # Remember the original dtype so we can convert back if needed
        orig_dtype = image.dtype
        if not image.dtype.is_floating:
            image = tf.image.convert_image_dtype(image, tf.float32)

        delta_hue = tf.cast(delta_hue, dtype=image.dtype, name="delta_hue")
        scale_saturation = tf.cast(scale_saturation,
                                   dtype=image.dtype,
                                   name="scale_saturation")
        scale_value = tf.cast(scale_value,
                              dtype=image.dtype,
                              name="scale_value")

        if not options.is_custom_kernel_disabled():
            warnings.warn(
                "C++/CUDA kernel of `adjust_hsv_in_yiq` will be removed in Addons `0.13`.",
                DeprecationWarning,
            )
            try:
                image = _distort_image_so.ops.addons_adjust_hsv_in_yiq(
                    image, delta_hue, scale_saturation, scale_value)
            except tf.errors.NotFoundError:
                options.warn_fallback("adjust_hsv_in_yiq")
                image = _adjust_hsv_in_yiq(image, delta_hue, scale_saturation,
                                           scale_value)
        else:
            image = _adjust_hsv_in_yiq(image, delta_hue, scale_saturation,
                                       scale_value)

        return tf.image.convert_image_dtype(image, orig_dtype)
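The pure-Python fallback `_adjust_hsv_in_yiq` is not shown above. A minimal sketch of the transform described in the docstring, built on the stock `tf.image.rgb_to_yiq` / `tf.image.yiq_to_rgb` conversions (the function name and the rotation sign convention are assumptions, not the Addons implementation):

import tensorflow as tf

def adjust_hsv_in_yiq_py(image, delta_hue=0.0, scale_saturation=1.0, scale_value=1.0):
    # Convert RGB -> YIQ, rotate the chrominance plane (I, Q) by delta_hue
    # radians, scale it by scale_saturation, scale all channels (Y, I, Q) by
    # scale_value, then convert back to RGB.
    image = tf.convert_to_tensor(image, dtype=tf.float32)
    y, i, q = tf.unstack(tf.image.rgb_to_yiq(image), axis=-1)

    cos_h, sin_h = tf.cos(delta_hue), tf.sin(delta_hue)
    i_new = scale_saturation * (i * cos_h - q * sin_h)
    q_new = scale_saturation * (i * sin_h + q * cos_h)

    yiq = scale_value * tf.stack([y, i_new, q_new], axis=-1)
    return tf.image.yiq_to_rgb(yiq)

# Rotate hue by 0.5 rad and slightly desaturate a random 4x4 RGB image.
out = adjust_hsv_in_yiq_py(tf.random.uniform([4, 4, 3]), delta_hue=0.5, scale_saturation=0.8)
print(out.shape)  # (4, 4, 3)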