Example #1
def test_smiley_face():
    """Check warping accuracy by comparing to hardcoded warped images."""

    input_file = get_path_to_datafile(
        "image/tests/test_data/Yellow_Smiley_Face.png")
    input_image = load_image(input_file)
    control_points = np.asarray([
        [64, 59],
        [180 - 64, 59],
        [39, 111],
        [180 - 39, 111],
        [90, 143],
        [58, 134],
        [180 - 58, 134],
    ])  # pyformat: disable
    control_point_displacements = np.asarray([
        [-10.5, 10.5],
        [10.5, 10.5],
        [0, 0],
        [0, 0],
        [0, -10],
        [-20, 10.25],
        [10, 10.75],
    ])
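    # sparse_image_warp works in (row, col) order, so the [:, [1, 0]] swaps
    # below reorder the (x, y) points before adding a batch dimension.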
    control_points = tf.constant(
        np.expand_dims(np.float32(control_points[:, [1, 0]]), 0))
    control_point_displacements = tf.constant(
        np.expand_dims(np.float32(control_point_displacements[:, [1, 0]]), 0))
    float_image = np.expand_dims(np.float32(input_image) / 255, 0)
    input_image = tf.constant(float_image)

    for interpolation_order in (1, 2, 3):
        for num_boundary_points in (0, 1, 4):
            warped_image, _ = sparse_image_warp(
                input_image,
                control_points,
                control_points + control_point_displacements,
                interpolation_order=interpolation_order,
                num_boundary_points=num_boundary_points,
            )

            out_image = np.uint8(warped_image[0, :, :, :] * 255)
            target_file = get_path_to_datafile(
                "image/tests/test_data/Yellow_Smiley_Face_Warp-interp" +
                "-{}-clamp-{}.png".format(interpolation_order,
                                          num_boundary_points))

            target_image = load_image(target_file)

            # Check that the target_image and out_image difference is no
            # bigger than 2 (on a scale of 0-255). Due to differences in
            # floating point computation on different devices, the float
            # output in warped_image may get rounded to a different int
            # than that in the saved png file loaded into target_image.
            np.testing.assert_allclose(target_image,
                                       out_image,
                                       atol=2,
                                       rtol=1e-3)
Example #2
def _get_all_shared_objects():
    custom_ops_dir = get_path_to_datafile("custom_ops", is_so=True)
    all_shared_objects = glob.glob(custom_ops_dir + "/**/*.so", recursive=True)
    all_shared_objects = [x for x in all_shared_objects if Path(x).is_file()]
    return all_shared_objects
Example #3
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow_addons.utils.resource_loader import get_path_to_datafile

_activation_ops_so = tf.load_op_library(
    get_path_to_datafile("custom_ops/activations/_activation_ops.so"))


@tf.keras.utils.register_keras_serializable(package='Addons')
@tf.function
def gelu(x, approximate=True):
    """Gaussian Error Linear Unit.

    Computes gaussian error linear:
    `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` or
    `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where P(X) ~ N(0, 1),
    depending on whether approximation is enabled.

    See [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
    and [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805).
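As a supplement, here is a minimal sketch evaluating the two formula variants quoted in the docstring above; the sample values and the use of `tf.tanh`/`tf.math.erf` are illustrative, not taken from the snippet.

import numpy as np

x = tf.constant([-1.0, 0.0, 1.0])

# Approximate (tanh-based) form quoted in the docstring.
approx = 0.5 * x * (1.0 + tf.tanh(
    np.sqrt(2.0 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))

# Exact (erf-based) form quoted in the docstring.
exact = 0.5 * x * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))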
Example #4
"""Tests for contrib.seq2seq.python.seq2seq.beam_search_decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

import tensorflow as tf

from tensorflow_addons.seq2seq import attention_wrapper
from tensorflow_addons.seq2seq import beam_search_decoder
from tensorflow_addons.utils import test_utils
from tensorflow_addons.utils.resource_loader import get_path_to_datafile

_beam_search_ops_so = tf.load_op_library(
    get_path_to_datafile("custom_ops/seq2seq/_beam_search_ops.so"))
gather_tree = _beam_search_ops_so.gather_tree


class TestGatherTree(tf.test.TestCase):
    """Tests the gather_tree function."""
    def test_gather_tree(self):
        # (max_time = 3, batch_size = 2, beam_width = 3)

        # create (batch_size, max_time, beam_width) matrix and transpose it
        predicted_ids = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                                  [[2, 3, 4], [5, 6, 7], [8, 9, 10]]],
                                 dtype=np.int32).transpose([1, 0, 2])
        parent_ids = np.array([[[0, 0, 0], [0, 1, 1], [2, 1, 2]],
                               [[0, 0, 0], [1, 2, 0], [2, 1, 1]]],
                              dtype=np.int32).transpose([1, 0, 2])
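A hedged sketch of how the transposed, time-major arrays above would typically be passed to `gather_tree`; the keyword names follow the standard `GatherTree` op inputs, and the `max_sequence_lengths` and `end_token` values are illustrative assumptions rather than part of the snippet.

# Inputs are time-major: (max_time, batch_size, beam_width).
beams = gather_tree(
    step_ids=predicted_ids,
    parent_ids=parent_ids,
    max_sequence_lengths=[3, 3],  # one entry per batch element
    end_token=11)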
Example #5
"""Image distance ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow_addons.utils.resource_loader import get_path_to_datafile

_distance_ops_so = tf.load_op_library(
    get_path_to_datafile("custom_ops/image/_distance_ops.so"))

distance_transform_3d = _distance_ops_so.distance_transform3d
distance_transform_2d = _distance_ops_so.distance_transform2d
Example #6
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distance transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow_addons.image import utils as img_utils
from tensorflow_addons.utils.resource_loader import get_path_to_datafile

_image_ops_so = tf.load_op_library(
    get_path_to_datafile("custom_ops/image/_image_ops.so"))

tf.no_gradient("Addons>EuclideanDistanceTransform")


@tf.function
def euclidean_dist_transform(images, dtype=tf.float32, name=None):
    """Applies euclidean distance transform(s) to the image(s).

    Args:
      images: A tensor of shape (num_images, num_rows, num_columns, 1) (NHWC),
        or (num_rows, num_columns, 1) (HWC) or (num_rows, num_columns) (HW).
      dtype: DType of the output tensor.
      name: The name of the op.

    Returns:
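A minimal usage sketch for `euclidean_dist_transform` as documented above; the binary test image is an illustrative assumption.

# A small binary image in HW layout: zeros are background pixels.
img = tf.constant([[0, 0, 0, 0],
                   [0, 1, 1, 0],
                   [0, 1, 1, 0],
                   [0, 0, 0, 0]], dtype=tf.uint8)
dist = euclidean_dist_transform(img, dtype=tf.float32)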
Example #7
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow op performing correlation cost operation."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow_addons.utils import keras_utils
from tensorflow_addons.utils.resource_loader import get_path_to_datafile

_correlation_cost_op_so = tf.load_op_library(
    get_path_to_datafile("custom_ops/layers/_correlation_cost_ops.so"))


@tf.function
def _correlation_cost(input_a,
                      input_b,
                      kernel_size,
                      max_displacement,
                      stride_1,
                      stride_2,
                      pad,
                      data_format='channels_last',
                      name=None):
    """Correlation Cost Volume computation.

    "FlowNet: Learning Optical Flow with Convolutional Networks"
Example #8
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""All gelu ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow_addons.utils.resource_loader import get_path_to_datafile
from tensorflow.python.framework import ops

_gelu_op_so = tf.load_op_library(
    get_path_to_datafile('custom_ops/text/_gelu_op.so'))


@ops.RegisterGradient("Gelu")
def _gelu_grad(op, grad):
    """The gradient for `gelu`.

    Args:
      op: The `gelu` `Operation` that we are differentiating, which we can use
        to find the inputs and outputs of the original op.
      grad: Gradient with respect to the output of the `gelu` op.

    Returns:
      Gradients with respect to the input of `gelu`.
    """
    return [_gelu_op_so.gelu_grad(grad, op.inputs[0], op.outputs[0])]
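A hedged sketch of how the registered gradient would be exercised; using `tf.GradientTape` is an assumption about the calling code, and the single-argument `gelu` call is inferred from the test snippet further below.

x = tf.constant([-1.0, 0.0, 1.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = _gelu_op_so.gelu(x)
# The tape dispatches to _gelu_grad via the "Gelu" registration above.
dy_dx = tape.gradient(y, x)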
Example #9
"""Image threshold ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow_addons.utils.resource_loader import get_path_to_datafile

_threshold_ops_so = tf.load_op_library(
    get_path_to_datafile("custom_ops/image/_threshold_ops.so"))

image_threshold = _threshold_ops_so.image_threshold
Example #10
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parse time ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from tensorflow_addons.utils.resource_loader import get_path_to_datafile

_parse_time_op = tf.load_op_library(
    get_path_to_datafile("custom_ops/text/_parse_time_op.so"))

tf.no_gradient("Addons>ParseTime")


def parse_time(time_string, time_format, output_unit):
    """Parse an input string according to the provided format string into a
    Unix time.

    Parse an input string according to the provided format string into a Unix
    time, the number of seconds / milliseconds / microseconds / nanoseconds
    elapsed since January 1, 1970 UTC.

    Uses strftime()-like formatting options, with the same extensions as
    FormatTime(), but with the exceptions that %E#S is interpreted as %E*S, and
    %E#f as %E*f.  %Ez and %E*z also accept the same inputs.
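A hedged usage sketch based on the docstring above; the time string, format string, and output unit are illustrative assumptions.

seconds = parse_time(
    time_string="2019-05-17T23:56:09Z",
    time_format="%Y-%m-%dT%H:%M:%S%Ez",
    output_unit="SECOND")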
Example #11
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Python layer for Resampler."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow_addons.utils.resource_loader import get_path_to_datafile

_resampler_ops = tf.load_op_library(
    get_path_to_datafile("custom_ops/image/_resampler_ops.so"))


@tf.function
def resampler(data, warp, name=None):
    """Resamples input data at user defined coordinates.

    The resampler currently only supports bilinear interpolation of 2D data.

    Args:
      data: Tensor of shape `[batch_size, data_height, data_width,
        data_num_channels]` containing 2D data that will be resampled.
      warp: Tensor of minimum rank 2 containing the coordinates at which
        resampling will be performed. Since only bilinear interpolation is
        currently supported, the last dimension of the `warp` tensor must be 2,
        representing the (x, y) coordinate where x is the index for width and
        y is the index for height.
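A minimal sketch of calling `resampler` as documented above; the image contents and query coordinates are illustrative.

# One 4x4 single-channel image and two (x, y) query points.
data = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
warp = tf.constant([[[1.5, 0.5], [2.0, 3.0]]])  # shape (1, 2, 2)
samples = resampler(data, warp)  # shape (1, 2, 1)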
Example #12
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Skip-gram sampling ops from https://arxiv.org/abs/1301.3781."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import tensorflow as tf

from tensorflow_addons.utils.resource_loader import get_path_to_datafile

skip_gram_ops = tf.load_op_library(
    get_path_to_datafile("custom_ops/text/_skip_gram_ops.so"))

tf.no_gradient("SkipGramGenerateCandidates")


def skip_gram_sample(input_tensor,
                     min_skips=1,
                     max_skips=5,
                     start=0,
                     limit=-1,
                     emit_self_as_target=False,
                     vocab_freq_table=None,
                     vocab_min_count=None,
                     vocab_subsampling=None,
                     corpus_size=None,
                     batch_size=None,
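A hedged usage sketch for `skip_gram_sample`; the token list and skip window are illustrative, and the `(tokens, labels)` return pair is an assumption based on the conventional skip-gram sampling API.

tokens, labels = skip_gram_sample(
    tf.constant(["the", "quick", "brown", "fox", "jumps"]),
    min_skips=1,
    max_skips=2)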
Example #13
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow_addons.utils.resource_loader import get_path_to_datafile
import numpy as np

_gelu_op_so = tf.load_op_library(
    get_path_to_datafile("custom_ops/text/_gelu_op.so"))
gelu = _gelu_op_so.gelu

# reference function to compare against
# source: https://github.com/google-research/bert/blob/bee6030e31e42a9394ac567da170a89a98d2062f/modeling.py#L264
def gelu_ref(x):
  """Gaussian Error Linear Unit.
  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415
  Args:
    x: float Tensor to perform activation.
  Returns:
    `x` with the GELU activation applied.
  """
  cdf = 0.5 * (1.0 + tf.tanh(
      (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
  return x * cdf
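A hedged sketch of comparing the custom op against the reference above; the test values and tolerance are illustrative assumptions, as is the premise that the op implements the same tanh approximation as `gelu_ref`.

def test_gelu_matches_reference():
  x = tf.constant(np.linspace(-3.0, 3.0, 13), dtype=tf.float32)
  np.testing.assert_allclose(gelu(x).numpy(), gelu_ref(x).numpy(), atol=1e-5)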