Example #1
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parse time ops."""

import tensorflow as tf

from tensorflow_addons.utils.resource_loader import LazySO

_parse_time_so = LazySO("custom_ops/text/_parse_time_op.so")

tf.no_gradient("Addons>ParseTime")


def parse_time(time_string: str, time_format: str, output_unit: str) -> str:
    """Parse an input string according to the provided format string into a
    Unix time.

    Parse an input string according to the provided format string into a Unix
    time, the number of seconds / milliseconds / microseconds / nanoseconds
    elapsed since January 1, 1970 UTC.

    Uses strftime()-like formatting options, with the same extensions as
    FormatTime(), but with the exceptions that %E#S is interpreted as %E*S, and
    %E#f as %E*f.  %Ez and %E*z also accept the same inputs.

    %Y consumes as many numeric characters as it can, so the matching
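
# A minimal usage sketch (hedged): the format string follows the
# strftime()-like options described above, and the unit name is assumed
# from the seconds/milliseconds/microseconds/nanoseconds list:
# timestamp = parse_time(time_string="2019-05-17T23:56:09.05Z",
#                        time_format="%Y-%m-%dT%H:%M:%E*S%Ez",
#                        output_unit="SECOND")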
Example #2
        nvOut, mfOut = decimation_module.mesh_decimation(vertexIn, faceIn, geometryIn,
                                                         nvIn_cumsum, mfIn_cumsum, nv2Remove,
                                                         useArea, wgtBnd)
        faceIn = tf.gather_nd(faceOut, tf.where(tf.logical_not(isDegenerate)))
        vertexIn = tf.gather_nd(vertexOut, tf.where(mapOut >= 0))
        geometryIn = compute_triangle_geometry_(vertexIn, faceIn)
        nv2Remove = nv2Remove - (nvIn - nvOut)
        repIn, mapIn = combine_clusters_(repIn, mapIn, repOut, mapOut)
        nvIn, mfIn = nvOut, mfOut
        return [nv2Remove, vertexIn, faceIn, geometryIn, repIn, mapIn, nvIn, mfIn]

    final = tf.while_loop(condition, body, loop_vars, shape_invariants)
    vertexOut, faceOut, geometryOut, repOut, mapOut, nvOut, mfOut = final[1:]

    return vertexOut, faceOut, geometryOut, nvOut, mfOut, repOut, mapOut
tf.no_gradient('MeshDecimation')
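
# Hedged sketch of the loop setup the truncated snippet above relies on:
# vertex/face counts shrink across iterations, so the loop vars need
# relaxed shape invariants. The exact invariants in the original source
# are not shown; these are illustrative only.
# shape_invariants = [tf.TensorShape([]),             # nv2Remove
#                     tf.TensorShape([None, 3]),      # vertexIn
#                     tf.TensorShape([None, 3]),      # faceIn
#                     tf.TensorShape([None, None]),   # geometryIn
#                     tf.TensorShape([None]),         # repIn
#                     tf.TensorShape([None]),         # mapIn
#                     tf.TensorShape(None),           # nvIn
#                     tf.TensorShape(None)]           # mfIn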


def combine_clusters_(repA, mapA, repB, mapB):
    '''
       input:
            repA: (batch_points,) int32 array, vertex clustering information of LARGE input
            mapA: (batch_points,) int32 array, vertex mapping information of LARGE input
            repB: (batch_points,) int32 array, vertex clustering information of SMALL/decimated input
            mapB: (batch_points,) int32 array, vertex mapping information of SMALL/decimated input
       returns:
            repComb: (batch_points,) int32 array, vertex clustering information after merging LARGE/SMALL input
            mapComb: (batch_points,) int32 array, vertex mapping information after merging LARGE/SMALL input
    '''
    repComb, mapComb = decimation_module.combine_clusters(repA, mapA, repB, mapB)
    return repComb, mapComb
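
# Hedged pure-TensorFlow reading of the map merge performed by the custom
# op above. Assumption (not visible in the snippet): mapA sends original
# vertices into the intermediate set (-1 for removed vertices) and mapB
# sends the intermediate set into the decimated set, so merging composes
# the two maps with a gather.
def combine_maps_tf_sketch(mapA, mapB):
    valid = mapA >= 0                                  # vertices surviving stage A
    safe = tf.where(valid, mapA, tf.zeros_like(mapA))  # clamp -1 for a safe gather
    return tf.where(valid, tf.gather(mapB, safe), mapA)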
Example #3
    model_proto: The sentencepiece model serialized proto. Either `model_file`
      or `model_proto` must be set.
    reverse: Reverses the tokenized sequence (Default = false)
    name: The name argument that is passed to the op function.

  Returns:
    text: A 1D string tensor of decoded string.
  """

    return _gen_sentencepiece_processor_op.sentencepiece_decode(
        pieces,
        sequence_length,
        model_file=model_file,
        model_proto=model_proto,
        reverse=reverse,
        name=name)


# Aliases for the encoder entry points; `encode` defaults to the dense encoder.
encode = encode_dense
sparse_encode = encode_sparse
dense_encode = encode_dense

tf.no_gradient('SentencepieceGetPieceSize')
tf.no_gradient('SentencepieceIdToPiece')
tf.no_gradient('SentencepiecePieceToId')
tf.no_gradient('SentencepieceGetPieceType')
tf.no_gradient('SentencepieceEncodeDense')
tf.no_gradient('SentencepieceEncodeSparse')
tf.no_gradient('SentencepieceDecode')
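
# Hedged usage sketch. The decode wrapper's def line is cut off above; the
# names and signatures below are assumptions, not from the snippet:
# text = decode(pieces=ids, sequence_length=lengths, model_file="sp.model")
# `encode` is simply the dense encoder under its preferred alias:
# ids, lengths = encode(tf.constant(["hello world"]), model_file="sp.model")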
Example #4

def fps_(xyz_in, nv_in, nv_out):
    '''
    input:
        xyz_in: (Np, 3) float32 array, database points
        nv_in:  (batch_size) int32 vector, number of input points of each batch sample
        nv_out: (batch_size) int32 vector, number of output points of each batch sample
    returns:
        sample_index: (Mp) int32 array, indices of the sampled points in the concatenated database
    '''
    nv_in = tf.cumsum(nv_in, exclusive=False)
    nv_out = tf.cumsum(nv_out, exclusive=False)
    return module.farthest_point_sample(xyz_in, nv_in, nv_out)
tf.no_gradient('FarthestPointSample')
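
# Worked sketch of the cumsum bookkeeping above: the kernel receives
# inclusive offsets rather than per-sample counts, so it can slice each
# sample out of the concatenated point array.
offsets_in = tf.cumsum(tf.constant([4, 6]))   # -> [4, 10]: 10 points total
offsets_out = tf.cumsum(tf.constant([2, 3]))  # -> [2, 5]: sample 5 points
# fps_ would then return 5 indices into the 10 concatenated input points.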



def fps_dim3_(database, mPoint):
    '''
    input:
        database: (batch, npoint, 3) float32 array, database points
        mPoint:   int32, the number of points to be sampled from each batch sample
    returns:
        sample_index: (batch, mPoint) int32 array, indices of the sampled points in the database
    '''
    return module.farthest_point_sample3d(database, mPoint)
tf.no_gradient('FarthestPointSample3D')

Example #5
    query = query[..., 0:3]
    nvDatabase = tf.cumsum(nvDatabase, exclusive=False)
    nvQuery = tf.cumsum(nvQuery, exclusive=False)

    if nnsample is None:
        nnsample = 2147483647  # int32 maximum value

    cntInfo, nnIndex, nnDist = nnquery_module.build_sphere_neighbor(
        database, query, nvDatabase, nvQuery, radius, nnsample)
    nnCount = tf.concat([cntInfo[:1], cntInfo[1:] - cntInfo[:-1]], axis=0)
    nnCount = tf.cast(nnCount, dtype=tf.int32)
    return cntInfo, nnCount, nnIndex, nnDist


tf.no_gradient('BuildSphereNeighbor')
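
# Worked example of the prefix-count trick used in the search wrapper
# above: the op returns cumulative neighbor counts per query (cntInfo),
# and adjacent differences recover the per-query counts.
cnt_info = tf.constant([3, 5, 9])  # cumulative: 3, then 2 more, then 4 more
nn_count = tf.concat([cnt_info[:1], cnt_info[1:] - cnt_info[:-1]], axis=0)
# nn_count == [3, 2, 4]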


def cube_search_(database,
                 query,
                 nvDatabase,
                 nvQuery,
                 length=0.1,
                 dilation_rate=None,
                 nnsample=None,
                 gridsize=3):
    '''
    Input:
        database: (concat_Np, 3) float32 array, database points
        query:    (concat_Mp, 3) float32 array, query points
        length:   float32, cube search length
Example #6
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

coref_op_library = tf.load_op_library("./coref_kernels.so")

extract_spans = coref_op_library.extract_spans
tf.no_gradient("ExtractSpans")
Example #7
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distance transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow_addons.image import utils as img_utils
from tensorflow_addons.utils.resource_loader import get_path_to_datafile

_image_ops_so = tf.load_op_library(
    get_path_to_datafile("custom_ops/image/_image_ops.so"))

tf.no_gradient("Addons>EuclideanDistanceTransform")


@tf.function
def euclidean_dist_transform(images, dtype=tf.float32, name=None):
    """Applies euclidean distance transform(s) to the image(s).

    Args:
      images: A tensor of shape (num_images, num_rows, num_columns, 1) (NHWC),
        or (num_rows, num_columns, 1) (HWC) or (num_rows, num_columns) (HW).
      dtype: DType of the output tensor.
      name: The name of the op.

    Returns:
      Image(s) with the type `dtype` and same shape as `images`, with the
      transform applied. If a tensor of all ones is given as input, the
Example #8
        per dimension.
      cell_size: An `int` `Tensor` of shape `[D]`, the cell sizes per
        dimension.

    Returns:
      An `int` `Tensor` of shape `[N]`, the keys per point.

  """
    aabb = point_cloud.get_AABB()
    return tfg_custom_ops.compute_keys(point_cloud._points,
                                       point_cloud._batch_ids,
                                       aabb._aabb_min / cell_size, num_cells,
                                       tf.math.reciprocal(cell_size))


tf.no_gradient('ComputeKeys')
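
# Hedged usage sketch: the keys exist to be sorted, grouping points by
# batch and then by grid cell (Example #15 shows the key layout in pure
# TensorFlow). The wrapper's def line is cut off above, so the call below
# is an assumption:
# keys = compute_keys(point_cloud, num_cells, cell_size)
# order = tf.argsort(keys)
# sorted_keys = tf.gather(keys, order)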


def build_grid_ds(sorted_keys, num_cells, batch_size, name=None):
    """ Method to build a fast access data structure for point clouds.

  Creates a 2D regular grid over the first two dimensions, saving the first
  and last index of the points belonging to each cell.

  Args:
    sorted_keys: An `int` `Tensor` of shape `[N]`, the sorted keys.
    num_cells: An `int` `Tensor` of shape `[D]`, the total number of cells
      per dimension.
    batch_size: An `int`.

  Returns:
    An `int` `Tensor` of shape `[batch_size, num_cells[0], num_cells[1], 2]`.
Example #9
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distance transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow_addons.image import utils as img_utils
from tensorflow_addons.utils.resource_loader import get_path_to_datafile

_image_ops_so = tf.load_op_library(
    get_path_to_datafile("custom_ops/image/_image_ops.so"))

tf.no_gradient("EuclideanDistanceTransform")


@tf.function
def euclidean_dist_transform(images, dtype=tf.float32, name=None):
    """Applies euclidean distance transform(s) to the image(s).

    Args:
      images: A tensor of shape (num_images, num_rows, num_columns, 1) (NHWC),
        or (num_rows, num_columns, 1) (HWC) or (num_rows, num_columns) (HW).
      dtype: DType of the output tensor.
      name: The name of the op.

    Returns:
      Image(s) with the type `dtype` and same shape as `images`, with the
      transform applied. If a tensor of all ones is given as input, the
Example #10
      warp: Tensor of minimum rank 2 containing the coordinates at
        which resampling will be performed. Since only bilinear
        interpolation is currently supported, the last dimension of the
        `warp` tensor must be 2, representing the (x, y) coordinate where
        x is the index for width and y is the index for height.
      name: Optional name of the op.
    Returns:
      Tensor of resampled values from `data`. The output tensor shape
      is determined by the shape of the warp tensor. For example, if `data`
      is of shape `[batch_size, data_height, data_width, data_num_channels]`
      and warp of shape `[batch_size, dim_0, ... , dim_n, 2]` the output will
      be of shape `[batch_size, dim_0, ... , dim_n, data_num_channels]`.
    Raises:
      ImportError: if the wrapper generated during compilation is not
        present when the function is called.
    """
    with tf.name_scope(name or "resampler"):
        data_tensor = tf.convert_to_tensor(data, name="data")
        warp_tensor = tf.convert_to_tensor(warp, name="warp")
        return _resampler_ops.addons_resampler(data_tensor, warp_tensor)


@tf.RegisterGradient("Addons>Resampler")
def _resampler_grad(op, grad_output):
    data, warp = op.inputs
    grad_output_tensor = tf.convert_to_tensor(grad_output, name="grad_output")
    return _resampler_ops.addons_resampler_grad(data, warp, grad_output_tensor)


tf.no_gradient("Addons>ResamplerGrad")
Example #11
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Skip-gram sampling ops from https://arxiv.org/abs/1301.3781."""

import csv
import tensorflow as tf

from tensorflow_addons.utils.resource_loader import LazySO

from tensorflow_addons.utils.types import AcceptableDTypes, FloatTensorLike, TensorLike
from typing import Optional

_skip_gram_so = LazySO("custom_ops/text/_skip_gram_ops.so")

tf.no_gradient("Addons>SkipGramGenerateCandidates")


def skip_gram_sample(
    input_tensor: TensorLike,
    min_skips: FloatTensorLike = 1,
    max_skips: FloatTensorLike = 5,
    start: FloatTensorLike = 0,
    limit: FloatTensorLike = -1,
    emit_self_as_target: bool = False,
    vocab_freq_table: tf.lookup.KeyValueTensorInitializer = None,
    vocab_min_count: Optional[FloatTensorLike] = None,
    vocab_subsampling: Optional[FloatTensorLike] = None,
    corpus_size: Optional[FloatTensorLike] = None,
    batch_size: Optional[FloatTensorLike] = None,
    batch_capacity: Optional[FloatTensorLike] = None,
Example #12
# limitations under the License.
# ==============================================================================
"""Skip-gram sampling ops from https://arxiv.org/abs/1301.3781."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import tensorflow as tf

from tensorflow_addons.utils.resource_loader import get_path_to_datafile

skip_gram_ops = tf.load_op_library(
    get_path_to_datafile("custom_ops/text/_skip_gram_ops.so"))

tf.no_gradient("SkipGramGenerateCandidates")


def skip_gram_sample(input_tensor,
                     min_skips=1,
                     max_skips=5,
                     start=0,
                     limit=-1,
                     emit_self_as_target=False,
                     vocab_freq_table=None,
                     vocab_min_count=None,
                     vocab_subsampling=None,
                     corpus_size=None,
                     batch_size=None,
                     batch_capacity=None,
                     seed=None,
Example #13
        nn_index: (Nout, 2) int32 array, neighbor indices
        nn_dist:  (Nout) float32, sqrt distance array
        radius:   float32, range search radius
        kernel:   list of 3 int32, spherical kernel size
    Output:
        filt_index: (Nout) int32 array, filter bin indices
    '''
    n, p, q = kernel

    database = database[:, 0:3]  #(x,y,z)
    query = query[:, 0:3]  #(x,y,z)
    return buildkernel_module.spherical_kernel(database, query, nn_index,
                                               nn_dist, radius, n, p, q)


tf.no_gradient('SphericalKernel')
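
# Hedged usage sketch chaining this codebase's ops (the kernel wrapper's
# def line is cut off above; the names sphere_search_ and SPH3Dkernel_ are
# assumptions):
# cntInfo, nnCount, nnIndex, nnDist = sphere_search_(database, query,
#                                                    nvDatabase, nvQuery,
#                                                    radius=0.1)
# filt_index = SPH3Dkernel_(database, query, nnIndex, nnDist,
#                           radius=0.1, kernel=[8, 4, 1])
# With kernel=[8, 4, 1] the sphere is split into 8 azimuth x 4 elevation
# x 1 radial bins; filt_index assigns each neighbor pair to one such bin.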


def fuzzySPH3Dkernel_(database,
                      query,
                      nn_index,
                      nn_dist,
                      radius,
                      kernel=[8, 4, 1]):
    '''
    Input:
        database: (concat_Np, 3+) float32 array, database points (x,y,z,...)
        query:    (concat_Mp, 3+) float32 array, query points (x,y,z,...)
        nn_index: (Nout, 2) int32 array, neighbor indices
        nn_dist:  (Nout) float32, sqrt distance array
        radius:   float32, range search radius
Example #14
                key_dtype=self._key_dtype,
                value_dtype=self._value_dtype)
            return keys, values

    def do_import(self, keys, values, name=None):
        """Import all `key` and `value` pairs.

    (Note that "import" is a Python reserved word, so it cannot be the name
    of a method.)

    Args:
      keys: Tensor of all keys.
      values: Tensor of all values.
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
        with tf.name_scope(name or "%s_lookup_table_import" % self._name):
            # pylint: disable=protected-access
            op = gen_simple_hash_table_op.examples_simple_hash_table_import(
                self.resource_handle, keys, values)
            return op


tf.no_gradient("Examples>SimpleHashTableCreate")
tf.no_gradient("Examples>SimpleHashTableFind")
tf.no_gradient("Examples>SimpleHashTableInsert")
tf.no_gradient("Examples>SimpleHashTableRemove")
Example #15
    aabb_min_per_batch = aabb._aabb_min
    aabb_min_per_point = tf.gather(aabb_min_per_batch, point_cloud._batch_ids)
    cell_ind = tf.math.floor(
        (point_cloud._points - aabb_min_per_point) / cell_size)
    cell_ind = tf.cast(cell_ind, tf.int32)
    cell_ind = tf.minimum(tf.maximum(cell_ind, tf.zeros_like(cell_ind)),
                          num_cells)
    cell_multiplier = tf.math.cumprod(num_cells, reverse=True)
    cell_multiplier = tf.concat((cell_multiplier, [1]), axis=0)
    keys = point_cloud._batch_ids * cell_multiplier[0] + \
        tf.math.reduce_sum(cell_ind * tf.reshape(cell_multiplier[1:], [1, -1]),
                           axis=1)
    return tf.cast(keys, tf.int64)


tf.no_gradient('ComputeKeysTF')
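
# Worked example of the key layout built above, for num_cells = [4, 4]:
num_cells_demo = tf.constant([4, 4])
mult = tf.math.cumprod(num_cells_demo, reverse=True)  # -> [16, 4]
mult = tf.concat((mult, [1]), axis=0)                 # -> [16, 4, 1]
# A point in batch 1 and cell (2, 3) gets key = 1*16 + 2*4 + 3*1 = 27,
# so sorting keys orders points by batch first, then row-major by cell.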


def build_grid_ds_tf(sorted_keys, num_cells, batch_size, name=None):
    """ Method to build a fast access data structure for point clouds.

  Creates a 2D regular grid over the first two dimensions, saving the first
  and last index of the points belonging to each cell.

  Args:
    sorted_keys: An `int` `Tensor` of shape `[N]`, the sorted keys.
    num_cells: An `int` `Tensor` of shape `[D]`, the total number of cells
      per dimension.
    batch_size: An `int`.

  Returns:
Example #16
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tensorflow as tf

tfmiss_ops = tf.load_op_library(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), '_tfmiss_ops.so'))

# preprocessing
tf.no_gradient('Miss>SampleMask')
tf.no_gradient('Miss>SkipGram')
tf.no_gradient('Miss>ContBow')


# qrnn
@tf.RegisterGradient('Miss>TimeMajorFoPool')
def _fo_pool_time_grad(op, grad):
    return tfmiss_ops.miss_time_major_bwd_fo_pool(h=op.outputs[0],
                                                  x=op.inputs[0],
                                                  forget=op.inputs[1],
                                                  gh=grad)


@tf.RegisterGradient('Miss>BatchMajorFoPool')
def _fo_pool_batch_grad(op, grad):
    return tfmiss_ops.miss_batch_major_bwd_fo_pool(h=op.outputs[0],
                                                   x=op.inputs[0],
                                                   forget=op.inputs[1],
                                                   gh=grad)
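
# Hedged sketch exercising the registered gradients above. The forward op
# names are assumed (snake_cased from the Miss>*FoPool registrations); the
# real signatures may differ:
# x = tf.random.normal([8, 2, 4])            # time-major input [T, B, C]
# forget = tf.sigmoid(tf.random.normal([8, 2, 4]))
# with tf.GradientTape() as tape:
#     tape.watch([x, forget])
#     h = tfmiss_ops.miss_time_major_fo_pool(x=x, forget=forget)
# dx, df = tape.gradient(tf.reduce_sum(h), [x, forget])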