Example #1
        shape_tensor = _ShapeTensor(shape)
        mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
        stddev_tensor = ops.convert_to_tensor(stddev,
                                              dtype=dtype,
                                              name="stddev")
        seed1, seed2 = random_seed.get_seed(seed)
        rnd = gen_random_ops._random_standard_normal(shape_tensor,
                                                     dtype,
                                                     seed=seed1,
                                                     seed2=seed2)
        mul = rnd * stddev_tensor
        value = math_ops.add(mul, mean_tensor, name=name)
        return value


ops.NoGradient("RandomStandardNormal")


def parameterized_truncated_normal(shape,
                                   means=0.0,
                                   stddevs=1.0,
                                   minvals=-2.0,
                                   maxvals=2.0,
                                   dtype=dtypes.float32,
                                   seed=None,
                                   name=None):
    """Outputs random values from a truncated normal distribution.

  The generated values follow a normal distribution with specified mean and
  standard deviation, except that values whose magnitude is more than 2 standard
  deviations from the mean are dropped and re-picked.
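
The drop-and-repick rule described in the docstring above can be illustrated with a short NumPy sketch (a hypothetical re-implementation of the semantics, not the op's actual kernel):

import numpy as np

def truncated_normal_np(shape, mean=0.0, stddev=1.0, rng=np.random):
    # Draw normals, then re-pick any value more than 2 stddevs from the mean.
    out = rng.normal(mean, stddev, size=shape)
    bad = np.abs(out - mean) > 2.0 * stddev
    while bad.any():
        out[bad] = rng.normal(mean, stddev, size=int(bad.sum()))
        bad = np.abs(out - mean) > 2.0 * stddev
    return out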
Example #2
approxmatch_module = tf.load_op_library(
    osp.join(base_dir, 'tf_approxmatch_so.so'))


def approx_match(xyz1, xyz2):
    '''
input:
	xyz1 : batch_size * #dataset_points * 3
	xyz2 : batch_size * #query_points * 3
returns:
	match : batch_size * #query_points * #dataset_points
	'''
    return approxmatch_module.approx_match(xyz1, xyz2)


ops.NoGradient('ApproxMatch')


#@tf.RegisterShape('ApproxMatch')
@ops.RegisterShape('ApproxMatch')
def _approx_match_shape(op):
    shape1 = op.inputs[0].get_shape().with_rank(3)
    shape2 = op.inputs[1].get_shape().with_rank(3)
    return [tf.TensorShape([shape1.dims[0], shape2.dims[1], shape1.dims[1]])]
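
A minimal usage sketch (assuming the op library above compiled and loaded; the input tensors here are hypothetical):

import numpy as np

xyz1 = tf.constant(np.random.rand(4, 1024, 3), dtype=tf.float32)  # dataset points
xyz2 = tf.constant(np.random.rand(4, 256, 3), dtype=tf.float32)   # query points
match = approx_match(xyz1, xyz2)  # (4, 256, 1024), as given by _approx_match_shape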


def match_cost(xyz1, xyz2, match):
    '''
input:
	xyz1 : batch_size * #dataset_points * 3
	xyz2 : batch_size * #query_points * 3
Example #3
def query_ball_point(radius, nsample, xyz1, xyz2):
    '''
    Input:
        radius: float32, ball search radius
        nsample: int32, number of points selected in each ball region
        xyz1: (batch_size, ndataset, 3) float32 array, input points
        xyz2: (batch_size, npoint, 3) float32 array, query points
    Output:
        idx: (batch_size, npoint, nsample) int32 array, indices to input points
        pts_cnt: (batch_size, npoint) int32 array, number of unique points in each local region
    '''
    #return grouping_module.query_ball_point(radius, nsample, xyz1, xyz2)
    return grouping_module.query_ball_point(xyz1, xyz2, radius, nsample)


ops.NoGradient('QueryBallPoint')


def select_top_k(k, dist):
    '''
    Input:
        k: int32, number of k SMALLEST elements selected
        dist: (b,m,n) float32 array, distance matrix, m query points, n dataset points
    Output:
        idx: (b,m,n) int32 array, first k in n are indices to the top k
        dist_out: (b,m,n) float32 array, first k in n are the top k
    '''
    return grouping_module.selection_sort(dist, k)


ops.NoGradient('SelectionSort')
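
A usage sketch for both ops (`xyz1`, `xyz2`, and `dist_matrix` are assumed tensors of the documented shapes):

idx, pts_cnt = query_ball_point(0.1, 32, xyz1, xyz2)  # (b, npoint, 32), (b, npoint)
top_idx, top_dist = select_top_k(3, dist_matrix)      # first 3 along the last axis hold the result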
Example #4
      A list of `Tensor` which `func` computes.
    """
    token = _py_funcs.insert(func)
    # We tie the registered function's life-time with the current
    # default graph. I.e., when the current graph is destroyed, we
    # should remove its py funcs.
    cleanup = CleanupFunc(token)
    g = ops.get_default_graph()
    # pylint: disable=protected-access
    #
    # TODO(zhifengc): Consider adding a Graph method to collect
    # `cleanup` objects in one of its member.
    if not hasattr(g, "_cleanup_py_funcs_used_in_graph"):
        g._cleanup_py_funcs_used_in_graph = []

    # When g is destroyed, elements in _cleanup_py_funcs_used_in_graph
    # will be destroyed and their __del__ will remove the 'token' from
    # the funcs registry.
    g._cleanup_py_funcs_used_in_graph.append(cleanup)

    return gen_script_ops._py_func(input=inp,
                                   token=token,
                                   Tout=Tout,
                                   name=name)
    # pylint: enable=protected-access


ops.RegisterShape("PyFunc")(common_shapes.unknown_shape)

ops.NoGradient("PyFunc")
Example #5
import tensorflow as tf
from tensorflow.python.framework import ops
import sys
import os

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sampling_module = tf.load_op_library(os.path.join(BASE_DIR, 'tf_sampling_so.so'))


def prob_sample(inp, inpr):
    '''
input:
    batch_size * ncategory float32
    batch_size * npoints   float32
returns:
    batch_size * npoints   int32
    '''
    return sampling_module.prob_sample(inp, inpr)


ops.NoGradient('ProbSample')

# TF 1.0 API requires shapes to be set in C++
#@tf.RegisterShape('ProbSample')
#def _prob_sample_shape(op):
#    shape1=op.inputs[0].get_shape().with_rank(2)
#    shape2=op.inputs[1].get_shape().with_rank(2)
#    return [tf.TensorShape([shape2.dims[0],shape2.dims[1]])]


def gather_point(inp, idx):
    '''
input:
    batch_size * ndataset * 3   float32
    batch_size * npoints        int32
returns:
    batch_size * npoints * 3    float32
    '''
    return sampling_module.gather_point(inp, idx)
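
Semantically, `gather_point(inp, idx)` selects `out[b, i, :] = inp[b, idx[b, i], :]`; a sketch of the equivalence, assuming a TensorFlow version where `tf.gather` supports `batch_dims`:

out = gather_point(inp, idx)                 # custom CUDA op
out_ref = tf.gather(inp, idx, batch_dims=1)  # same result with a stock op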
Example #6
sys.path.append(BASE_DIR)
knn_module = tf.load_op_library(os.path.join(BASE_DIR, 'tf_knn.so'))


def knn(k, queries, points):
    """

    :param queries: (N, P_queries, C)
    :param points:  (N, P_points, C)
    :param k:   int
    :return:    (N, P_queries, k, dis),  (N, P_queries, k, indices)
    """

    return knn_module.my_knn(k=k, queries=queries, points=points)

ops.NoGradient('MyKnn')


def farthest_point_sample(npoint, inp):
    '''
input:
    int32
    batch_size * ndataset * 3   float32
returns:
    batch_size * npoint         int32
    '''
    return knn_module.farthest_point_sample(inp, npoint)


ops.NoGradient('FarthestPointSample')
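
A minimal usage sketch (`points`, a (N, P, 3) float32 tensor, is an assumed input):

dis, idx = knn(8, queries=points, points=points)  # 8 nearest neighbours per point
sample_idx = farthest_point_sample(512, points)   # (N, 512) int32 indices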

if __name__ == '__main__':
    batch_size = 8
Example #7
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in sparse_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops

ops.NoGradient("SparseAddGrad")
ops.NoGradient("SparseConcat")
ops.NoGradient("SparseToDense")


@ops.RegisterGradient("SparseReorder")
def _SparseReorderGrad(op, unused_output_indices_grad, output_values_grad):
    """Gradients for the SparseReorder op.

  Args:
    op: the SparseReorder op
    unused_output_indices_grad: the incoming gradients of the output indices
    output_values_grad: the incoming gradients of the output values

  Returns:
    Gradient for each of the 3 input tensors:
Example #8
sys.path.append(BASE_DIR)

DFCN_module = tf.load_op_library(os.path.join(BASE_DIR, 'tf_DFCN_so.so'))


def DFCN_select(xyz, radius):
    """
    :param xyz: (b, n, 3) float
    :param radius: float
    :return: (b, n, 8) int
    """
    idx = DFCN_module.cube_select(xyz, radius)
    return idx


ops.NoGradient('CubeSelect')


def DFCN_select_two(xyz, radius):
    """
    :param xyz: (b, n, 3) float
    :param radius:  float
    :return: idx: (b, n, 16) int
    """
    idx = DFCN_module.cube_select_two(xyz, radius)
    return idx


ops.NoGradient('CubeSelectTwo')
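
A usage sketch (`xyz` is an assumed (b, n, 3) float32 tensor):

idx8 = DFCN_select(xyz, radius=0.2)       # (b, n, 8) neighbour indices
idx16 = DFCN_select_two(xyz, radius=0.2)  # (b, n, 16) neighbour indices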

Example #9
        shape_tensor = _ShapeTensor(shape)
        mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
        stddev_tensor = ops.convert_to_tensor(stddev,
                                              dtype=dtype,
                                              name="stddev")
        seed1, seed2 = random_seed.get_seed(seed)
        rnd = gen_random_ops._random_standard_normal(shape_tensor,
                                                     dtype,
                                                     seed=seed1,
                                                     seed2=seed2)
        mul = rnd * stddev_tensor
        value = math_ops.add(mul, mean_tensor, name=name)
        return value


ops.NoGradient("RandomStandardNormal")


def truncated_normal(shape,
                     mean=0.0,
                     stddev=1.0,
                     dtype=dtypes.float32,
                     seed=None,
                     name=None):
    """Outputs random values from a truncated normal distribution.

  The generated values follow a normal distribution with specified mean and
  standard deviation, except that values whose magnitude is more than 2 standard
  deviations from the mean are dropped and re-picked.

  Args:
Example #10
    resource_loader.get_path_to_datafile('_interpolate_ops.so'))


def three_nn(xyz1, xyz2):
    '''
    Input:
        xyz1: (b,n,3) float32 array, unknown points
        xyz2: (b,m,3) float32 array, known points
    Output:
        dist: (b,n,3) float32 array, distances to known points
        idx: (b,n,3) int32 array, indices to known points
    '''
    return interpolate_ops.three_nn(xyz1, xyz2)


ops.NoGradient('ThreeNN')


def three_interpolate(points, idx, weight):
    '''
    Input:
        points: (b,m,c) float32 array, known points
        idx: (b,n,3) int32 array, indices to known points
        weight: (b,n,3) float32 array, weights on known points
    Output:
        out: (b,n,c) float32 array, interpolated point values
    '''
    return interpolate_ops.three_interpolate(points, idx, weight)
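
These two ops are usually combined into inverse-distance-weighted interpolation; a sketch in the usual PointNet++ style (assuming `import tensorflow as tf`, a TF version with `keepdims`, and inputs `dense_xyz`, `sparse_xyz`, `sparse_points`):

dist, idx = three_nn(dense_xyz, sparse_xyz)             # (b,n,3) each
dist = tf.maximum(dist, 1e-10)                          # avoid division by zero
norm = tf.reduce_sum(1.0 / dist, axis=2, keepdims=True)
weight = (1.0 / dist) / norm                            # weights sum to 1 over 3 neighbours
dense_points = three_interpolate(sparse_points, idx, weight)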


@ops.RegisterGradient('ThreeInterpolate')
Example #11
_bucketization_op = load_library.load_op_library(
    resource_loader.get_path_to_datafile("_bucketization_op.so"))
assert _bucketization_op, "Could not load _bucketization_op.so."


def bucketize(input_tensor, boundaries, name=None):
    """Bucketizes input_tensor by given boundaries.

  See bucketize_op.cc for more details.

  Args:
    input_tensor: A `Tensor` which will be bucketized.
    boundaries: A list of floats giving the boundaries. It must be sorted.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `Tensor` with type int32 which indicates the corresponding bucket for
      each value in `input_tensor`.

  Raises:
    TypeError: If boundaries is not a list.
  """
    if not isinstance(boundaries, list):
        raise TypeError("boundaries must be a list")

    return _bucketization_op.bucketize(input_tensor, boundaries, name=name)


ops.NoGradient("Bucketize")
Example #12
mode algorithmic differentiation by Mike Giles:
http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops

ops.NoGradient("CholeskyGrad")
ops.NoGradient("BatchCholeskyGrad")


@ops.RegisterGradient("MatrixInverse")
def _MatrixInverseGrad(op, grad):
    """Gradient for MatrixInverse."""
    ainv = op.outputs[0]
    return -math_ops.matmul(
        ainv, math_ops.matmul(grad, ainv, transpose_b=True), transpose_a=True)
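
Why this formula: differentiating A @ inv(A) = I gives d(inv(A)) = -inv(A) dA inv(A), so the vector-Jacobian product works out as in these comments:

# With Y = inv(A) and incoming gradient G = dL/dY:
#   dL/dA = - Y^T @ G @ Y^T
# i.e. exactly -matmul(ainv, matmul(grad, ainv, transpose_b=True), transpose_a=True).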


@ops.RegisterGradient("BatchMatrixInverse")
def _BatchMatrixInverseGrad(op, grad):
    """Gradient for BatchMatrixInverse."""
    ainv = op.outputs[0]
Example #13
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in state_ops.py."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops

ops.NoGradient("Assign")

ops.NoGradient("AssignAdd")

ops.NoGradient("AssignSub")

ops.NoGradient("ScatterAdd")

ops.NoGradient("ScatterSub")
Example #14
# Compared to approxmatch (which implements ordinary EMD) we append a shift variable to each match. We need one 3D
# array with offsets for cloud xyz1 and one 3D array for xyz2. A full matrix has redundant information for each
# pair. Only the result variable then becomes more complex. It would be of size [b * n * m + b * n + b * m] with
# b = batch size, n = #dataset points, and m = #query points. In other words, we need to return multiple variables.
def multi_emd(xyz1,xyz2):
	'''
input:
	xyz1    : batch_size * #dataset_points * 3
	xyz2    : batch_size * #query_points * 3
returns:
	match   : batch_size * #query_points * #dataset_points 
	offset1 : batch_size * #dataset_points * 3
	offset2 : batch_size * #query_points * 3
	'''
	return multiemd_module.multi_emd(xyz1,xyz2)
ops.NoGradient('MultiEmd')
#@tf.RegisterShape('MultiEmd')
@ops.RegisterShape('MultiEmd')
def _multi_emd_shape(op):
	shape1=op.inputs[0].get_shape().with_rank(3)
	shape2=op.inputs[1].get_shape().with_rank(3)
	return [
		tf.TensorShape([shape1.dims[0],shape2.dims[1],shape1.dims[1]]), 
		tf.TensorShape([shape1.dims[0],shape1.dims[1],shape1.dims[2]]), 
		tf.TensorShape([shape1.dims[0],shape2.dims[1],shape2.dims[2]])
		]
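
A usage sketch (tensors of the documented shapes assumed):

match, offset1, offset2 = multi_emd(xyz1, xyz2)
# match: (b, m, n) assignment; offset1/offset2: per-point 3D shifts for xyz1/xyz2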

def multi_emd_cost(xyz1,xyz2,match,offset1,offset2):
	'''
input:
	xyz1    : batch_size * #dataset_points * 3
Example #15
from tensorflow.keras.layers import MaxPool1D, Layer

sampling_module = tf.load_op_library(
    "/pointnet2/tf_ops/sampling/tf_sampling_so.so")
grouping_module = tf.load_op_library(
    "/pointnet2/tf_ops/grouping/tf_grouping_so.so")
interpolate_module = tf.load_op_library(
    "/pointnet2/tf_ops/3d_interpolation/tf_interpolate_so.so")


def prob_sample(inp, inpr):
    return sampling_module.prob_sample(inp, inpr)


ops.NoGradient("ProbSample")


def gather_point(inp, idx):
    return sampling_module.gather_point(inp, idx)


@tf.RegisterGradient("GatherPoint")
def _gather_point_grad(op, out_g):
    inp = op.inputs[0]
    idx = op.inputs[1]
    return [sampling_module.gather_point_grad(inp, idx, out_g), None]


def farthest_point_sample(npoint, inp):
    return sampling_module.farthest_point_sample(inp, npoint)
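
The two sampling ops compose into the usual farthest-point subsampling pattern (a sketch; `xyz` is an assumed (b, n, 3) tensor):

new_xyz = gather_point(xyz, farthest_point_sample(512, xyz))  # (b, 512, 3) FPS subset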
Example #16
def _DynamicStitchGrads(op, grad):
  """Gradients for DynamicStitch."""

  num_values = len(op.inputs) // 2
  indices_grad = [None] * num_values

  def AsInt32(x):
    return (x if op.inputs[0].dtype == dtypes.int32 else
            math_ops.cast(x, dtypes.int32))
  inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
  if isinstance(grad, ops.IndexedSlices):
    output_shape = array_ops.shape(op.outputs[0])
    output_rows = output_shape[0]
    grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)
  values_grad = [array_ops.gather(grad, inp) for inp in inputs]
  return indices_grad + values_grad
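
Intuition for the gradient above, as comments (a sketch, not library code):

# forward:  merged[indices[m][i]] = data[m][i]   (dynamic_stitch scatters)
# backward: data_grad[m] = gather(grad, indices[m]); indices get no gradient,
#           and IndexedSlices grads are first densified via unsorted_segment_sum.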


ops.NoGradient("Queue")
ops.NoGradient("QueueEnqueue")
ops.NoGradient("QueueEnqueueMany")
ops.NoGradient("QueueDequeue")
ops.NoGradient("QueueDequeueMany")
ops.NoGradient("QueueClose")
ops.NoGradient("QueueSize")

ops.NoGradient("Stack")
ops.NoGradient("StackPush")
ops.NoGradient("StackPop")
ops.NoGradient("StackClose")
Example #17
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_parsing_ops import *
# pylint: enable=wildcard-import,undefined-variable


ops.NoGradient("DecodeRaw")
ops.NoGradient("ParseTensor")
ops.NoGradient("StringToNumber")


class VarLenFeature(collections.namedtuple("VarLenFeature", ["dtype"])):
  """Configuration for parsing a variable-length input feature.

  Fields:
    dtype: Data type of input.
  """
  pass


class FixedLenFeature(collections.namedtuple(
    "FixedLenFeature", ["shape", "dtype", "default_value"])):
Example #18
from __future__ import print_function

import os
import threading

import tensorflow as tf

from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape

TRAINING_OPS_FILE = '_training_ops.so'

_training_ops = None
_ops_lock = threading.Lock()

ops.NoGradient('CountExtremelyRandomStats')
ops.NoGradient('SampleInputs')
ops.NoGradient('BestSplits')
ops.NoGradient('GrowTree')
ops.NoGradient('FinishedNodes')
ops.NoGradient('ScatterAddNdim')
ops.NoGradient('UpdateFertileSlots')


@ops.RegisterShape('CountExtremelyRandomStats')
def _CountExtremelyRandomStatsShape(op):
    """Shape function for CountExtremelyRandomStats Op."""
    num_points = op.inputs[0].get_shape()[0].value
    num_nodes = op.inputs[2].get_shape()[0].value
    num_classes = op.get_attr('num_classes')
    # The output of TraverseTree is [leaf_node_index(x) for x in input_data].
Example #19
                                         grad.indices < end)),
                                                      squeeze_dims=[1])
                new_indices = array_ops.gather(grad.indices,
                                               indices_to_select) - start
                new_values = array_ops.gather(grad.values, indices_to_select)
                out_grads.append(
                    ops.IndexedSlices(new_values, new_indices, size))
                start = end
    else:
        raise TypeError("Expected Tensor or IndexedSlices, got %s" %
                        type(grad))

    return [None] + out_grads


ops.NoGradient("ConcatOffset")


@ops.RegisterGradient("Slice")
def _SliceGrad(op, grad):
    """Gradient for Slice op."""
    # Create an Nx2 padding where the first column represents how many
    # zeros are to be prepended for each dimension, and the second
    # column indicates how many zeros are appended.
    #
    # The number of zeros to append is the shape of the input
    # elementwise-subtracted by both the begin vector and sizes vector.
    #
    # Some more reshaping is needed to assemble this tensor with the
    # right dimensions.
    input_vec = op.inputs[0]
Example #20
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops

# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_image_ops import *
from tensorflow.python.ops.gen_attention_ops import *
# pylint: enable=wildcard-import

ops.NoGradient('RandomCrop')
ops.NoGradient('RGBToHSV')
ops.NoGradient('HSVToRGB')


def _ImageDimensions(images):
    """Returns the dimensions of an image tensor.

  Args:
    images: 4-D Tensor of shape [batch, height, width, channels]

  Returns:
    list of integers [batch, height, width, channels]
  """
    # A simple abstraction to provide names for each dimension. This abstraction
    # should make it simpler to switch dimensions in the future (e.g. if we ever
Example #21
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_parsing_ops import *
# pylint: enable=wildcard-import,undefined-variable

ops.NoGradient("DecodeRaw")
ops.NoGradient("StringToNumber")


class VarLenFeature(collections.namedtuple("VarLenFeature", ["dtype"])):
    """Configuration for parsing a variable-length input feature.

  Fields:
    dtype: Data type of input.
  """
    pass


class FixedLenFeature(
        collections.namedtuple("FixedLenFeature",
                               ["shape", "dtype", "default_value"])):
Example #22
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in tensor_array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops import tensor_array_ops

ops.NoGradient("TensorArray")
ops.NoGradient("TensorArrayGrad")
ops.NoGradient("TensorArraySize")
ops.NoGradient("TensorArrayClose")


def _GetGradSource(op_or_tensor):
    """Identify which call to tf.gradients created this gradient op or tensor.

  TensorArray gradient calls use an accumulator TensorArray object.  If
  multiple gradients are calculated and run in the same session, the multiple
  gradient nodes may accidentally flow through the same accumulator TensorArray.
  This double counting breaks the TensorArray gradient flow.

  The solution is to identify which gradient call this particular
  TensorArray*Grad is being called in, by looking at the input gradient
Example #23
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
auctionmatch_module = tf.load_op_library(os.path.join(BASE_DIR, 'tf_auctionmatch_so.so'))
sys.path.append("../..")

def auction_match(xyz1,xyz2):
	'''
input:
	xyz1 : batch_size * #points * 3
	xyz2 : batch_size * #points * 3
returns:
	matchl : batch_size * #npoints
	matchr : batch_size * #npoints
	'''
	return auctionmatch_module.auction_match(xyz1,xyz2)
ops.NoGradient('AuctionMatch')

# TF1.0 API requires set shape in C++
# @tf.RegisterShape('AuctionMatch')
# def _auction_match_shape(op):
# 	shape1=op.inputs[0].get_shape().with_rank(3)
# 	shape2=op.inputs[1].get_shape().with_rank(3)
# 	return [
# 		tf.TensorShape([shape1.dims[0],shape1.dims[1]]),
# 		tf.TensorShape([shape2.dims[0],shape2.dims[1]])
# 	]

if __name__ == '__main__':
    from tf_ops.grouping import tf_grouping
    from tf_ops.sampling import tf_sampling
Example #24
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops

# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_image_ops import *
# pylint: enable=wildcard-import

from tensorflow.python.util.all_util import make_all

ops.NoGradient('RandomCrop')
ops.NoGradient('RGBToHSV')
ops.NoGradient('HSVToRGB')
ops.NoGradient('DrawBoundingBoxes')
ops.NoGradient('SampleDistortedBoundingBox')
# TODO(bsteiner): Implement the gradient function for extract_glimpse
ops.NoGradient("ExtractGlimpse")


def _ImageDimensions(images):
    """Returns the dimensions of an image tensor.

  Args:
    images: 4-D Tensor of shape [batch, height, width, channels]

  Returns:
Example #25
    collections: Optional list of graph collections keys. The new summary op is
      added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
    name: A name for the operation (optional).

  Returns:
    A scalar `Tensor` of type `string`. The serialized `Summary` protocol
    buffer.
  """
    with ops.op_scope([tags, values], name, "ScalarSummary") as scope:
        val = gen_summary_ops._scalar_summary(tags=tags,
                                              values=values,
                                              name=scope)
        _Collect(val, collections, [ops.GraphKeys.SUMMARIES])
    return val


ops.NoGradient("HistogramAccumulatorSummary")
ops.NoGradient("HistogramSummary")
ops.NoGradient("ImageSummary")
ops.NoGradient("MergeSummary")
ops.NoGradient("ScalarSummary")


@ops.RegisterShape("HistogramAccumulatorSummary")
@ops.RegisterShape("HistogramSummary")
@ops.RegisterShape("ImageSummary")
@ops.RegisterShape("MergeSummary")
@ops.RegisterShape("ScalarSummary")
def _ScalarShape(unused_op):
    return [tensor_shape.scalar()]
Example #26
    """Returns an Op that initializes all tables of the default graph.

  Args:
    name: Optional name for the initialization op.

  Returns:
    An Op that initializes all tables.  Note that if there are
    no tables the returned Op is a NoOp.
  """
    initializers = ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS)
    if initializers:
        return control_flow_ops.group(*initializers, name=name)
    return control_flow_ops.no_op(name=name)


ops.NoGradient("LookupTableFind")
ops.NoGradient("LookupTableInsert")
ops.NoGradient("LookupTableSize")
ops.NoGradient("HashTable")
ops.NoGradient("InitializeTable")
ops.NoGradient("InitializeTableFromTextFile")
ops.NoGradient("MutableHashTable")

ops.RegisterShape("QueueSize")(common_shapes.scalar_shape)
ops.RegisterShape("Queue")(common_shapes.scalar_shape)
ops.RegisterShape("FIFOQueue")(common_shapes.scalar_shape)
ops.RegisterShape("PaddingFIFOQueue")(common_shapes.scalar_shape)
ops.RegisterShape("RandomShuffleQueue")(common_shapes.scalar_shape)


def _ScalarToVoidShape(op):
Example #27
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  # .op works with Tensors or IndexedSlices
  with ops.control_dependencies([grad.op]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # Tensor (not a number like 2.0) which causes it to convert to Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))
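
This is the tail of the gradient for `SquaredDifference`; as comments, the chain rule it implements:

# z = (x - y)^2
# dz/dx =  2 * (x - y)  =>  x_grad = 2 * grad * (x - y), reduced over broadcast dims rx, reshaped to sx
# dz/dy = -2 * (x - y)  =>  y_grad = -x_grad, reduced over ry, reshaped to sy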


# Logical operations have no gradients.
ops.NoGradient("Less")
ops.NoGradient("LessEqual")
ops.NoGradient("Greater")
ops.NoGradient("GreaterEqual")
ops.NoGradient("Equal")
ops.NoGradient("NotEqual")
ops.NoGradient("LogicalAnd")
ops.NoGradient("LogicalOr")
ops.NoGradient("LogicalNot")


@ops.RegisterGradient("Select")
def _SelectGrad(op, grad):
  c = op.inputs[0]
  x = op.inputs[1]
  zeros = array_ops.zeros(array_ops.shape(x), dtype=x.dtype)
Example #28
import cslab_environ

from tensorflow.python.framework import ops
import tfplus.utils.logger as logger
import numpy as np
import tensorflow as tf

hungarian_module = None

log = logger.get()


# Register gradient for Hungarian algorithm.
ops.NoGradient("Hungarian")


def get_device_fn(device):
    """Choose device for different ops."""
    OPS_ON_CPU = set(['ResizeBilinear', 'ResizeBilinearGrad', 'Mod', 'CumMin',
                      'CumMinGrad', 'Hungarian', 'Reverse', 'SparseToDense',
                      'BatchMatMul', 'Gather', 'Print'])

    def _device_fn(op):
        if op.type in OPS_ON_CPU:
            return "/cpu:0"
        else:
            # Other ops will be placed on GPU if available, otherwise CPU.
            return device

    return _device_fn
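
The returned function is passed directly to `tf.device` (a usage sketch; `params` and `indices` are assumed tensors):

with tf.device(get_device_fn('/gpu:0')):
    out = tf.gather(params, indices)  # op type 'Gather' is in OPS_ON_CPU, so it is pinned to /cpu:0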
Example #29
    """Whether the Reader implementation can serialize its state."""
    return self._supports_serialize

  def reset(self, name=None):
    """Restore a reader to its initial clean state.

    Args:
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    return gen_io_ops._reader_reset(self._reader_ref, name=name)


ops.NoGradient("ReaderRead")
ops.NoGradient("ReaderNumRecordsProduced")
ops.NoGradient("ReaderNumWorkUnitsCompleted")
ops.NoGradient("ReaderSerializeState")
ops.NoGradient("ReaderRestoreState")
ops.NoGradient("ReaderReset")


class WholeFileReader(ReaderBase):
  """A Reader that outputs the entire contents of a file as a value.

  To use, enqueue filenames in a Queue.  The output of Read will
  be a filename (key) and the contents of that file (value).

  See ReaderBase for supported methods.
  """
Example #30
    os.path.join(BASE_DIR, "build", "libtf_interpolate.so"))


def three_nn(xyz1, xyz2):
    """
    Input:
        xyz1: (b,n,3) float32 array, unknown points
        xyz2: (b,m,3) float32 array, known points
    Output:
        dist: (b,n,3) float32 array, distances to known points
        idx: (b,n,3) int32 array, indices to known points
    """
    return interpolate_module.three_nn(xyz1, xyz2)


ops.NoGradient("ThreeNN")


def interpolate_label(sparse_points, sparse_labels, dense_points, knn):
    """
    Input:
        sparse_points: (num_sparse_points, 3) float32 array, points
                      with known labels
        sparse_labels: (num_sparse_points, 3) float32 array, labels of
                      sparse_points
        dense_points: (num_dense_points, 3) float32 array, points
                      with unknown labels
        knn: int, use k-NN for label interpolation
    Output:
        dense_labels:  (num_dense_points,) int32 array, indices
        dense_colors:  (num_dense_points, 3) uint8 array, colors for dense_labels