Example #1
    rank `n-1`, and the same first `n-1` dimensions as `a`. Each value is the
    number of unique elements in the corresponding last-dimension slice of `a`.

  Raises:
    TypeError: If `a` is of an invalid type.
  """
    a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
    if not isinstance(a, sparse_tensor.SparseTensor):
        raise TypeError("Expected `SparseTensor`, got %s." % a)
    if a.values.dtype.base_dtype not in _VALID_DTYPES:
        raise TypeError("Invalid dtype %s." % a.values.dtype)
    # pylint: disable=protected-access
    return gen_set_ops.set_size(a.indices, a.values, a.shape, validate_indices)


ops.NotDifferentiable("SetSize")

ops.NotDifferentiable("DenseToDenseSetOperation")
ops.NotDifferentiable("DenseToSparseSetOperation")
ops.NotDifferentiable("SparseToSparseSetOperation")


def _set_operation(a, b, set_operation, validate_indices=True):
    """Compute set operation of elements in last dimension of `a` and `b`.

  All but the last dimension of `a` and `b` must match.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
        must be sorted in row-major order.
    b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
Example #2
    def reset(self, name=None):
        """Restore a reader to its initial clean state.

    Args:
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
        if self._reader_ref.dtype == dtypes.resource:
            return gen_io_ops._reader_reset_v2(self._reader_ref, name=name)
        else:
            return gen_io_ops._reader_reset(self._reader_ref, name=name)


ops.NotDifferentiable("ReaderRead")
ops.NotDifferentiable("ReaderReadUpTo")
ops.NotDifferentiable("ReaderNumRecordsProduced")
ops.NotDifferentiable("ReaderNumWorkUnitsCompleted")
ops.NotDifferentiable("ReaderSerializeState")
ops.NotDifferentiable("ReaderRestoreState")
ops.NotDifferentiable("ReaderReset")


class WholeFileReader(ReaderBase):
    """A Reader that outputs the entire contents of a file as a value.

  To use, enqueue filenames in a Queue.  The output of Read will
  be a filename (key) and the contents of that file (value).

  See ReaderBase for supported methods.
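
  Usage sketch (added for illustration; the TF 1.x queue-runner API is
  assumed):

    filename_queue = tf.train.string_input_producer(["a.txt", "b.txt"])
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)  # key: filename, value: contents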
Example #3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import tensorflow as tf

from tensorflow.python.framework import random_seed
from tensorflow.python.framework import ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.platform import resource_loader

skip_gram_ops = tf.load_op_library(
    resource_loader.get_path_to_datafile("_skip_gram_ops.so"))

ops.NotDifferentiable("SkipGramGenerateCandidates")


def skip_gram_sample(input_tensor,
                     min_skips=1,
                     max_skips=5,
                     start=0,
                     limit=-1,
                     emit_self_as_target=False,
                     vocab_freq_table=None,
                     vocab_min_count=None,
                     vocab_subsampling=None,
                     corpus_size=None,
                     batch_size=None,
                     batch_capacity=None,
                     seed=None,
Example #4
    date=None,
    instructions="""tf.py_func is deprecated in TF V2. Instead, there are two
    options available in V2.
    - tf.py_function takes a python function which manipulates tf eager
    tensors instead of numpy arrays. It's easy to convert a tf eager tensor to
    an ndarray (just call tensor.numpy()) but having access to eager tensors
    means `tf.py_function`s can use accelerators such as GPUs as well as
    being differentiable using a gradient tape.
    - tf.numpy_function maintains the semantics of the deprecated tf.py_func
    (it is not differentiable, and manipulates numpy arrays). It drops the
    stateful argument, making all functions stateful.
    """)
@tf_export(v1=["py_func"])
def py_func(func, inp, Tout, stateful=True, name=None):
    return py_func_common(func, inp, Tout, stateful, name=name)


py_func.__doc__ = "%s" % py_func_common.__doc__


@tf_export("numpy_function")
def numpy_function(func, inp, Tout, name=None):
    return py_func_common(func, inp, Tout, stateful=True, name=name)


numpy_function.__doc__ = py_func_common.__doc__.replace(
    "py_func", "numpy_function")

ops.NotDifferentiable("PyFunc")
ops.NotDifferentiable("PyFuncStateless")
Example #5
  Returns:
    A tuple `(decoded, log_probabilities)` where
    decoded: A list of length top_paths, where `decoded[j]`
      is a `SparseTensor` containing the decoded outputs:
      `decoded[j].indices`: Indices matrix `(total_decoded_outputs[j] x 2)`
        The rows store: [batch, time].
      `decoded[j].values`: Values vector, size `(total_decoded_outputs[j])`.
        The vector stores the decoded classes for beam j.
      `decoded[j].shape`: Shape vector, size `(2)`.
        The shape values are: `[batch_size, max_decoded_length[j]]`.
    log_probabilities: A `float` matrix `(batch_size x top_paths)` containing
        sequence log-probabilities.
  """

    decoded_ixs, decoded_vals, decoded_shapes, log_probabilities = (
        gen_ctc_ops._ctc_beam_search_decoder(inputs,
                                             sequence_length,
                                             beam_width=beam_width,
                                             top_paths=top_paths,
                                             merge_repeated=merge_repeated))

    return ([
        sparse_tensor.SparseTensor(ix, val, shape)
        for (ix, val, shape) in zip(decoded_ixs, decoded_vals, decoded_shapes)
    ], log_probabilities)


ops.NotDifferentiable("CTCGreedyDecoder")

ops.NotDifferentiable("CTCBeamSearchDecoder")
Example #6
        table_id=self._table_id,
        timestamp_millis=self._timestamp_millis,
        columns=self._columns,
        test_end_point=self._test_end_point)
    super(BigQueryReader, self).__init__(reader)

  def partitions(self, name=None):
    """Returns serialized BigQueryTablePartition messages.

    These messages represent a non-overlapping division of a table for a
    bulk read.

    Args:
      name: a name for the operation (optional).

    Returns:
      `1-D` string `Tensor` of serialized `BigQueryTablePartition` messages.
    """
    return gen_bigquery_reader_ops.generate_big_query_reader_partitions(
        name=name,
        project_id=self._project_id,
        dataset_id=self._dataset_id,
        table_id=self._table_id,
        timestamp_millis=self._timestamp_millis,
        num_partitions=self._num_partitions,
        test_end_point=self._test_end_point,
        columns=self._columns)


ops.NotDifferentiable("BigQueryReader")
Example #7
                             grad,
                             start_value_index=1,
                             end_value_index=len(op.inputs),
                             dim_index=0)


@ops.RegisterGradient("ConcatV2")
def _ConcatGradV2(op, grad):
    return _ConcatGradHelper(op,
                             grad,
                             start_value_index=0,
                             end_value_index=-1,
                             dim_index=-1)


ops.NotDifferentiable("ConcatOffset")


@ops.RegisterGradient("Slice")
def _SliceGrad(op, grad):
    """Gradient for Slice op."""
    # Create an Nx2 padding where the first column represents how many
    # zeros are to be prepended for each dimension, and the second
    # column indicates how many zeros are appended.
    #
    # The number of zeros to append is the shape of the input
    # elementwise-subtracted by both the begin vector and sizes vector.
    #
    # Some more reshaping is needed to assemble this tensor with the
    # right dimensions.
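    #
    # Worked example (added for illustration): for input shape [4, 5],
    # begin = [1, 2] and size = [2, 2], we prepend [1, 2] zeros and append
    # [4, 5] - [1, 2] - [2, 2] = [1, 1] zeros, giving paddings
    # [[1, 1], [2, 1]].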
    input_vec = op.inputs[0]
Example #8
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables


ops.NotDifferentiable('RandomCrop')
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('RGBToHSV')
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('HSVToRGB')
ops.NotDifferentiable('DrawBoundingBoxes')
ops.NotDifferentiable('SampleDistortedBoundingBox')
# TODO(bsteiner): Implement the gradient function for extract_glimpse
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('ExtractGlimpse')
ops.NotDifferentiable('NonMaxSuppression')

Example #9
        # Map each nonzero id to consecutive values.
        nonzero_consecutive_ids = math_ops.range(
            array_ops.shape(unique_ids)[0] -
            array_ops.shape(id_is_zero)[0]) + 1
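        # Worked example (added for illustration): unique_ids = [0, 5, 9]
        # gives id_is_zero of length 1, so nonzero_consecutive_ids =
        # range(3 - 1) + 1 = [1, 2]; has_zero() below re-inserts the zero
        # to yield [0, 1, 2].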

        def no_zero():
            # No need to insert a zero into the ids.
            return nonzero_consecutive_ids

        def has_zero():
            # Insert a zero in the consecutive ids where zero appears in unique_ids.
            # id_is_zero has length 1.
            zero_id_ind = math_ops.cast(id_is_zero[0], dtypes.int32)
            ids_before = nonzero_consecutive_ids[:zero_id_ind]
            ids_after = nonzero_consecutive_ids[zero_id_ind:]
            return array_ops.concat([ids_before, [0], ids_after], axis=0)

        new_ids = control_flow_ops.cond(
            math_ops.equal(array_ops.shape(id_is_zero)[0], 0), no_zero,
            has_zero)
        components = array_ops.reshape(array_ops.gather(new_ids, id_index),
                                       array_ops.shape(components))
        if len(image_or_images.get_shape()) == 2:
            return components[0, :, :]
        else:
            return components


ops.NotDifferentiable("BipartiteMatch")
ops.NotDifferentiable("ImageConnectedComponents")
Example #10
      image in `[X, Y, Channels]`: 1 for grayscale, 3 for color; channels will
      be updated to 3 if `number_colors` > 256.
    output_data_window: An optional `tf.TensorShape` or list of `ints`.
      Defaults to `[1022, 757]`. Size of the "DATA" window; must be equal to or
      smaller than `output_image_shape`, and will be centered, using
      `convergence_dots_size` for the best fit to avoid overlap if possible.

  Returns:
    A `Tensor` of type `uint8` of shape `output_image_shape` with the encoded
    `depth_values`.
  """

    result = gen_single_image_random_dot_stereograms_ops.single_image_random_dot_stereograms(  # pylint: disable=line-too-long
        depth_values=depth_values,
        hidden_surface_removal=hidden_surface_removal,
        convergence_dots_size=convergence_dots_size,
        dots_per_inch=dots_per_inch,
        eye_separation=eye_separation,
        mu=mu,
        normalize=normalize,
        normalize_max=normalize_max,
        normalize_min=normalize_min,
        border_level=border_level,
        number_colors=number_colors,
        output_image_shape=output_image_shape,
        output_data_window=output_data_window)
    return result


ops.NotDifferentiable("SingleImageRandomDotStereograms")
Example #11
# pylint: enable=unused-import

from tensorflow.contrib.util import loader
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking


_stats_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_stats_ops.so"))


ops.NotDifferentiable("FertileStatsVariable")
ops.NotDifferentiable("FertileStatsSerialize")
ops.NotDifferentiable("FertileStatsDeserialize")
ops.NotDifferentiable("GrowTreeV4")
ops.NotDifferentiable("ProcessInputV4")
ops.NotDifferentiable("FinalizeTree")


class FertileStatsVariableSavable(saver.BaseSaverBuilder.SaveableObject):
  """SaveableObject implementation for FertileStatsVariable."""

  def __init__(self, params, stats_handle, create_op, name):
    """Creates a FertileStatsVariableSavable object.

    Args:
      params: A TensorForestParams object.
Example #12
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import tree_predictions_v4
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import tree_size
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import update_model_v4
# pylint: enable=unused-import

from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking

_model_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_model_ops.so"))

ops.NotDifferentiable("TreeVariable")
ops.NotDifferentiable("TreeSerialize")
ops.NotDifferentiable("TreeDeserialize")
ops.NotDifferentiable("TreeSize")
ops.NotDifferentiable("TreePredictionsV4")
ops.NotDifferentiable("FeatureUsageCounts")


class TreeVariableSavable(saver.BaseSaverBuilder.SaveableObject):
    """SaveableObject implementation for TreeVariable."""
    def __init__(self, params, tree_handle, stats_handle, create_op, name):
        """Creates a TreeVariableSavable object.

    Args:
      params: A TensorForestParams object.
      tree_handle: handle to the tree variable.
Example #13
    def AsInt32(x):
        return (x if op.inputs[0].dtype == dtypes.int32 else math_ops.cast(
            x, dtypes.int32))

    inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
    if isinstance(grad, ops.IndexedSlices):
        output_shape = array_ops.shape(op.outputs[0])
        output_rows = output_shape[0]
        grad = math_ops.unsorted_segment_sum(grad.values, grad.indices,
                                             output_rows)
    values_grad = [array_ops.gather(grad, inp) for inp in inputs]
    return indices_grad + values_grad


ops.NotDifferentiable("Queue")
ops.NotDifferentiable("QueueEnqueue")
ops.NotDifferentiable("QueueEnqueueMany")
ops.NotDifferentiable("QueueDequeue")
ops.NotDifferentiable("QueueDequeueMany")
ops.NotDifferentiable("QueueDequeueUpTo")
ops.NotDifferentiable("QueueClose")
ops.NotDifferentiable("QueueSize")

ops.NotDifferentiable("Stack")
ops.NotDifferentiable("StackPush")
ops.NotDifferentiable("StackPop")
ops.NotDifferentiable("StackClose")

ops.NotDifferentiable("GetSessionHandle")
ops.NotDifferentiable("GetSessionHandleV2")
    """An op which sums an input tensor over all the Horovod processes.

    The reduction operation is keyed by the name of the op. The tensor type and
    shape must be the same on all Horovod processes for a given name. The reduction
    will not start until all processes are ready to send and receive the tensor.

    Returns:
      A tensor of the same shape and type as `tensor`, summed across all
      processes.
    """
    if name is None:
        name = 'HorovodAllreduce_%s' % _normalize_name(tensor.name)
    return MPI_LIB.horovod_allreduce(tensor, name=name)


ops.NotDifferentiable('HorovodAllreduce')
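
# Usage sketch (added for illustration; the standard Horovod driver pattern
# is assumed, typically launched via `horovodrun`):
#
#   import horovod.tensorflow as hvd
#   hvd.init()
#   # The public hvd.allreduce wrapper averages by default; the raw op above
#   # performs a sum.
#   reduced = hvd.allreduce(tensor)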


def allgather(tensor, name=None):
    """An op which concatenates the input tensor with the same input tensor on
    all other Horovod processes.

    The concatenation is done on the first dimension, so the input tensors on the
    different processes must have the same rank and shape, except for the first
    dimension, which is allowed to be different.

    Returns:
      A tensor of the same type as `tensor`, concatenated on dimension zero
      across all processes. The shape is identical to the input shape, except for
      the first dimension, which may be greater and is the sum of all first
      dimensions of the tensors in different Horovod processes.
Example #15
# ==============================================================================
"""Gradients for operators defined in sparse_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops

# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable("SparseAddGrad")
ops.NotDifferentiable("SparseConcat")
ops.NotDifferentiable("SparseToDense")


@ops.RegisterGradient("SparseReorder")
def _SparseReorderGrad(op, unused_output_indices_grad, output_values_grad):
    """Gradients for the SparseReorder op.

  Args:
    op: the SparseReorder op
    unused_output_indices_grad: the incoming gradients of the output indices
    output_values_grad: the incoming gradients of the output values

  Returns:
    Gradient for each of the 3 input tensors:
Example #16
    ]
    return dims


_cuda_op_module_v2_sz224 = tf.load_op_library(
    os.path.join(tf.resource_loader.get_data_files_path(),
                 'TF_newop/cuda_op_kernel_v2_sz224.so'))
zbuffer_tri_v2_sz224 = _cuda_op_module_v2_sz224.zbuffer_tri_v2_sz224


def ZBuffer_Rendering_CUDA_op_v2_sz224(s2d, tri, vis):
    tri_map, zbuffer = zbuffer_tri_v2_sz224(s2d, tri, vis)
    return tri_map, zbuffer


ops.NotDifferentiable("ZbufferTriV2Sz224")


def warp_texture(texture, m, mshape, output_size=224):
    def flatten(x):
        return tf.reshape(x, [-1])

    n_size = get_shape(texture)
    n_size = n_size[0]

    s = output_size

    # Tri, tri2vt
    tri = load_3DMM_tri()
    vertex_tri = load_3DMM_vertex_tri()
    vt2pixel_u, vt2pixel_v = load_3DMM_vt2pixel()
Example #17
primitive_consistency_loss_v2_grad = _primitive_gen_module.primitive_consistency_loss_v2_grad
primitive_symmetry_loss_v3_grad = _primitive_gen_module.primitive_symmetry_loss_v3_grad
primitive_aligning_loss_v2_grad = _primitive_gen_module.primitive_aligning_loss_v2_grad
primitive_cube_area_average_loss_grad = _primitive_gen_module.primitive_cube_area_average_loss_grad

# mask prediction
primitive_coverage_split_loss_v3 = _primitive_gen_module.primitive_coverage_split_loss_v3
primitive_consistency_split_loss = _primitive_gen_module.primitive_consistency_split_loss
primitive_tree_generation = _primitive_gen_module.primitive_tree_generation
primitive_cube_coverage_loss_v4 = _primitive_gen_module.primitive_cube_coverage_loss_v4

primitive_coverage_split_loss_v3_grad = _primitive_gen_module.primitive_coverage_split_loss_v3_grad
primitive_consistency_split_loss_grad = _primitive_gen_module.primitive_consistency_split_loss_grad
primitive_cube_coverage_loss_v4_grad = _primitive_gen_module.primitive_cube_coverage_loss_v4_grad

ops.NotDifferentiable('OctreeDatabase')
ops.NotDifferentiable('PtimitiveGroupPointsV3')
ops.NotDifferentiable('PrimitiveCubeVolumeV2')
ops.NotDifferentiable('PrimitivePointsSuffixIndex')
ops.NotDifferentiable('PrimitiveTreeGeneration')


@ops.RegisterGradient('OctreeConv')
def _OctreeConvGrad(op, grad):
    return octree_conv_grad(grad,
                            op.inputs[0],
                            op.inputs[1],
                            op.inputs[2],
                            op.get_attr('curr_depth'),
                            op.get_attr('num_output'),
                            op.get_attr('kernel_size'),
Example #18
from tensorflow.contrib.boosted_trees.python.ops import boosted_trees_ops_loader
# pylint: enable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import gen_model_ops
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_deserialize
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_serialize
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_stamp_token
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_used_handlers
# pylint: enable=unused-import

from tensorflow.python.framework import ops
from tensorflow.python.ops import resources
from tensorflow.python.training import saver
from tensorflow.python.training.checkpointable import tracking

ops.NotDifferentiable("TreeEnsembleVariable")
ops.NotDifferentiable("TreeEnsembleSerialize")
ops.NotDifferentiable("TreeEnsembleDeserialize")


class TreeEnsembleVariableSavable(saver.BaseSaverBuilder.SaveableObject):
    """SaveableObject implementation for TreeEnsembleVariable."""
    def __init__(self, tree_ensemble_handle, create_op, name):
        """Creates a TreeEnsembleVariableSavable object.

    Args:
      tree_ensemble_handle: handle to the tree ensemble variable.
      create_op: the op to initialize the variable.
      name: the name to save the tree ensemble variable under.
    """
        stamp_token, ensemble_config = tree_ensemble_serialize(
Example #19
from tensorflow.python.util.deprecation import deprecated

_libsvm_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_libsvm_ops.so"))


@deprecated(None,
            'tf.contrib.libsvm will be removed in 2.0, the support for libsvm '
            'format will continue to be provided in tensorflow-io: '
            'https://github.com/tensorflow/io')
def decode_libsvm(content, num_features, dtype=None, label_dtype=None):
    """Convert Libsvm records to a tensor of label and a tensor of feature.

  Args:
    content: A `Tensor` of type `string`. Each string is a record/row in
      the Libsvm format.
    num_features: The number of features.
    dtype: The type of the output feature tensor. Defaults to tf.float32.
    label_dtype: The type of the output label tensor. Defaults to tf.int64.

  Returns:
    features: A `SparseTensor` of the shape `[input_shape, num_features]`.
    labels: A `Tensor` of the same shape as content.
  """
    labels, indices, values, shape = gen_libsvm_ops.decode_libsvm(
        content, num_features, dtype=dtype, label_dtype=label_dtype)
    return sparse_tensor.SparseTensor(indices, values, shape), labels


ops.NotDifferentiable("DecodeLibSVM")
Example #20
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_list_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_list_ops import *
# pylint: enable=wildcard-import

ops.NotDifferentiable("TensorListConcat")
ops.NotDifferentiable("TensorListPushBackBatch")


@ops.RegisterGradient("TensorListPushBack")
def _PushBackGrad(op, dresult):
    return gen_list_ops.tensor_list_pop_back(
        dresult, element_dtype=op.get_attr("element_dtype"))


@ops.RegisterGradient("TensorListPopBack")
def _PopBackGrad(op, dlist, delement):
    if dlist is None:
        dlist = gen_list_ops.empty_tensor_list(
            element_dtype=delement.dtype,
            element_shape=gen_list_ops.tensor_list_element_shape(
Example #21
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.ops import gen_stateless_random_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export


ops.NotDifferentiable("StatelessMultinomial")
ops.NotDifferentiable("StatelessRandomBinomial")
ops.NotDifferentiable("StatelessRandomNormal")
ops.NotDifferentiable("StatelessRandomPoisson")
ops.NotDifferentiable("StatelessRandomUniform")
ops.NotDifferentiable("StatelessRandomUniformInt")
ops.NotDifferentiable("StatelessRandomUniformFullInt")
ops.NotDifferentiable("StatelessTruncatedNormal")


ops.NotDifferentiable("StatelessRandomNormalV2")
ops.NotDifferentiable("StatelessRandomUniformV2")
ops.NotDifferentiable("StatelessRandomUniformIntV2")
ops.NotDifferentiable("StatelessRandomUniformFullIntV2")
ops.NotDifferentiable("StatelessTruncatedNormalV2")
Example #22
        return math_ops.range(array_ops.rank(x) - 1, -1, -1)


def reduce_join(inputs,
                axis=None,
                keep_dims=False,
                separator="",
                name=None,
                reduction_indices=None):
    reduction_indices = _reduce_join_reduction_dims(inputs, axis,
                                                    reduction_indices)
    return gen_string_ops.reduce_join(inputs=inputs,
                                      reduction_indices=reduction_indices,
                                      keep_dims=keep_dims,
                                      separator=separator,
                                      name=name)


reduce_join.__doc__ = deprecation.rewrite_argument_docstring(
    gen_string_ops.reduce_join.__doc__, "reduction_indices", "axis")

ops.NotDifferentiable("StringToHashBucket")
ops.NotDifferentiable("StringToHashBucketFast")
ops.NotDifferentiable("StringToHashBucketStrong")
ops.NotDifferentiable("ReduceJoin")
ops.NotDifferentiable("StringJoin")
ops.NotDifferentiable("StringSplit")
ops.NotDifferentiable("AsString")
ops.NotDifferentiable("EncodeBase64")
ops.NotDifferentiable("DecodeBase64")
Example #23
        normalized: boolean. If True, DCT coefficients are normalized with
                    quantization tables. If False, no normalization is performed.
        channels: number of color channels for the decoded image.

    Output
       output: (dct_y, dct_c, dct_r) as Tensors of size h x w x nb dct coef.
               Given an image of size 512 x 512, the dct_y will be 64 x 64 x 64
               and dct_c, dct_r will be 32 x 32 x 64.
    """
    return TF_LIB.decode_jpeg2dct(buffer,
                                  normalized=normalized,
                                  channels=channels,
                                  name=name)


ops.NotDifferentiable('DecodeJpeg2dct')
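
# Usage sketch (added for illustration; assumes the truncated single-image
# wrapper above is named `decode`, mirroring `batch_decode` below):
#
#   buffer = tf.io.read_file("image.jpg")
#   dct_y, dct_c, dct_r = decode(buffer, normalized=True, channels=3)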


def batch_decode(buffers, normalized=True, channels=3, name=None):
    """
    Read/load the DCT coefficients from a batch of string bytes representing JPEG images.

    Arguments
        buffers: the batched tensor of JPEG buffers (batch_size,) of type tf.string
        normalized: boolean. If True, DCT coefficients are normalized with
                    quantization tables. If False, no normalization is performed.
        channels: number of color channels for the decoded image.

    Output
       output: (batch_size, dct_y, dct_c, dct_r) as Tensors of size h x w x nb dct coef.
               Given images of size 512 x 512 batched in b strings, the dct_y will be b x 64 x 64 x 64 and
Example #24
    y = op.inputs[1]
    sx = array_ops.shape(x)
    sy = array_ops.shape(y)
    # pylint: disable=protected-access
    rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
    # pylint: enable=protected-access
    with ops.control_dependencies([grad]):
        # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
        # Tensor (not a number like 2.0) which causes it to convert to Tensor.
        x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
    return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
            -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))


# Logical operations have no gradients.
ops.NotDifferentiable("Less")
ops.NotDifferentiable("LessEqual")
ops.NotDifferentiable("Greater")
ops.NotDifferentiable("GreaterEqual")
ops.NotDifferentiable("Equal")
ops.NotDifferentiable("ApproximateEqual")
ops.NotDifferentiable("NotEqual")
ops.NotDifferentiable("LogicalAnd")
ops.NotDifferentiable("LogicalOr")
ops.NotDifferentiable("LogicalNot")


@ops.RegisterGradient("Select")
def _SelectGrad(op, grad):
    c = op.inputs[0]
    x = op.inputs[1]
Example #25
        shape_tensor = _ShapeTensor(shape)
        mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
        stddev_tensor = ops.convert_to_tensor(stddev,
                                              dtype=dtype,
                                              name="stddev")
        seed1, seed2 = random_seed.get_seed(seed)
        rnd = gen_random_ops.random_standard_normal(shape_tensor,
                                                    dtype,
                                                    seed=seed1,
                                                    seed2=seed2)
        mul = rnd * stddev_tensor
        value = math_ops.add(mul, mean_tensor, name=name)
        return value


ops.NotDifferentiable("RandomStandardNormal")


def parameterized_truncated_normal(shape,
                                   means=0.0,
                                   stddevs=1.0,
                                   minvals=-2.0,
                                   maxvals=2.0,
                                   dtype=dtypes.float32,
                                   seed=None,
                                   name=None):
    """Outputs random values from a truncated normal distribution.

  The generated values follow a normal distribution with specified mean and
  standard deviation, except that values whose magnitude is more than 2 standard
  deviations from the mean are dropped and re-picked.
Example #26
  Args:
    name: A name for the generated node. Will also serve as the series name in
      TensorBoard.
    tensor: A tensor of any type and shape to serialize.
    summary_description: Optional summary_pb2.SummaryDescription()
    collections: Optional list of graph collections keys. The new summary op is
      added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.

  Returns:
    A scalar `Tensor` of type `string`. The serialized `Summary` protocol
    buffer.
  """
    # pylint: enable=line-too-long

    if summary_description is None:
        summary_description = summary_pb2.SummaryDescription()

    description = json_format.MessageToJson(summary_description)
    with ops.name_scope(name, None, [tensor]) as scope:
        val = gen_logging_ops._tensor_summary(tensor=tensor,
                                              description=description,
                                              name=scope)
        _Collect(val, collections, [ops.GraphKeys.SUMMARIES])
    return val


ops.NotDifferentiable("TensorSummary")

ops.RegisterShape("TensorSummary")(common_shapes.call_cpp_shape_fn)
Example #27
      currently supported, the last dimension of the `warp` tensor must be 2.
    name: Optional name of the op.

  Returns:
    Tensor of resampled values from `data`. The output tensor shape is
    determined by the shape of the warp tensor. For example, if `data` is of
    shape `[batch_size, data_height, data_width, data_num_channels]` and warp of
    shape `[batch_size, dim_0, ... , dim_n, 2]` the output will be of shape
    `[batch_size, dim_0, ... , dim_n, data_num_channels]`.

  Raises:
    ImportError: if the wrapper generated during compilation is not present when
    the function is called.
  """
    if not resampler_is_available():
        raise ImportError("_gen_resampler could not be imported.")
    with ops.name_scope(name, "resampler", [data, warp]):
        data_tensor = ops.convert_to_tensor(data, name="data")
        warp_tensor = ops.convert_to_tensor(warp, name="warp")
        return _gen_resampler.resampler(data_tensor, warp_tensor)


@ops.RegisterGradient("Resampler")
def _resampler_grad(op, grad_output):
    data, warp = op.inputs
    grad_output_tensor = ops.convert_to_tensor(grad_output, name="grad_output")
    return _gen_resampler.resampler_grad(data, warp, grad_output_tensor)


ops.NotDifferentiable("ResamplerGrad")
Example #28
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in tensor_array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops import tensor_array_ops

# TODO(b/31222613): These ops may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable("TensorArray")
ops.NotDifferentiable("TensorArrayGrad")
ops.NotDifferentiable("TensorArraySize")
ops.NotDifferentiable("TensorArrayClose")

ops.NotDifferentiable("TensorArrayV2")
ops.NotDifferentiable("TensorArrayGradV2")
ops.NotDifferentiable("TensorArraySizeV2")
ops.NotDifferentiable("TensorArrayCloseV2")

ops.NotDifferentiable("TensorArrayV3")
ops.NotDifferentiable("TensorArrayGradV3")
ops.NotDifferentiable("TensorArraySizeV3")
ops.NotDifferentiable("TensorArrayCloseV3")

Example #29
@ops.RegisterGradient("SparseTensorToCSRSparseMatrix")
def _SparseTensorToCSRSparseMatrixGrad(op, grad):
    """Gradient for sparse_tensor_to_csr_sparse_matrix op."""
    grad_values = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
        grad, type=op.get_attr("T")).values
    return (None, grad_values, None)


@ops.RegisterGradient("CSRSparseMatrixToSparseTensor")
def _CSRSparseMatrixToSparseTensorGrad(op, *grads):
    """Gradient for csr_sparse_matrix_to_sparse_tensor op."""
    return sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
        indices=op.outputs[0], values=grads[1], dense_shape=op.outputs[2])


ops.NotDifferentiable("SparseMatrixNNZ")

ops.NotDifferentiable("SparseMatrixZeros")


def _PruneSparseTensor(unpruned, pruned_pattern):
    """Helper function to prune COO sparse tensor.

  Given two sparse tensors `unpruned` and `pruned_pattern`, generates another
  sparse tensor containing only those indices and values of `unpruned` whose
  indices also occur in `pruned_pattern`.

  Args:
    unpruned: COO matrix with unpruned indices
    pruned_pattern: COO matrix with pruned pattern.
Example #30
  The input `tags` and `values` must have the same shape.  The generated
  summary has a summary value for each tag-value pair in `tags` and `values`.

  Args:
    tags: A `string` `Tensor`.  Tags for the summaries.
    values: A real numeric Tensor.  Values for the summaries.
    collections: Optional list of graph collections keys. The new summary op is
      added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
    name: A name for the operation (optional).

  Returns:
    A scalar `Tensor` of type `string`. The serialized `Summary` protocol
    buffer.
  """
    with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope:
        val = gen_logging_ops.scalar_summary(tags=tags,
                                             values=values,
                                             name=scope)
        _Collect(val, collections, [ops.GraphKeys.SUMMARIES])
    return val


ops.NotDifferentiable("HistogramSummary")
ops.NotDifferentiable("ImageSummary")
ops.NotDifferentiable("AudioSummary")
ops.NotDifferentiable("AudioSummaryV2")
ops.NotDifferentiable("MergeSummary")
ops.NotDifferentiable("ScalarSummary")
ops.NotDifferentiable("Timestamp")