def build_sparse_inequality_splits_eager_fallback(num_minibatches, partition_ids, bucket_ids, gradients, hessians, bucket_boundaries, class_id, feature_column_group_id, bias_feature_id, l1_regularization, l2_regularization, tree_complexity_regularization, min_node_weight, multiclass_strategy, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function build_sparse_inequality_splits
  """
  # Prefer an explicitly supplied context; otherwise use the global one.
  _ctx = ctx if ctx else _context.context()
  # Coerce every input to the dtype the op definition requires.
  _to_tensor = _ops.convert_to_tensor
  num_minibatches = _to_tensor(num_minibatches, _dtypes.int64)
  partition_ids = _to_tensor(partition_ids, _dtypes.int32)
  bucket_ids = _to_tensor(bucket_ids, _dtypes.int64)
  gradients = _to_tensor(gradients, _dtypes.float32)
  hessians = _to_tensor(hessians, _dtypes.float32)
  bucket_boundaries = _to_tensor(bucket_boundaries, _dtypes.float32)
  class_id = _to_tensor(class_id, _dtypes.int32)
  feature_column_group_id = _to_tensor(feature_column_group_id, _dtypes.int32)
  bias_feature_id = _to_tensor(bias_feature_id, _dtypes.int64)
  l1_regularization = _to_tensor(l1_regularization, _dtypes.float32)
  l2_regularization = _to_tensor(l2_regularization, _dtypes.float32)
  tree_complexity_regularization = _to_tensor(tree_complexity_regularization,
                                              _dtypes.float32)
  min_node_weight = _to_tensor(min_node_weight, _dtypes.float32)
  multiclass_strategy = _to_tensor(multiclass_strategy, _dtypes.int32)
  _inputs_flat = [
      num_minibatches, partition_ids, bucket_ids, gradients, hessians,
      bucket_boundaries, class_id, feature_column_group_id, bias_feature_id,
      l1_regularization, l2_regularization, tree_complexity_regularization,
      min_node_weight, multiclass_strategy]
  _attrs = None  # This op carries no non-inferred attrs.
  # The op produces three output tensors.
  _result = _execute.execute(b"BuildSparseInequalitySplits", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "BuildSparseInequalitySplits", _inputs_flat, _attrs, _result, name)
  return _BuildSparseInequalitySplitsOutput._make(_result)
def zero_initializer(ref, name=None):
  r"""Initialize 'ref' with all zeros. This op requires that the tensor is not

  initialized. The tensor will first be allocated memory, then be filled with
  all zeros. This op is intended to save memory during initialization, if you
  use this op, you should not run initializer of the 'ref' tensor.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`,
      `uint16`, `half`, `uint32`, `uint64`.
      Should be from a `Variable` node.
    name: A name for the operation (optional).

  Returns:
    Same as "ref".

  Raises:
    RuntimeError: If called while eager execution is enabled, since ref-typed
      outputs are not supported in eager mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build the op via the op-def library and record gradients.
    _, _, _op = _op_def_lib._apply_op_helper(
        "ZeroInitializer", ref=ref, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "ZeroInitializer", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Fix: the original body repeated this raise a second time after the
    # if/else, where it was unreachable; the duplicate has been removed.
    raise RuntimeError("zero_initializer op does not support eager execution. Arg 'output_ref' is a ref.")
def ignite_dataset(cache_name, host, port, local, part, page_size, schema, permutation, name=None):
  r"""IgniteDataset that allows to get data from Apache Ignite.

  Apache Ignite is a memory-centric distributed database, caching, and
  processing platform for transactional, analytical, and streaming workloads,
  delivering in-memory speeds at petabyte scale. This contrib package contains
  an integration between Apache Ignite and TensorFlow. The integration is based
  on tf.data from TensorFlow side and Binary Client Protocol from Apache Ignite
  side. It allows to use Apache Ignite as a datasource for neural network
  training, inference and all other computations supported by TensorFlow.
  Ignite Dataset is based on Apache Ignite Binary Client Protocol.

  Args:
    cache_name: A `Tensor` of type `string`. Ignite Cache Name.
    host: A `Tensor` of type `string`. Ignite Thin Client Host.
    port: A `Tensor` of type `int32`. Ignite Thin Client Port.
    local: A `Tensor` of type `bool`. Local flag that defines that data should
      be fetched from local host only.
    part: A `Tensor` of type `int32`. Partition data should be fetched from.
    page_size: A `Tensor` of type `int32`. Page size for Ignite Thin Client.
    schema: A `Tensor` of type `int32`. Internal structure that defines schema
      of cache objects.
    permutation: A `Tensor` of type `int32`. Internal structure that defines
      permutation of cache objects.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build the op through the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "IgniteDataset", cache_name=cache_name, host=host, port=port,
        local=local, part=part, page_size=page_size, schema=schema,
        permutation=permutation, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "IgniteDataset", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "IgniteDataset", name, _ctx._post_execution_callbacks, cache_name,
          host, port, local, part, page_size, schema, permutation)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle take the slow path instead.
      return ignite_dataset_eager_fallback(
          cache_name, host, port, local, part, page_size, schema, permutation,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the
      # op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def xla_launch_eager_fallback(constants, args, resources, Tresults, function, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function xla_launch
  """
  # Prefer an explicitly supplied context; otherwise use the global one.
  _ctx = ctx if ctx else _context.context()
  # Validate the list-valued arguments before deriving attrs from them.
  if not isinstance(resources, (list, tuple)):
    raise TypeError(
        "Expected list for 'resources' argument to "
        "'xla_launch' Op, not %r." % resources)
  if not isinstance(Tresults, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tresults' argument to "
        "'xla_launch' Op, not %r." % Tresults)
  _attr_Nresources = len(resources)
  Tresults = [_execute.make_type(_t, "Tresults") for _t in Tresults]
  # constants/args may hold mixed dtypes; capture the matched dtype lists.
  _attr_Tconstants, constants = _execute.convert_to_mixed_eager_tensors(constants, _ctx)
  _attr_Targs, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
  resources = _ops.convert_n_to_tensor(resources, _dtypes.resource)
  _inputs_flat = [*constants, *args, *resources]
  _attrs = ("Tconstants", _attr_Tconstants, "Targs", _attr_Targs,
            "Nresources", _attr_Nresources, "Tresults", Tresults, "function",
            function)
  # One output tensor per declared result type.
  _num_outputs = len(Tresults)
  _result = _execute.execute(b"XlaLaunch", _num_outputs, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "XlaLaunch", _inputs_flat, _attrs, _result, name)
  return _result
def image_projective_transform(images, transforms, interpolation, name=None):
  r"""Applies the given transform to each of the images.

  Input `image` is a `Tensor` in NHWC format (where the axes are image in
  batch, rows, columns, and channels. Input `transforms` is a num_images x 8 or
  1 x 8 matrix, where each row corresponds to a 3 x 3 projective
  transformation matrix, with the last entry assumed to be 1. If there is one
  row, the same transformation will be applied to all images.

  If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it
  maps the *output* point `(x, y)` to a transformed *input* point
  `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
  `k = c0 x + c1 y + 1`. If the transformed point lays outside of the input
  image, the output pixel is set to 0.

  Args:
    images: A `Tensor`. Must be one of the following types: `uint8`, `int32`,
      `int64`, `half`, `float32`, `float64`. 4D `Tensor`, input image(s) in
      NHWC format.
    transforms: A `Tensor` of type `float32`. 2D `Tensor`, projective
      transform(s) to apply to the image(s).
    interpolation: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `images`. 4D `Tensor`, image(s) in NHWC
    format, generated by applying the `transforms` to the `images`. Satisfies
    the description above.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate the attr, then build the op via the op-def library.
    interpolation = _execute.make_str(interpolation, "interpolation")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ImageProjectiveTransform", images=images, transforms=transforms,
        interpolation=interpolation, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "interpolation",
              _op.get_attr("interpolation"))
    _execute.record_gradient(
        "ImageProjectiveTransform", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "ImageProjectiveTransform", name, _ctx._post_execution_callbacks,
          images, transforms, "interpolation", interpolation)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle take the slow path instead.
      return image_projective_transform_eager_fallback(
          images, transforms, interpolation=interpolation, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the
      # op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def xla_launch(constants, args, resources, Tresults, function, name=None):
  r"""XLA Launch Op. For use by the XLA JIT only.

  Args:
    constants: A list of `Tensor` objects.
    args: A list of `Tensor` objects.
    resources: A list of `Tensor` objects with type `resource`.
    Tresults: A list of `tf.DTypes`.
    function: A function decorated with @Defun.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tresults`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate list-valued arguments before building the op.
    if not isinstance(resources, (list, tuple)):
      raise TypeError(
          "Expected list for 'resources' argument to "
          "'xla_launch' Op, not %r." % resources)
    _attr_Nresources = len(resources)
    if not isinstance(Tresults, (list, tuple)):
      raise TypeError(
          "Expected list for 'Tresults' argument to "
          "'xla_launch' Op, not %r." % Tresults)
    Tresults = [_execute.make_type(_t, "Tresults") for _t in Tresults]
    _, _, _op = _op_def_lib._apply_op_helper(
        "XlaLaunch", constants=constants, args=args, resources=resources,
        Tresults=Tresults, function=function, name=name)
    _result = _op.outputs[:]
    # With an empty Tresults list the op has no outputs; return the op itself.
    if not _result:
      return _op
    _inputs_flat = _op.inputs
    _attrs = ("Tconstants", _op.get_attr("Tconstants"), "Targs",
              _op.get_attr("Targs"), "Nresources",
              _op.get_attr("Nresources"), "Tresults",
              _op.get_attr("Tresults"), "function", _op.get_attr("function"))
    _execute.record_gradient(
        "XlaLaunch", _inputs_flat, _attrs, _result, name)
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name, "XlaLaunch",
          name, _ctx._post_execution_callbacks, constants, args, resources,
          "Tresults", Tresults, "function", function)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle take the slow path instead.
      return xla_launch_eager_fallback(
          constants, args, resources, Tresults=Tresults, function=function,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the
      # op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def kmeans_plus_plus_initialization(points, num_to_sample, seed, num_retries_per_sample, name=None):
  r"""Selects num_to_sample rows of input using the KMeans++ criterion.

  Rows of points are assumed to be input points. One row is selected at random.
  Subsequent rows are sampled with probability proportional to the squared L2
  distance from the nearest row selected thus far till num_to_sample rows have
  been sampled.

  Args:
    points: A `Tensor` of type `float32`. Matrix of shape (n, d). Rows are
      assumed to be input points.
    num_to_sample: A `Tensor` of type `int64`. Scalar. The number of rows to
      sample. This value must not be larger than n.
    seed: A `Tensor` of type `int64`. Scalar. Seed for initializing the random
      number generator.
    num_retries_per_sample: A `Tensor` of type `int64`. Scalar. For each row
      that is sampled, this parameter specifies the number of additional points
      to draw from the current distribution before selecting the best. If a
      negative value is specified, a heuristic is used to sample
      O(log(num_to_sample)) additional points.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`. Matrix of shape (num_to_sample, d). The
    sampled rows.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build the op through the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "KmeansPlusPlusInitialization", points=points,
        num_to_sample=num_to_sample, seed=seed,
        num_retries_per_sample=num_retries_per_sample, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "KmeansPlusPlusInitialization", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "KmeansPlusPlusInitialization", name,
          _ctx._post_execution_callbacks, points, num_to_sample, seed,
          num_retries_per_sample)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle take the slow path instead.
      return kmeans_plus_plus_initialization_eager_fallback(
          points, num_to_sample, seed, num_retries_per_sample, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the
      # op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def single_image_random_dot_stereograms_eager_fallback(depth_values, hidden_surface_removal=True, convergence_dots_size=8, dots_per_inch=72, eye_separation=2.5, mu=0.3333, normalize=True, normalize_max=-100, normalize_min=100, border_level=0, number_colors=256, output_image_shape=None, output_data_window=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function single_image_random_dot_stereograms

  Fix: `output_image_shape` and `output_data_window` previously used mutable
  list literals as default argument values (a classic Python pitfall, since
  defaults are evaluated once and shared across calls). The defaults are now
  `None`; the body already substitutes `[1024, 768, 1]` and `[1022, 757]`
  respectively for `None`, so the observable behavior is unchanged.
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize each attr: substitute the documented default for None, then
  # validate/convert via the _execute.make_* helpers.
  if hidden_surface_removal is None:
    hidden_surface_removal = True
  hidden_surface_removal = _execute.make_bool(hidden_surface_removal, "hidden_surface_removal")
  if convergence_dots_size is None:
    convergence_dots_size = 8
  convergence_dots_size = _execute.make_int(convergence_dots_size, "convergence_dots_size")
  if dots_per_inch is None:
    dots_per_inch = 72
  dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
  if eye_separation is None:
    eye_separation = 2.5
  eye_separation = _execute.make_float(eye_separation, "eye_separation")
  if mu is None:
    mu = 0.3333
  mu = _execute.make_float(mu, "mu")
  if normalize is None:
    normalize = True
  normalize = _execute.make_bool(normalize, "normalize")
  if normalize_max is None:
    normalize_max = -100
  normalize_max = _execute.make_float(normalize_max, "normalize_max")
  if normalize_min is None:
    normalize_min = 100
  normalize_min = _execute.make_float(normalize_min, "normalize_min")
  if border_level is None:
    border_level = 0
  border_level = _execute.make_float(border_level, "border_level")
  if number_colors is None:
    number_colors = 256
  number_colors = _execute.make_int(number_colors, "number_colors")
  if output_image_shape is None:
    output_image_shape = [1024, 768, 1]
  output_image_shape = _execute.make_shape(output_image_shape, "output_image_shape")
  if output_data_window is None:
    output_data_window = [1022, 757]
  output_data_window = _execute.make_shape(output_data_window, "output_data_window")
  # Infer the "T" attr from the depth input.
  _attr_T, (depth_values,) = _execute.args_to_matching_eager([depth_values], _ctx)
  _inputs_flat = [depth_values]
  _attrs = ("T", _attr_T, "hidden_surface_removal", hidden_surface_removal,
            "convergence_dots_size", convergence_dots_size, "dots_per_inch",
            dots_per_inch, "eye_separation", eye_separation, "mu", mu,
            "normalize", normalize, "normalize_max", normalize_max,
            "normalize_min", normalize_min, "border_level", border_level,
            "number_colors", number_colors, "output_image_shape",
            output_image_shape, "output_data_window", output_data_window)
  _result = _execute.execute(b"SingleImageRandomDotStereograms", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "SingleImageRandomDotStereograms", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def range_decode(encoded, shape, cdf, precision, name=None):
  r"""Decodes a range-coded `code` into an int32 tensor of shape `shape`.

  This is the reverse op of RangeEncode. The shape of the tensor that was
  encoded should be known by the caller.

  Implementation notes:

  - If wrong input was given (e.g., corrupt `encoded` string, or `cdf` or
  `precision` do not match encoder), the decode is unsuccessful. Because of
  potential performance issues, the decoder does not return error status.

  Args:
    encoded: A `Tensor` of type `string`. A scalar string tensor from
      RangeEncode.
    shape: A `Tensor` of type `int32`. An int32 1-D tensor representing the
      shape of the data encoded by RangeEncode.
    cdf: A `Tensor` of type `int32`.
    precision: An `int` that is `>= 1`. The number of bits for probability
      quantization. Must be <= 16, and must match the precision used by
      RangeEncode that produced `encoded`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int16`. An int16 tensor with shape equal to `shape`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate the attr, then build the op via the op-def library.
    precision = _execute.make_int(precision, "precision")
    _, _, _op = _op_def_lib._apply_op_helper(
        "RangeDecode", encoded=encoded, shape=shape, cdf=cdf,
        precision=precision, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("precision", _op.get_attr("precision"))
    _execute.record_gradient(
        "RangeDecode", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "RangeDecode", name, _ctx._post_execution_callbacks, encoded, shape,
          cdf, "precision", precision)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle take the slow path instead.
      return range_decode_eager_fallback(
          encoded, shape, cdf, precision=precision, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the
      # op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def nearest_neighbors(points, centers, k, name=None):
  r"""Selects the k nearest centers for each point.

  Rows of points are assumed to be input points. Rows of centers are assumed to
  be the list of candidate centers. For each point, the k centers that have
  least L2 distance to it are computed.

  Args:
    points: A `Tensor` of type `float32`. Matrix of shape (n, d). Rows are
      assumed to be input points.
    centers: A `Tensor` of type `float32`. Matrix of shape (m, d). Rows are
      assumed to be centers.
    k: A `Tensor` of type `int64`. Scalar. Number of nearest centers to return
      for each point. If k is larger than m, then only m centers are returned.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (nearest_center_indices,
    nearest_center_distances).

    nearest_center_indices: A `Tensor` of type `int64`. Matrix of shape
      (n, min(m, k)). Each row contains the indices of the centers closest to
      the corresponding point, ordered by increasing distance.
    nearest_center_distances: A `Tensor` of type `float32`. Matrix of shape
      (n, min(m, k)). Each row contains the squared L2 distance to the
      corresponding center in nearest_center_indices.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build the op through the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "NearestNeighbors", points=points, centers=centers, k=k, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "NearestNeighbors", _inputs_flat, _attrs, _result, name)
    # Wrap the two outputs in the generated namedtuple.
    _result = _NearestNeighborsOutput._make(_result)
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "NearestNeighbors", name, _ctx._post_execution_callbacks, points,
          centers, k)
      _result = _NearestNeighborsOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle take the slow path instead.
      return nearest_neighbors_eager_fallback(
          points, centers, k, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the
      # op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def image_connected_components(image, name=None):
  r"""Find the connected components of image(s).

  For each image (along the 0th axis), all connected components of adjacent
  pixels with the same non-zero value are detected and given unique ids.

  The returned `components` tensor has 0s for the zero pixels of `images`, and
  arbitrary nonzero ids for the connected components of nonzero values. Ids
  are unique across all of the images, and are in row-major order by the first
  pixel in the component.

  Uses union-find with union by rank but not path compression, giving a runtime
  of `O(n log n)`. See:
  https://en.wikipedia.org/wiki/Disjoint-set_data_structure#Time_Complexity

  Args:
    image: A `Tensor`. Must be one of the following types: `int64`, `int32`,
      `uint16`, `int16`, `uint8`, `int8`, `half`, `float32`, `float64`,
      `bool`, `string`. Image(s) with shape (N, H, W).
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`. Component ids for each pixel in "image". Same
    shape as "image". Zero pixels all have an output of 0, and all components
    of adjacent pixels with the same value are given consecutive ids, starting
    from 1.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build the op through the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "ImageConnectedComponents", image=image, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"))
    _execute.record_gradient(
        "ImageConnectedComponents", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "ImageConnectedComponents", name, _ctx._post_execution_callbacks,
          image)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle take the slow path instead.
      return image_connected_components_eager_fallback(
          image, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the
      # op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def tree_ensemble_used_handlers(tree_ensemble_handle, stamp_token, num_all_handlers, name=None):
  r"""Returns the mask of used handlers along with the number of non-zero

  elements in this mask. Used in feature selection.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`. Handle to the tree
      ensemble.
    stamp_token: A `Tensor` of type `int64`. Token to use as the new value of
      the resource stamp.
    num_all_handlers: An `int` that is `>= 0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (num_used_handlers, used_handlers_mask).

    num_used_handlers: A `Tensor` of type `int64`. number of feature column
      handlers used in the model.
    used_handlers_mask: A `Tensor` of type `bool`. A boolean vector of showing
      which handlers are used in the model.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate the attr, then build the op via the op-def library.
    num_all_handlers = _execute.make_int(num_all_handlers, "num_all_handlers")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TreeEnsembleUsedHandlers", tree_ensemble_handle=tree_ensemble_handle,
        stamp_token=stamp_token, num_all_handlers=num_all_handlers, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("num_all_handlers", _op.get_attr("num_all_handlers"))
    _execute.record_gradient(
        "TreeEnsembleUsedHandlers", _inputs_flat, _attrs, _result, name)
    # Wrap the two outputs in the generated namedtuple.
    _result = _TreeEnsembleUsedHandlersOutput._make(_result)
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "TreeEnsembleUsedHandlers", name, _ctx._post_execution_callbacks,
          tree_ensemble_handle, stamp_token, "num_all_handlers",
          num_all_handlers)
      _result = _TreeEnsembleUsedHandlersOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle take the slow path instead.
      return tree_ensemble_used_handlers_eager_fallback(
          tree_ensemble_handle, stamp_token,
          num_all_handlers=num_all_handlers, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the
      # op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def pmf_to_quantized_cdf(pmf, precision, name=None):
  r"""Converts PMF to quantized CDF. This op uses floating-point operations

  internally. Therefore the quantized output may not be consistent across
  multiple platforms. For entropy encoders and decoders to have the same
  quantized CDF on different platforms, the quantized CDF should be produced
  once and saved, then the saved quantized CDF should be used everywhere.

  After quantization, if PMF does not sum to 2^precision, then some values of
  PMF are increased or decreased to adjust the sum to equal to 2^precision.

  Note that the input PMF is pre-quantization. The input PMF is not normalized
  by this op prior to quantization. Therefore the user is responsible for
  normalizing PMF if necessary.

  Args:
    pmf: A `Tensor` of type `float32`.
    precision: An `int` that is `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate the attr, then build the op via the op-def library.
    precision = _execute.make_int(precision, "precision")
    _, _, _op = _op_def_lib._apply_op_helper(
        "PmfToQuantizedCdf", pmf=pmf, precision=precision, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("precision", _op.get_attr("precision"))
    _execute.record_gradient(
        "PmfToQuantizedCdf", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "PmfToQuantizedCdf", name, _ctx._post_execution_callbacks, pmf,
          "precision", precision)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle take the slow path instead.
      return pmf_to_quantized_cdf_eager_fallback(
          pmf, precision=precision, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the
      # op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def xla_cluster_output_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function xla_cluster_output
  """
  # Prefer an explicitly supplied context; otherwise use the global one.
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from the (possibly non-tensor) input value.
  _attr_T, _matched = _execute.args_to_matching_eager([input], _ctx)
  input = _matched[0]
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _outputs = _execute.execute(b"XlaClusterOutput", 1, inputs=_inputs_flat,
                              attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "XlaClusterOutput", _inputs_flat, _attrs, _outputs, name)
  # Single-output op: unwrap the one-element list.
  _out, = _outputs
  return _out
def tree_ensemble_serialize_eager_fallback(tree_ensemble_handle, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tree_ensemble_serialize
  """
  # Prefer an explicitly supplied context; otherwise use the global one.
  _ctx = ctx if ctx else _context.context()
  tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle,
                                                _dtypes.resource)
  _inputs_flat = [tree_ensemble_handle]
  _attrs = None  # This op carries no attrs.
  # The op yields two output tensors.
  _outputs = _execute.execute(b"TreeEnsembleSerialize", 2,
                              inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                              name=name)
  _execute.record_gradient(
      "TreeEnsembleSerialize", _inputs_flat, _attrs, _outputs, name)
  return _TreeEnsembleSerializeOutput._make(_outputs)
def tree_ensemble_stats(tree_ensemble_handle, stamp_token, name=None):
  r"""Retrieves stats related to the tree ensemble.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`. Handle to the
      ensemble variable.
    stamp_token: A `Tensor` of type `int64`. Stamp token for validating
      operation consistency.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (num_trees, num_layers, active_tree,
    active_layer, attempted_trees, attempted_layers).

    num_trees: A `Tensor` of type `int64`. Scalar indicating the number of
      finalized trees in the ensemble.
    num_layers: A `Tensor` of type `int64`. Scalar indicating the number of
      layers in the ensemble.
    active_tree: A `Tensor` of type `int64`. Scalar indicating the active tree
      being trained.
    active_layer: A `Tensor` of type `int64`. Scalar indicating the active
      layer being trained.
    attempted_trees: A `Tensor` of type `int64`.
    attempted_layers: A `Tensor` of type `int64`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build the op through the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "TreeEnsembleStats", tree_ensemble_handle=tree_ensemble_handle,
        stamp_token=stamp_token, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "TreeEnsembleStats", _inputs_flat, _attrs, _result, name)
    # Wrap the six outputs in the generated namedtuple.
    _result = _TreeEnsembleStatsOutput._make(_result)
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "TreeEnsembleStats", name, _ctx._post_execution_callbacks,
          tree_ensemble_handle, stamp_token)
      _result = _TreeEnsembleStatsOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle take the slow path instead.
      return tree_ensemble_stats_eager_fallback(
          tree_ensemble_handle, stamp_token, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the
      # op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def image_connected_components_eager_fallback(image, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function image_connected_components
  """
  # Prefer an explicitly supplied context; otherwise use the global one.
  _ctx = ctx if ctx else _context.context()
  # Infer the "dtype" attr from the (possibly non-tensor) input value.
  _attr_dtype, _matched = _execute.args_to_matching_eager([image], _ctx)
  image = _matched[0]
  _inputs_flat = [image]
  _attrs = ("dtype", _attr_dtype)
  _outputs = _execute.execute(b"ImageConnectedComponents", 1,
                              inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                              name=name)
  _execute.record_gradient(
      "ImageConnectedComponents", _inputs_flat, _attrs, _outputs, name)
  # Single-output op: unwrap the one-element list.
  _out, = _outputs
  return _out
def pmf_to_quantized_cdf_eager_fallback(pmf, precision, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function pmf_to_quantized_cdf
  """
  # Prefer an explicitly supplied context; otherwise use the global one.
  _ctx = ctx if ctx else _context.context()
  # Validate the attr and coerce the input to its declared dtype.
  precision = _execute.make_int(precision, "precision")
  pmf = _ops.convert_to_tensor(pmf, _dtypes.float32)
  _inputs_flat = [pmf]
  _attrs = ("precision", precision)
  _outputs = _execute.execute(b"PmfToQuantizedCdf", 1, inputs=_inputs_flat,
                              attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "PmfToQuantizedCdf", _inputs_flat, _attrs, _outputs, name)
  # Single-output op: unwrap the one-element list.
  _out, = _outputs
  return _out
def decision_tree_ensemble_resource_handle_op(container="", shared_name="", name=None):
  r"""TODO: add doc.

  Args:
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: substitute defaults for None and validate the attrs, then
    # build the op through the op-def library.
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "DecisionTreeEnsembleResourceHandleOp", container=container,
        shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
        "DecisionTreeEnsembleResourceHandleOp", _inputs_flat, _attrs, _result,
        name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "DecisionTreeEnsembleResourceHandleOp", name,
          _ctx._post_execution_callbacks, "container", container,
          "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle take the slow path instead.
      return decision_tree_ensemble_resource_handle_op_eager_fallback(
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the
      # op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def kmc2_chain_initialization_eager_fallback(distances, seed, name=None, ctx=None):
  r"""Eager-mode slow path for `kmc2_chain_initialization`.

  Converts `distances` (float32) and `seed` (int64) to tensors, runs the
  `KMC2ChainInitialization` op eagerly, and returns its single output.
  """
  run_ctx = ctx or _context.context()
  tensor_args = [
      _ops.convert_to_tensor(distances, _dtypes.float32),
      _ops.convert_to_tensor(seed, _dtypes.int64),
  ]
  outputs = _execute.execute(b"KMC2ChainInitialization", 1,
                             inputs=tensor_args, attrs=None, ctx=run_ctx,
                             name=name)
  _execute.record_gradient("KMC2ChainInitialization", tensor_args, None,
                           outputs, name)
  sampled_index, = outputs
  return sampled_index
def zero_var_initializer_eager_fallback(var, dtype, shape, name=None, ctx=None):
  r"""Eager-mode slow path for `zero_var_initializer`.

  Normalizes the `dtype` and `shape` attrs, converts `var` to a resource
  tensor, and runs the `ZeroVarInitializer` op eagerly.
  """
  run_ctx = ctx or _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  flat_inputs = [var]
  attr_tuple = ("dtype", dtype, "shape", shape)
  outputs = _execute.execute(b"ZeroVarInitializer", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=run_ctx, name=name)
  _execute.record_gradient("ZeroVarInitializer", flat_inputs, attr_tuple,
                           outputs, name)
  initialized_var, = outputs
  return initialized_var
def nearest_neighbors_eager_fallback(points, centers, k, name=None, ctx=None):
  r"""Eager-mode slow path for `nearest_neighbors`.

  Runs the two-output `NearestNeighbors` op eagerly and wraps the results
  in the `_NearestNeighborsOutput` namedtuple.
  """
  run_ctx = ctx or _context.context()
  tensor_args = [
      _ops.convert_to_tensor(points, _dtypes.float32),
      _ops.convert_to_tensor(centers, _dtypes.float32),
      _ops.convert_to_tensor(k, _dtypes.int64),
  ]
  outputs = _execute.execute(b"NearestNeighbors", 2, inputs=tensor_args,
                             attrs=None, ctx=run_ctx, name=name)
  _execute.record_gradient("NearestNeighbors", tensor_args, None, outputs,
                           name)
  return _NearestNeighborsOutput._make(outputs)
def range_encode_eager_fallback(data, cdf, precision, name=None, ctx=None):
  r"""Eager-mode slow path for `range_encode`.

  Normalizes the `precision` attr, converts `data` (int16) and `cdf`
  (int32) to tensors, and runs the `RangeEncode` op eagerly.
  """
  run_ctx = ctx or _context.context()
  precision = _execute.make_int(precision, "precision")
  tensor_args = [
      _ops.convert_to_tensor(data, _dtypes.int16),
      _ops.convert_to_tensor(cdf, _dtypes.int32),
  ]
  attr_tuple = ("precision", precision)
  outputs = _execute.execute(b"RangeEncode", 1, inputs=tensor_args,
                             attrs=attr_tuple, ctx=run_ctx, name=name)
  _execute.record_gradient("RangeEncode", tensor_args, attr_tuple, outputs,
                           name)
  encoded, = outputs
  return encoded
def kmc2_chain_initialization(distances, seed, name=None):
  r"""Returns the index of a data point that should be added to the seed set.

  Entries in distances are assumed to be squared distances of candidate points to
  the already sampled centers in the seed set. The op constructs one Markov chain
  of the k-MC^2 algorithm and returns the index of one candidate point to be added
  as an additional cluster center.

  Args:
    distances: A `Tensor` of type `float32`.
      Vector with squared distances to the closest previously sampled
      cluster center for each candidate point.
    seed: A `Tensor` of type `int64`.
      Scalar. Seed for initializing the random number generator.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`. Scalar with the index of the sampled point.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a KMC2ChainInitialization node to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "KMC2ChainInitialization", distances=distances, seed=seed, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "KMC2ChainInitialization", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "KMC2ChainInitialization", name, _ctx._post_execution_callbacks,
          distances, seed)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; use the Python slow path.
      return kmc2_chain_initialization_eager_fallback(
          distances, seed, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the mapped TF error type, suppressing exception chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def zero_var_initializer(var, dtype, shape, name=None):
  r"""Initialize 'var' with all zeros.

  This op requires that the resource var is not initialized. The var will
  first be allocated memory, then be filled with all zeros. This op is
  intended to save memory during initialization, if you use this op, you
  should not run initializer of the var.

  Args:
    var: A `Tensor` of type `resource`. Should be a ResourceVariable.
    dtype: A `tf.DType`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    Same as "var".
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attrs and add a ZeroVarInitializer node.
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ZeroVarInitializer", var=var, dtype=dtype, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"))
    _execute.record_gradient(
        "ZeroVarInitializer", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "ZeroVarInitializer", name, _ctx._post_execution_callbacks, var,
          "dtype", dtype, "shape", shape)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; use the Python slow path.
      return zero_var_initializer_eager_fallback(
          var, dtype=dtype, shape=shape, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the mapped TF error type, suppressing exception chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def tree_ensemble_used_handlers_eager_fallback(tree_ensemble_handle, stamp_token, num_all_handlers, name=None, ctx=None):
  r"""Eager-mode slow path for `tree_ensemble_used_handlers`.

  Runs the two-output `TreeEnsembleUsedHandlers` op eagerly and wraps the
  results in the `_TreeEnsembleUsedHandlersOutput` namedtuple.
  """
  run_ctx = ctx or _context.context()
  num_all_handlers = _execute.make_int(num_all_handlers, "num_all_handlers")
  tensor_args = [
      _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource),
      _ops.convert_to_tensor(stamp_token, _dtypes.int64),
  ]
  attr_tuple = ("num_all_handlers", num_all_handlers)
  outputs = _execute.execute(b"TreeEnsembleUsedHandlers", 2,
                             inputs=tensor_args, attrs=attr_tuple,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("TreeEnsembleUsedHandlers", tensor_args,
                           attr_tuple, outputs, name)
  return _TreeEnsembleUsedHandlersOutput._make(outputs)
def image_projective_transform_eager_fallback(images, transforms, interpolation, name=None, ctx=None):
  r"""Eager-mode slow path for `image_projective_transform`.

  Infers the `dtype` attr from the runtime type of `images`, then runs
  the `ImageProjectiveTransform` op eagerly.
  """
  run_ctx = ctx or _context.context()
  interpolation = _execute.make_str(interpolation, "interpolation")
  # Match `images` against the op's allowed dtypes to pick the "dtype" attr.
  _attr_dtype, (images,) = _execute.args_to_matching_eager([images], run_ctx)
  transforms = _ops.convert_to_tensor(transforms, _dtypes.float32)
  flat_inputs = [images, transforms]
  attr_tuple = ("dtype", _attr_dtype, "interpolation", interpolation)
  outputs = _execute.execute(b"ImageProjectiveTransform", 1,
                             inputs=flat_inputs, attrs=attr_tuple,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("ImageProjectiveTransform", flat_inputs,
                           attr_tuple, outputs, name)
  transformed, = outputs
  return transformed
def adjust_hsv_in_yiq_eager_fallback(images, delta_h, scale_s, scale_v, name=None, ctx=None):
  r"""Eager-mode slow path for `adjust_hsv_in_yiq`.

  Infers the `T` attr from the runtime type of `images`, converts the
  scalar adjustments to float32 tensors, and runs `AdjustHsvInYiq` eagerly.
  """
  run_ctx = ctx or _context.context()
  # Match `images` against the op's allowed dtypes to pick the "T" attr.
  _attr_T, (images,) = _execute.args_to_matching_eager([images], run_ctx)
  delta_h = _ops.convert_to_tensor(delta_h, _dtypes.float32)
  scale_s = _ops.convert_to_tensor(scale_s, _dtypes.float32)
  scale_v = _ops.convert_to_tensor(scale_v, _dtypes.float32)
  flat_inputs = [images, delta_h, scale_s, scale_v]
  attr_tuple = ("T", _attr_T)
  outputs = _execute.execute(b"AdjustHsvInYiq", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=run_ctx, name=name)
  _execute.record_gradient("AdjustHsvInYiq", flat_inputs, attr_tuple,
                           outputs, name)
  adjusted, = outputs
  return adjusted
def kmeans_plus_plus_initialization_eager_fallback(points, num_to_sample, seed, num_retries_per_sample, name=None, ctx=None):
  r"""Eager-mode slow path for `kmeans_plus_plus_initialization`.

  Converts all arguments to tensors of their fixed dtypes and runs the
  `KmeansPlusPlusInitialization` op eagerly.
  """
  run_ctx = ctx or _context.context()
  tensor_args = [
      _ops.convert_to_tensor(points, _dtypes.float32),
      _ops.convert_to_tensor(num_to_sample, _dtypes.int64),
      _ops.convert_to_tensor(seed, _dtypes.int64),
      _ops.convert_to_tensor(num_retries_per_sample, _dtypes.int64),
  ]
  outputs = _execute.execute(b"KmeansPlusPlusInitialization", 1,
                             inputs=tensor_args, attrs=None, ctx=run_ctx,
                             name=name)
  _execute.record_gradient("KmeansPlusPlusInitialization", tensor_args,
                           None, outputs, name)
  samples, = outputs
  return samples
def bipartite_match_eager_fallback(distance_mat, num_valid_rows, top_k=-1, name=None, ctx=None):
  r"""Eager-mode slow path for `bipartite_match`.

  Runs the two-output `BipartiteMatch` op eagerly and wraps the results in
  the `_BipartiteMatchOutput` namedtuple. A `top_k` of None is treated as
  the default of -1.
  """
  run_ctx = ctx or _context.context()
  if top_k is None:
    top_k = -1
  top_k = _execute.make_int(top_k, "top_k")
  tensor_args = [
      _ops.convert_to_tensor(distance_mat, _dtypes.float32),
      _ops.convert_to_tensor(num_valid_rows, _dtypes.float32),
  ]
  attr_tuple = ("top_k", top_k)
  outputs = _execute.execute(b"BipartiteMatch", 2, inputs=tensor_args,
                             attrs=attr_tuple, ctx=run_ctx, name=name)
  _execute.record_gradient("BipartiteMatch", tensor_args, attr_tuple,
                           outputs, name)
  return _BipartiteMatchOutput._make(outputs)
def sdca_optimizer_v2(sparse_example_indices, sparse_feature_indices, sparse_feature_values, dense_features, example_weights, example_labels, sparse_indices, sparse_weights, dense_weights, example_state_data, loss_type, l1, l2, num_loss_partitions, num_inner_iterations, adaptive=True, name=None):
  r"""Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for

  linear models with L1 + L2 regularization. As global optimization objective is
  strongly-convex, the optimizer optimizes the dual objective at each step. The
  optimizer applies each update one example at a time. Examples are sampled
  uniformly, and the optimizer is learning rate free and enjoys linear convergence
  rate.

  [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
  Shai Shalev-Shwartz, Tong Zhang. 2012

  $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$

  [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
  Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
  Peter Richtarik, Martin Takac. 2015

  [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
  Dominik Csiba, Zheng Qu, Peter Richtarik. 2015

  Args:
    sparse_example_indices: A list of `Tensor` objects with type `int64`.
      a list of vectors which contain example indices.
    sparse_feature_indices: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `int64`.
      a list of vectors which contain feature indices.
    sparse_feature_values: A list of `Tensor` objects with type `float32`.
      a list of vectors which contains feature value associated with each feature group.
    dense_features: A list of `Tensor` objects with type `float32`.
      a list of matrices which contains the dense feature values.
    example_weights: A `Tensor` of type `float32`.
      a vector which contains the weight associated with each example.
    example_labels: A `Tensor` of type `float32`.
      a vector which contains the label/target associated with each example.
    sparse_indices: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `int64`.
      a list of vectors where each value is the indices which has
      corresponding weights in sparse_weights. This field maybe omitted for the
      dense approach.
    sparse_weights: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `float32`.
      a list of vectors where each value is the weight associated with a sparse feature group.
    dense_weights: A list with the same length as `dense_features` of `Tensor` objects with type `float32`.
      a list of vectors where the values are the weights associated with a dense feature group.
    example_state_data: A `Tensor` of type `float32`.
      a list of vectors containing the example state data.
    loss_type: A `string` from: `"logistic_loss", "squared_loss", "hinge_loss", "smooth_hinge_loss", "poisson_loss"`.
      Type of the primal loss. Currently SdcaSolver supports logistic,
      squared and hinge losses.
    l1: A `float`. Symmetric l1 regularization strength.
    l2: A `float`. Symmetric l2 regularization strength.
    num_loss_partitions: An `int` that is `>= 1`.
      Number of partitions of the global loss function.
    num_inner_iterations: An `int` that is `>= 1`.
      Number of iterations per mini-batch.
    adaptive: An optional `bool`. Defaults to `True`.
      Whether to use Adaptive SDCA for the inner loop.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights).

    out_example_state_data: A `Tensor` of type `float32`.
    out_delta_sparse_weights: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `float32`.
    out_delta_dense_weights: A list with the same length as `dense_features` of `Tensor` objects with type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SdcaOptimizerV2", name, sparse_example_indices,
        sparse_feature_indices, sparse_feature_values, dense_features,
        example_weights, example_labels, sparse_indices, sparse_weights,
        dense_weights, example_state_data, "loss_type", loss_type,
        "adaptive", adaptive, "l1", l1, "l2", l2, "num_loss_partitions",
        num_loss_partitions, "num_inner_iterations", num_inner_iterations)
      _result = _SdcaOptimizerV2Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # Slow eager path with explicit Python-side attr handling.
      return sdca_optimizer_v2_eager_fallback(
          sparse_example_indices, sparse_feature_indices,
          sparse_feature_values, dense_features, example_weights,
          example_labels, sparse_indices, sparse_weights, dense_weights,
          example_state_data, loss_type=loss_type, adaptive=adaptive, l1=l1,
          l2=l2, num_loss_partitions=num_loss_partitions,
          num_inner_iterations=num_inner_iterations, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Validate the list-valued inputs; the sparse lists must all have the same
  # length, which also determines the "num_sparse_features" attr.
  if not isinstance(sparse_example_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_example_indices' argument to "
        "'sdca_optimizer_v2' Op, not %r." % sparse_example_indices)
  _attr_num_sparse_features = len(sparse_example_indices)
  if not isinstance(sparse_feature_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_feature_indices' argument to "
        "'sdca_optimizer_v2' Op, not %r." % sparse_feature_indices)
  if len(sparse_feature_indices) != _attr_num_sparse_features:
    raise ValueError(
        "List argument 'sparse_feature_indices' to 'sdca_optimizer_v2' Op with length %d "
        "must match length %d of argument 'sparse_example_indices'."
        % (len(sparse_feature_indices), _attr_num_sparse_features))
  if not isinstance(sparse_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_indices' argument to "
        "'sdca_optimizer_v2' Op, not %r." % sparse_indices)
  if len(sparse_indices) != _attr_num_sparse_features:
    raise ValueError(
        "List argument 'sparse_indices' to 'sdca_optimizer_v2' Op with length %d "
        "must match length %d of argument 'sparse_example_indices'."
        % (len(sparse_indices), _attr_num_sparse_features))
  if not isinstance(sparse_weights, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_weights' argument to "
        "'sdca_optimizer_v2' Op, not %r." % sparse_weights)
  if len(sparse_weights) != _attr_num_sparse_features:
    raise ValueError(
        "List argument 'sparse_weights' to 'sdca_optimizer_v2' Op with length %d "
        "must match length %d of argument 'sparse_example_indices'."
        % (len(sparse_weights), _attr_num_sparse_features))
  if not isinstance(sparse_feature_values, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_feature_values' argument to "
        "'sdca_optimizer_v2' Op, not %r." % sparse_feature_values)
  _attr_num_sparse_features_with_values = len(sparse_feature_values)
  if not isinstance(dense_features, (list, tuple)):
    raise TypeError(
        "Expected list for 'dense_features' argument to "
        "'sdca_optimizer_v2' Op, not %r." % dense_features)
  _attr_num_dense_features = len(dense_features)
  if not isinstance(dense_weights, (list, tuple)):
    raise TypeError(
        "Expected list for 'dense_weights' argument to "
        "'sdca_optimizer_v2' Op, not %r." % dense_weights)
  if len(dense_weights) != _attr_num_dense_features:
    raise ValueError(
        "List argument 'dense_weights' to 'sdca_optimizer_v2' Op with length %d "
        "must match length %d of argument 'dense_features'."
        % (len(dense_weights), _attr_num_dense_features))
  loss_type = _execute.make_str(loss_type, "loss_type")
  l1 = _execute.make_float(l1, "l1")
  l2 = _execute.make_float(l2, "l2")
  num_loss_partitions = _execute.make_int(num_loss_partitions, "num_loss_partitions")
  num_inner_iterations = _execute.make_int(num_inner_iterations, "num_inner_iterations")
  if adaptive is None:
    adaptive = True
  adaptive = _execute.make_bool(adaptive, "adaptive")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SdcaOptimizerV2", sparse_example_indices=sparse_example_indices,
                           sparse_feature_indices=sparse_feature_indices,
                           sparse_feature_values=sparse_feature_values,
                           dense_features=dense_features,
                           example_weights=example_weights,
                           example_labels=example_labels,
                           sparse_indices=sparse_indices,
                           sparse_weights=sparse_weights,
                           dense_weights=dense_weights,
                           example_state_data=example_state_data,
                           loss_type=loss_type, l1=l1, l2=l2,
                           num_loss_partitions=num_loss_partitions,
                           num_inner_iterations=num_inner_iterations,
                           adaptive=adaptive, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("loss_type", _op.get_attr("loss_type"), "adaptive",
              _op._get_attr_bool("adaptive"), "num_sparse_features",
              _op._get_attr_int("num_sparse_features"),
              "num_sparse_features_with_values",
              _op._get_attr_int("num_sparse_features_with_values"),
              "num_dense_features", _op._get_attr_int("num_dense_features"),
              "l1", _op.get_attr("l1"), "l2", _op.get_attr("l2"),
              "num_loss_partitions", _op._get_attr_int("num_loss_partitions"),
              "num_inner_iterations",
              _op._get_attr_int("num_inner_iterations"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SdcaOptimizerV2", _inputs_flat, _attrs, _result)
  # Repack the flat output list into
  # (state, [sparse weight deltas], [dense weight deltas]).
  _result = _result[:1] + [_result[1:1 + _attr_num_sparse_features]] + _result[1 + _attr_num_sparse_features:]
  _result = _result[:2] + [_result[2:]]
  _result = _SdcaOptimizerV2Output._make(_result)
  return _result
def right_shift(x, y, name=None):
  r"""Elementwise computes the bitwise right-shift of `x` and `y`.

  Performs a logical shift for unsigned integer types, and an arithmetic shift
  for signed integer types.

  If `y` is negative, or greater than or equal to than the width of `x` in bits
  the result is implementation defined.

  Example:

  ```python
  import tensorflow as tf
  from tensorflow.python.ops import bitwise_ops
  import numpy as np
  dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]

  for dtype in dtype_list:
    lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
    rhs = tf.constant([5, 0, 7, 11], dtype=dtype)

    right_shift_result = bitwise_ops.right_shift(lhs, rhs)

    print(right_shift_result)

  # This will print:
  # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)
  # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)
  # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)
  # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)

  lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
  rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
  bitwise_ops.right_shift(lhs, rhs)
  # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, tld.device_name, "RightShift", name,
          tld.op_callbacks, x, y)
      return _result
    except _core._FallbackException:
      try:
        # Python slow path; dispatch handlers get a chance on type errors.
        return right_shift_eager_fallback(x, y, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(right_shift, x=x, y=y, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper("RightShift",
                                                           x=x, y=y,
                                                           name=name)
  except (TypeError, ValueError):
    # Give registered dispatch handlers a chance to handle these inputs.
    result = _dispatch.dispatch(right_shift, x=x, y=y, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient("RightShift", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
def invert(x, name=None):
  r"""Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010.

  Flip each bit of supported types.  For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101.
  This operation is performed on each element of the tensor argument `x`.

  Example:
  ```python
  import tensorflow as tf
  from tensorflow.python.ops import bitwise_ops

  # flip 2 (00000010) to -3 (11111101)
  tf.assert_equal(-3, bitwise_ops.invert(2))

  dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
                dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]

  inputs = [0, 5, 3, 14]
  for dtype in dtype_list:
    # Because of issues with negative numbers, let's test this indirectly.
    # 1. invert(a) and a = 0
    # 2. invert(a) or a = invert(0)
    input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)
    not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and(
                                        input_tensor, bitwise_ops.invert(input_tensor)),
                                      bitwise_ops.bitwise_or(
                                        input_tensor, bitwise_ops.invert(input_tensor)),
                                      bitwise_ops.invert(
                                        tf.constant(0, dtype=dtype))]

    expected = tf.constant([0, 0, 0, 0], dtype=tf.float32)
    tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected)

    expected = tf.cast([not_0] * 4, tf.float32)
    tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected)

    # For unsigned dtypes let's also check the result directly.
    if dtype.is_unsigned:
      inverted = bitwise_ops.invert(input_tensor)
      expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32)
      tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32))
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, tld.device_name, "Invert", name,
          tld.op_callbacks, x)
      return _result
    except _core._FallbackException:
      try:
        # Python slow path; dispatch handlers get a chance on type errors.
        return invert_eager_fallback(x, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(invert, x=x, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper("Invert", x=x,
                                                           name=name)
  except (TypeError, ValueError):
    # Give registered dispatch handlers a chance to handle these inputs.
    result = _dispatch.dispatch(invert, x=x, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient("Invert", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
def trt_engine_op_eager_fallback(in_tensor, serialized_segment, OutT, workspace_size_bytes, precision_mode, segment_func="", max_cached_engines_count=1, calibration_data="", use_calibration=True, segment_funcdef_name="", cached_engine_batches=[], fixed_input_size=True, input_shapes=[], output_shapes=[], static_engine=True, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  This is for function trt_engine_op: normalizes every attr to its canonical
  Python value, converts `in_tensor` to eager tensors, and executes the
  `TRTEngineOp` directly. The number of outputs equals `len(OutT)`.
  """
  _ctx = ctx if ctx else _context.context()
  serialized_segment = _execute.make_str(serialized_segment, "serialized_segment")
  if not isinstance(OutT, (list, tuple)):
    raise TypeError(
        "Expected list for 'OutT' argument to "
        "'trt_engine_op' Op, not %r." % OutT)
  OutT = [_execute.make_type(_t, "OutT") for _t in OutT]
  workspace_size_bytes = _execute.make_int(workspace_size_bytes, "workspace_size_bytes")
  precision_mode = _execute.make_str(precision_mode, "precision_mode")
  # NOTE(review): unlike the other string attrs, segment_func is only
  # defaulted, never passed through make_str — presumably a function attr;
  # confirm against the op definition.
  if segment_func is None:
    segment_func = ""
  if max_cached_engines_count is None:
    max_cached_engines_count = 1
  max_cached_engines_count = _execute.make_int(max_cached_engines_count, "max_cached_engines_count")
  if calibration_data is None:
    calibration_data = ""
  calibration_data = _execute.make_str(calibration_data, "calibration_data")
  if use_calibration is None:
    use_calibration = True
  use_calibration = _execute.make_bool(use_calibration, "use_calibration")
  if segment_funcdef_name is None:
    segment_funcdef_name = ""
  segment_funcdef_name = _execute.make_str(segment_funcdef_name, "segment_funcdef_name")
  if cached_engine_batches is None:
    cached_engine_batches = []
  if not isinstance(cached_engine_batches, (list, tuple)):
    raise TypeError(
        "Expected list for 'cached_engine_batches' argument to "
        "'trt_engine_op' Op, not %r."
        % cached_engine_batches)
  cached_engine_batches = [
      _execute.make_int(_i, "cached_engine_batches")
      for _i in cached_engine_batches
  ]
  if fixed_input_size is None:
    fixed_input_size = True
  fixed_input_size = _execute.make_bool(fixed_input_size, "fixed_input_size")
  if input_shapes is None:
    input_shapes = []
  if not isinstance(input_shapes, (list, tuple)):
    raise TypeError("Expected list for 'input_shapes' argument to "
                    "'trt_engine_op' Op, not %r." % input_shapes)
  input_shapes = [
      _execute.make_shape(_s, "input_shapes") for _s in input_shapes
  ]
  if output_shapes is None:
    output_shapes = []
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError("Expected list for 'output_shapes' argument to "
                    "'trt_engine_op' Op, not %r." % output_shapes)
  output_shapes = [
      _execute.make_shape(_s, "output_shapes") for _s in output_shapes
  ]
  if static_engine is None:
    static_engine = True
  static_engine = _execute.make_bool(static_engine, "static_engine")
  # Infer the "InT" attr from the runtime types of the input tensors.
  _attr_InT, in_tensor = _execute.convert_to_mixed_eager_tensors(
      in_tensor, _ctx)
  _inputs_flat = list(in_tensor)
  _attrs = ("serialized_segment", serialized_segment, "segment_func",
            segment_func, "InT", _attr_InT, "OutT", OutT,
            "max_cached_engines_count", max_cached_engines_count,
            "workspace_size_bytes", workspace_size_bytes, "precision_mode",
            precision_mode, "calibration_data", calibration_data,
            "use_calibration", use_calibration, "segment_funcdef_name",
            segment_funcdef_name, "cached_engine_batches",
            cached_engine_batches, "fixed_input_size", fixed_input_size,
            "input_shapes", input_shapes, "output_shapes", output_shapes,
            "static_engine", static_engine)
  _result = _execute.execute(b"TRTEngineOp", len(OutT), inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("TRTEngineOp", _inputs_flat, _attrs, _result,
                           name)
  return _result
def ignite_dataset(cache_name, host, port, local, part, page_size, schema, permutation, name=None):
  r"""IgniteDataset that allows to get data from Apache Ignite.

  Apache Ignite is a memory-centric distributed database, caching, and
  processing platform for transactional, analytical, and streaming workloads,
  delivering in-memory speeds at petabyte scale. This contrib package
  contains an integration between Apache Ignite and TensorFlow. The
  integration is based on tf.data from TensorFlow side and Binary Client
  Protocol from Apache Ignite side. It allows to use Apache Ignite as a
  datasource for neural network training, inference and all other
  computations supported by TensorFlow. Ignite Dataset is based on Apache
  Ignite Binary Client Protocol.

  Args:
    cache_name: A `Tensor` of type `string`. Ignite Cache Name.
    host: A `Tensor` of type `string`. Ignite Thin Client Host.
    port: A `Tensor` of type `int32`. Ignite Thin Client Port.
    local: A `Tensor` of type `bool`.
      Local flag that defines that data should be fetched from local host only.
    part: A `Tensor` of type `int32`. Partition data should be fetched from.
    page_size: A `Tensor` of type `int32`. Page size for Ignite Thin Client.
    schema: A `Tensor` of type `int32`.
      Internal structure that defines schema of cache objects.
    permutation: A `Tensor` of type `int32`.
      Internal structure that defines permutation of cache objects.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: execute directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._thread_local_data.device_name,
          "IgniteDataset", name, _ctx.post_execution_callbacks, cache_name,
          host, port, local, part, page_size, schema, permutation)
      return _result
    except _core._FallbackException:
      try:
        # Python slow path; dispatch handlers get a chance on type errors.
        return ignite_dataset_eager_fallback(
            cache_name, host, port, local, part, page_size, schema,
            permutation, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              ignite_dataset, cache_name=cache_name, host=host, port=port,
              local=local, part=part, page_size=page_size, schema=schema,
              permutation=permutation, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the mapped TF error type, suppressing exception chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "IgniteDataset", cache_name=cache_name, host=host, port=port,
                         local=local, part=part, page_size=page_size,
                         schema=schema, permutation=permutation, name=name)
  except (TypeError, ValueError):
    # Give registered dispatch handlers a chance to handle these inputs.
    result = _dispatch.dispatch(
          ignite_dataset, cache_name=cache_name, host=host, port=port,
          local=local, part=part, page_size=page_size, schema=schema,
          permutation=permutation, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = None
  _execute.record_gradient("IgniteDataset", _inputs_flat, _attrs, _result,
                           name)
  _result, = _result
  return _result
def skip_gram_generate_candidates(input_tensor, min_skips, max_skips, start, limit, emit_self_as_target, seed=0, seed2=0, name=None):
  r"""Generates skip-gram token and label paired Tensors from the input tensor.

  See docs for the public-facing skip_gram_sample() Python op for more
  details.

  Args:
    input_tensor: A `Tensor`.
    min_skips: A `Tensor` of type `int32`.
    max_skips: A `Tensor` of type `int32`.
    start: A `Tensor` of type `int32`.
    limit: A `Tensor` of type `int32`.
    emit_self_as_target: A `Tensor` of type `bool`.
    seed: An optional `int`. Defaults to `0`.
    seed2: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (tokens, labels), both with the same type as
    `input_tensor`.
  """
  # Normalize the optional seed attrs before choosing an execution path.
  seed = _execute.make_int(0 if seed is None else seed, "seed")
  seed2 = _execute.make_int(0 if seed2 is None else seed2, "seed2")
  ctx = _context.context()
  if ctx.in_graph_mode():
    # Graph mode: register the op node, then read attrs back off of it.
    _, _, op = _op_def_lib._apply_op_helper(
        "SkipGramGenerateCandidates", input_tensor=input_tensor,
        min_skips=min_skips, max_skips=max_skips, start=start, limit=limit,
        emit_self_as_target=emit_self_as_target, seed=seed, seed2=seed2,
        name=name)
    outputs = op.outputs[:]
    flat_inputs = op.inputs
    attrs = ("T", op.get_attr("T"), "seed", op.get_attr("seed"),
             "seed2", op.get_attr("seed2"))
  else:
    # Eager mode: convert every input to a tensor and execute directly.
    attr_t, (input_tensor,) = _execute.args_to_matching_eager(
        [input_tensor], ctx)
    min_skips = _ops.convert_to_tensor(min_skips, _dtypes.int32)
    max_skips = _ops.convert_to_tensor(max_skips, _dtypes.int32)
    start = _ops.convert_to_tensor(start, _dtypes.int32)
    limit = _ops.convert_to_tensor(limit, _dtypes.int32)
    emit_self_as_target = _ops.convert_to_tensor(emit_self_as_target,
                                                 _dtypes.bool)
    flat_inputs = [input_tensor, min_skips, max_skips, start, limit,
                   emit_self_as_target]
    attrs = ("T", attr_t, "seed", seed, "seed2", seed2)
    outputs = _execute.execute(b"SkipGramGenerateCandidates", 2,
                               inputs=flat_inputs, attrs=attrs, ctx=ctx,
                               name=name)
  _execute.record_gradient("SkipGramGenerateCandidates", flat_inputs, attrs,
                           outputs, name)
  return _SkipGramGenerateCandidatesOutput._make(outputs)
def _audio_summary(tag, tensor, sample_rate, max_outputs=3, name=None):
  r"""Outputs a `Summary` protocol buffer with audio.

  The summary has up to `max_outputs` summary values containing audio. The
  audio is built from `tensor`, which must be 3-D with shape
  `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`.
  The values are assumed to be in the range `[-1.0, 1.0]` with a sample rate
  of `sample_rate`.

  The `tag` argument is a scalar string `Tensor` used to build the summary
  value tags: with `max_outputs` of 1 the tag is '*tag*/audio'; otherwise the
  tags are '*tag*/audio/0', '*tag*/audio/1', etc.

  Args:
    tag: A `Tensor` of type `string`.
      Scalar. Used to build the `tag` attribute of the summary values.
    tensor: A `Tensor` of type `float32`. 2-D of shape `[batch_size, frames]`.
    sample_rate: A `float`. The sample rate of the signal in hertz.
    max_outputs: An optional `int` that is `>= 1`. Defaults to `3`.
      Max number of batch elements to generate audio for.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  # Normalize attrs before branching on execution mode.
  sample_rate = _execute.make_float(sample_rate, "sample_rate")
  max_outputs = _execute.make_int(3 if max_outputs is None else max_outputs,
                                  "max_outputs")
  ctx = _context.context()
  if ctx.in_graph_mode():
    # Graph mode: add the node and read attrs back from the created op.
    _, _, op = _op_def_lib._apply_op_helper(
        "AudioSummary", tag=tag, tensor=tensor, sample_rate=sample_rate,
        max_outputs=max_outputs, name=name)
    outputs = op.outputs[:]
    flat_inputs = op.inputs
    attrs = ("sample_rate", op.get_attr("sample_rate"),
             "max_outputs", op.get_attr("max_outputs"))
  else:
    # Eager mode: coerce inputs to their declared dtypes and execute.
    tag = _ops.convert_to_tensor(tag, _dtypes.string)
    tensor = _ops.convert_to_tensor(tensor, _dtypes.float32)
    flat_inputs = [tag, tensor]
    attrs = ("sample_rate", sample_rate, "max_outputs", max_outputs)
    outputs = _execute.execute(b"AudioSummary", 1, inputs=flat_inputs,
                               attrs=attrs, ctx=ctx, name=name)
  _execute.record_gradient("AudioSummary", flat_inputs, attrs, outputs, name)
  # Single-output op: unwrap the one-element list.
  summary, = outputs
  return summary
def collective_bcast_recv(T, group_size, group_key, instance_key, shape, name=None):
  r"""Receives a tensor value broadcast from another device.

  Args:
    T: A `tf.DType` from: `tf.float32, tf.half, tf.float64, tf.int32, tf.int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `T`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: attempt the C fast path first; attrs are passed inline as
    # alternating name/value pairs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "CollectiveBcastRecv", name, _ctx._post_execution_callbacks, "T", T,
        "group_size", group_size, "group_key", group_key, "instance_key",
        instance_key, "shape", shape)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the slower eager fallback.
      try:
        return collective_bcast_recv_eager_fallback(
            T=T, group_size=group_size, group_key=group_key,
            instance_key=instance_key, shape=shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as a Python exception, appending the op
      # name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Canonicalize the attr arguments before building the node.
  T = _execute.make_type(T, "T")
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  _, _, _op = _op_def_lib._apply_op_helper(
      "CollectiveBcastRecv", T=T, group_size=group_size, group_key=group_key,
      instance_key=instance_key, shape=shape, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "group_size",
            _op.get_attr("group_size"), "group_key",
            _op.get_attr("group_key"), "instance_key",
            _op.get_attr("instance_key"), "shape", _op.get_attr("shape"))
  _execute.record_gradient(
      "CollectiveBcastRecv", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def encode_proto(sizes, values, field_names, message_type, descriptor_source="local://", name=None):
  r"""The op serializes protobuf messages provided in the input tensors.

  The types of the tensors in `values` must match the schema for the fields
  specified in `field_names`. All the tensors in `values` must have a common
  shape prefix, *batch_shape*.

  The `sizes` tensor specifies repeat counts for each field. The repeat count
  (last dimension) of a each tensor in `values` must be greater than or equal
  to corresponding repeat count in `sizes`.

  A `message_type` name must be provided to give context for the field names.
  The actual message descriptor can be looked up either in the linked-in
  descriptor pool or a filename provided by the caller using the
  `descriptor_source` attribute.

  For the most part, the mapping between Proto field types and TensorFlow
  dtypes is straightforward. However, there are a few special cases:

  - A proto field that contains a submessage or group can only be converted
  to `DT_STRING` (the serialized submessage). This is to reduce the
  complexity of the API. The resulting string can be used as input
  to another instance of the decode_proto op.

  - TensorFlow lacks support for unsigned integers. The ops represent uint64
  types as a `DT_INT64` with the same twos-complement bit pattern
  (the obvious way). Unsigned int32 values can be represented exactly by
  specifying type `DT_INT64`, or using twos-complement if the caller
  specifies `DT_INT32` in the `output_types` attribute.

  The `descriptor_source` attribute selects the source of protocol
  descriptors to consult when looking up `message_type`. This may be:

  - An empty string  or "local://", in which case protocol descriptors are
  created for C++ (not Python) proto definitions linked to the binary.

  - A file, in which case protocol descriptors are created from the file,
  which is expected to contain a `FileDescriptorSet` serialized as a string.
  NOTE: You can build a `descriptor_source` file using the
  `--descriptor_set_out` and `--include_imports` options to the protocol
  compiler `protoc`.

  - A "bytes://<bytes>", in which protocol descriptors are created from
  `<bytes>`, which is expected to be a `FileDescriptorSet` serialized as a
  string.

  Args:
    sizes: A `Tensor` of type `int32`.
      Tensor of int32 with shape `[batch_shape, len(field_names)]`.
    values: A list of `Tensor` objects.
      List of tensors containing values for the corresponding field.
    field_names: A list of `strings`.
      List of strings containing proto field names.
    message_type: A `string`. Name of the proto message type to decode.
    descriptor_source: An optional `string`. Defaults to `"local://"`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first; it skips Python op construction.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "EncodeProto", name, sizes, values, "field_names", field_names,
        "message_type", message_type, "descriptor_source", descriptor_source)
      return _result
    except _core._NotOkStatusException as e:
      # Surface the C-level error with the op name attached.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    # Fast path refused the inputs; retry via the slower eager fallback.
    try:
      return encode_proto_eager_fallback(
          sizes, values, field_names=field_names, message_type=message_type,
          descriptor_source=descriptor_source, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Let any registered dispatchers handle these argument types before
      # re-raising.
      result = _dispatch.dispatch(
            encode_proto, (), dict(sizes=sizes, values=values,
                                   field_names=field_names,
                                   message_type=message_type,
                                   descriptor_source=descriptor_source,
                                   name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  if not isinstance(field_names, (list, tuple)):
    raise TypeError(
        "Expected list for 'field_names' argument to "
        "'encode_proto' Op, not %r." % field_names)
  field_names = [_execute.make_str(_s, "field_names") for _s in field_names]
  message_type = _execute.make_str(message_type, "message_type")
  if descriptor_source is None:
    descriptor_source = "local://"
  descriptor_source = _execute.make_str(descriptor_source, "descriptor_source")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "EncodeProto", sizes=sizes, values=values, field_names=field_names,
                       message_type=message_type,
                       descriptor_source=descriptor_source, name=name)
  except (TypeError, ValueError):
    # Same dispatcher escape hatch for graph mode.
    result = _dispatch.dispatch(
          encode_proto, (), dict(sizes=sizes, values=values,
                                 field_names=field_names,
                                 message_type=message_type,
                                 descriptor_source=descriptor_source,
                                 name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attrs are read back from the created op so they reflect canonical values.
    _attrs = ("field_names", _op.get_attr("field_names"), "message_type",
              _op.get_attr("message_type"), "descriptor_source",
              _op.get_attr("descriptor_source"), "Tinput_types",
              _op.get_attr("Tinput_types"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "EncodeProto", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def sdca_optimizer_v2_eager_fallback(sparse_example_indices, sparse_feature_indices, sparse_feature_values, dense_features, example_weights, example_labels, sparse_indices, sparse_weights, dense_weights, example_state_data, loss_type, l1, l2, num_loss_partitions, num_inner_iterations, adaptive, name, ctx):
  """Eager-mode fallback path for the SdcaOptimizerV2 op.

  Validates the list-typed inputs, canonicalizes the attrs, converts all
  inputs to tensors, executes the op directly, and re-packs the flat result
  list into the structured `_SdcaOptimizerV2Output` tuple.
  """
  # The per-sparse-feature lists must all be real lists/tuples and share the
  # length of `sparse_example_indices`, which defines num_sparse_features.
  if not isinstance(sparse_example_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_example_indices' argument to "
        "'sdca_optimizer_v2' Op, not %r." % sparse_example_indices)
  _attr_num_sparse_features = len(sparse_example_indices)
  if not isinstance(sparse_feature_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_feature_indices' argument to "
        "'sdca_optimizer_v2' Op, not %r." % sparse_feature_indices)
  if len(sparse_feature_indices) != _attr_num_sparse_features:
    raise ValueError(
        "List argument 'sparse_feature_indices' to 'sdca_optimizer_v2' Op with length %d "
        "must match length %d of argument 'sparse_example_indices'." %
        (len(sparse_feature_indices), _attr_num_sparse_features))
  if not isinstance(sparse_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_indices' argument to "
        "'sdca_optimizer_v2' Op, not %r." % sparse_indices)
  if len(sparse_indices) != _attr_num_sparse_features:
    raise ValueError(
        "List argument 'sparse_indices' to 'sdca_optimizer_v2' Op with length %d "
        "must match length %d of argument 'sparse_example_indices'." %
        (len(sparse_indices), _attr_num_sparse_features))
  if not isinstance(sparse_weights, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_weights' argument to "
        "'sdca_optimizer_v2' Op, not %r." % sparse_weights)
  if len(sparse_weights) != _attr_num_sparse_features:
    raise ValueError(
        "List argument 'sparse_weights' to 'sdca_optimizer_v2' Op with length %d "
        "must match length %d of argument 'sparse_example_indices'." %
        (len(sparse_weights), _attr_num_sparse_features))
  # `sparse_feature_values` has its own independent length attr.
  if not isinstance(sparse_feature_values, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_feature_values' argument to "
        "'sdca_optimizer_v2' Op, not %r." % sparse_feature_values)
  _attr_num_sparse_features_with_values = len(sparse_feature_values)
  # Dense features and dense weights must agree in length.
  if not isinstance(dense_features, (list, tuple)):
    raise TypeError(
        "Expected list for 'dense_features' argument to "
        "'sdca_optimizer_v2' Op, not %r." % dense_features)
  _attr_num_dense_features = len(dense_features)
  if not isinstance(dense_weights, (list, tuple)):
    raise TypeError(
        "Expected list for 'dense_weights' argument to "
        "'sdca_optimizer_v2' Op, not %r." % dense_weights)
  if len(dense_weights) != _attr_num_dense_features:
    raise ValueError(
        "List argument 'dense_weights' to 'sdca_optimizer_v2' Op with length %d "
        "must match length %d of argument 'dense_features'." %
        (len(dense_weights), _attr_num_dense_features))
  # Canonicalize the scalar attrs.
  loss_type = _execute.make_str(loss_type, "loss_type")
  l1 = _execute.make_float(l1, "l1")
  l2 = _execute.make_float(l2, "l2")
  num_loss_partitions = _execute.make_int(num_loss_partitions, "num_loss_partitions")
  num_inner_iterations = _execute.make_int(num_inner_iterations, "num_inner_iterations")
  if adaptive is None:
    adaptive = True
  adaptive = _execute.make_bool(adaptive, "adaptive")
  # Convert every input (lists via convert_n_to_tensor) to tensors of the
  # dtypes declared by the op.
  sparse_example_indices = _ops.convert_n_to_tensor(sparse_example_indices, _dtypes.int64)
  sparse_feature_indices = _ops.convert_n_to_tensor(sparse_feature_indices, _dtypes.int64)
  sparse_feature_values = _ops.convert_n_to_tensor(sparse_feature_values, _dtypes.float32)
  dense_features = _ops.convert_n_to_tensor(dense_features, _dtypes.float32)
  example_weights = _ops.convert_to_tensor(example_weights, _dtypes.float32)
  example_labels = _ops.convert_to_tensor(example_labels, _dtypes.float32)
  sparse_indices = _ops.convert_n_to_tensor(sparse_indices, _dtypes.int64)
  sparse_weights = _ops.convert_n_to_tensor(sparse_weights, _dtypes.float32)
  dense_weights = _ops.convert_n_to_tensor(dense_weights, _dtypes.float32)
  example_state_data = _ops.convert_to_tensor(example_state_data, _dtypes.float32)
  # Flatten in the exact input order the op definition expects.
  _inputs_flat = list(sparse_example_indices) + list(sparse_feature_indices) + list(sparse_feature_values) + list(dense_features) + [example_weights, example_labels] + list(sparse_indices) + list(sparse_weights) + list(dense_weights) + [example_state_data]
  _attrs = ("loss_type", loss_type, "adaptive", adaptive,
  "num_sparse_features", _attr_num_sparse_features,
  "num_sparse_features_with_values", _attr_num_sparse_features_with_values,
  "num_dense_features", _attr_num_dense_features, "l1", l1, "l2", l2,
  "num_loss_partitions", num_loss_partitions, "num_inner_iterations",
  num_inner_iterations)
  # Output count: 1 example_state_data + one delta per sparse feature + one
  # delta per dense feature.
  _result = _execute.execute(b"SdcaOptimizerV2", _attr_num_sparse_features +
                             _attr_num_dense_features + 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SdcaOptimizerV2", _inputs_flat, _attrs, _result)
  # Re-group the flat result list: [state, [sparse deltas...], [dense deltas...]].
  _result = _result[:1] + [_result[1:1 + _attr_num_sparse_features]] + _result[1 + _attr_num_sparse_features:]
  _result = _result[:2] + [_result[2:]]
  _result = _SdcaOptimizerV2Output._make(_result)
  return _result
def bitwise_and(x, y, name=None):
  r"""Elementwise computes the bitwise AND of `x` and `y`.

  The result will have those bits set, that are set in both `x` and `y`. The
  computation is performed on the underlying representations of `x` and `y`.

  For example:

  ```python
  import tensorflow as tf
  from tensorflow.python.ops import bitwise_ops
  dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
                tf.uint8, tf.uint16, tf.uint32, tf.uint64]

  for dtype in dtype_list:
    lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
    rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
    exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)

    res = bitwise_ops.bitwise_and(lhs, rhs)
    tf.assert_equal(tf.cast(res, tf.float32), exp)  # TRUE
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `int8`, `int16`,
      `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first; it skips Python op construction.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "BitwiseAnd", name,
        tld.op_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the slower eager fallback.
      try:
        return bitwise_and_eager_fallback(
            x, y, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Let any registered dispatchers handle these argument types before
        # re-raising.
        result = _dispatch.dispatch(
              bitwise_and, x=x, y=y, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Surface the C-level error with the op name attached.
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BitwiseAnd", x=x, y=y, name=name)
  except (TypeError, ValueError):
    # Same dispatcher escape hatch for graph mode.
    result = _dispatch.dispatch(
          bitwise_and, x=x, y=y, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BitwiseAnd", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def lstm_block_cell_grad(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None):
  r"""Computes the LSTM cell backward propagation for 1 timestep.

  This implementation is to be used in conjunction with LSTMBlockCell.

  Args:
    x: A float32 `Tensor`; the LSTM cell input, shape
      (batch_size, num_inputs).
    cs_prev: Same type as `x`; the previous cell state.
    h_prev: Same type as `x`; the previous h state.
    w: Same type as `x`; the weight matrix.
    wci: Same type as `x`; input-gate peephole weights.
    wcf: Same type as `x`; forget-gate peephole weights.
    wco: Same type as `x`; output-gate peephole weights.
    b: Same type as `x`; the bias vector.
    i: Same type as `x`; the input gate.
    cs: Same type as `x`; the cell state before the tanh.
    f: Same type as `x`; the forget gate.
    o: Same type as `x`; the output gate.
    ci: Same type as `x`; the cell input.
    co: Same type as `x`; the cell after the tanh.
    cs_grad: Same type as `x`; the current gradient of cs.
    h_grad: Same type as `x`; the gradient of the h vector.
    use_peephole: A `bool`; whether the cell uses peephole connections.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects
    (cs_prev_grad, dicfo, wci_grad, wcf_grad, wco_grad), each with the same
    type as `x`: the gradient of cs, the derivative w.r.t. [i, cs, f, o],
    and the gradients for wci, wcf and wco.
  """
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  ctx = _context.context()
  if ctx.in_graph_mode():
    # Graph mode: build the node, then read attrs back off the created op.
    _, _, op = _op_def_lib._apply_op_helper(
        "LSTMBlockCellGrad", x=x, cs_prev=cs_prev, h_prev=h_prev, w=w,
        wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f, o=o, ci=ci, co=co,
        cs_grad=cs_grad, h_grad=h_grad, use_peephole=use_peephole, name=name)
    outputs = op.outputs[:]
    flat_inputs = op.inputs
    attrs = ("use_peephole", op.get_attr("use_peephole"),
             "T", op.get_attr("T"))
  else:
    # Eager mode: coerce all tensor inputs to one shared dtype, then run.
    attr_t, matched = _execute.args_to_matching_eager(
        [x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co,
         cs_grad, h_grad], ctx)
    attr_t = attr_t.as_datatype_enum
    flat_inputs = list(matched)
    attrs = ("use_peephole", use_peephole, "T", attr_t)
    outputs = _execute.execute(b"LSTMBlockCellGrad", 5, inputs=flat_inputs,
                               attrs=attrs, ctx=ctx, name=name)
  _execute.record_gradient("LSTMBlockCellGrad", flat_inputs, attrs, outputs,
                           name)
  return _LSTMBlockCellGradOutput._make(outputs)
def block_lstm(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
  r"""Computes the LSTM cell forward propagation for all the time steps.

  Equivalent to applying LSTMBlockCell in a loop over the unpacked time
  dimension of `x`, threading the cell state and output through each step
  and packing the per-step results.

  Args:
    seq_len_max: An int64 `Tensor`. Maximum time length actually used by
      this input; outputs are padded with zeros beyond this length.
    x: A float32 `Tensor`; the sequence input to the LSTM, shape
      (timelen, batch_size, num_inputs).
    cs_prev: Same type as `x`; value of the initial cell state.
    h_prev: Same type as `x`; initial output of cell (to be used for
      peephole).
    w: Same type as `x`; the weight matrix.
    wci: Same type as `x`; input-gate peephole weights.
    wcf: Same type as `x`; forget-gate peephole weights.
    wco: Same type as `x`; output-gate peephole weights.
    b: Same type as `x`; the bias vector.
    forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
    cell_clip: An optional `float`. Defaults to `3`. Value to clip the 'cs'
      value to.
    use_peephole: An optional `bool`. Defaults to `False`. Whether to use
      peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h), each with the same
    type as `x`, covering the whole time sequence.
  """
  # Normalize the optional attrs before choosing an execution path.
  forget_bias = _execute.make_float(1 if forget_bias is None else forget_bias,
                                    "forget_bias")
  cell_clip = _execute.make_float(3 if cell_clip is None else cell_clip,
                                  "cell_clip")
  use_peephole = _execute.make_bool(
      False if use_peephole is None else use_peephole, "use_peephole")
  ctx = _context.context()
  if ctx.in_graph_mode():
    # Graph mode: build the node, then read attrs back off the created op.
    _, _, op = _op_def_lib._apply_op_helper(
        "BlockLSTM", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,
        h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b,
        forget_bias=forget_bias, cell_clip=cell_clip,
        use_peephole=use_peephole, name=name)
    outputs = op.outputs[:]
    flat_inputs = op.inputs
    attrs = ("forget_bias", op.get_attr("forget_bias"),
             "cell_clip", op.get_attr("cell_clip"),
             "use_peephole", op.get_attr("use_peephole"),
             "T", op.get_attr("T"))
  else:
    # Eager mode: unify the float inputs' dtype, convert seq_len_max, run.
    attr_t, matched = _execute.args_to_matching_eager(
        [x, cs_prev, h_prev, w, wci, wcf, wco, b], ctx)
    (x, cs_prev, h_prev, w, wci, wcf, wco, b) = matched
    attr_t = attr_t.as_datatype_enum
    seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
    flat_inputs = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
    attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
             "use_peephole", use_peephole, "T", attr_t)
    outputs = _execute.execute(b"BlockLSTM", 7, inputs=flat_inputs,
                               attrs=attrs, ctx=ctx, name=name)
  _execute.record_gradient("BlockLSTM", flat_inputs, attrs, outputs, name)
  return _BlockLSTMOutput._make(outputs)
def lstm_block_cell(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
  r"""Computes the LSTM cell forward propagation for 1 time step.

  This implementation uses 1 weight matrix and 1 bias vector, and there's an
  optional peephole connection. The kernel implements:

  ```python
  xh = [x, h_prev]
  [i, f, ci, o] = xh * w + b
  f = f + forget_bias
  if not use_peephole:
    wci = wcf = wco = 0
  i = sigmoid(cs_prev * wci + i)
  f = sigmoid(cs_prev * wcf + f)
  ci = tanh(ci)
  cs = ci .* i + cs_prev .* f
  cs = clip(cs, cell_clip)
  o = sigmoid(cs * wco + o)
  co = tanh(cs)
  h = co .* o
  ```

  Args:
    x: A float32 `Tensor`; the input to the LSTM cell, shape
      (batch_size, num_inputs).
    cs_prev: Same type as `x`; value of the cell state at previous time step.
    h_prev: Same type as `x`; output of the previous cell at previous time
      step.
    w: Same type as `x`; the weight matrix.
    wci: Same type as `x`; input-gate peephole weights.
    wcf: Same type as `x`; forget-gate peephole weights.
    wco: Same type as `x`; output-gate peephole weights.
    b: Same type as `x`; the bias vector.
    forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
    cell_clip: An optional `float`. Defaults to `3`. Value to clip the 'cs'
      value to.
    use_peephole: An optional `bool`. Defaults to `False`. Whether to use
      peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h), each with the same
    type as `x`.
  """
  # Normalize the optional attrs before choosing an execution path.
  forget_bias = _execute.make_float(1 if forget_bias is None else forget_bias,
                                    "forget_bias")
  cell_clip = _execute.make_float(3 if cell_clip is None else cell_clip,
                                  "cell_clip")
  use_peephole = _execute.make_bool(
      False if use_peephole is None else use_peephole, "use_peephole")
  ctx = _context.context()
  if ctx.in_graph_mode():
    # Graph mode: build the node, then read attrs back off the created op.
    _, _, op = _op_def_lib._apply_op_helper(
        "LSTMBlockCell", x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci,
        wcf=wcf, wco=wco, b=b, forget_bias=forget_bias, cell_clip=cell_clip,
        use_peephole=use_peephole, name=name)
    outputs = op.outputs[:]
    flat_inputs = op.inputs
    attrs = ("forget_bias", op.get_attr("forget_bias"),
             "cell_clip", op.get_attr("cell_clip"),
             "use_peephole", op.get_attr("use_peephole"),
             "T", op.get_attr("T"))
  else:
    # Eager mode: unify all tensor inputs on one dtype, then execute.
    attr_t, matched = _execute.args_to_matching_eager(
        [x, cs_prev, h_prev, w, wci, wcf, wco, b], ctx)
    attr_t = attr_t.as_datatype_enum
    flat_inputs = list(matched)
    attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
             "use_peephole", use_peephole, "T", attr_t)
    outputs = _execute.execute(b"LSTMBlockCell", 7, inputs=flat_inputs,
                               attrs=attrs, ctx=ctx, name=name)
  _execute.record_gradient("LSTMBlockCell", flat_inputs, attrs, outputs,
                           name)
  return _LSTMBlockCellOutput._make(outputs)
def block_lstm_grad(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None):
  r"""Computes the LSTM cell backward propagation for the entire time sequence.

  This implementation is to be used in conjunction with BlockLSTM.

  Args:
    seq_len_max: An int64 `Tensor`. Maximum time length actually used by
      this input; outputs are padded with zeros beyond this length.
    x: A float32 `Tensor`; the sequence input to the LSTM, shape
      (timelen, batch_size, num_inputs).
    cs_prev: Same type as `x`; value of the initial cell state.
    h_prev: Same type as `x`; initial output of cell (to be used for
      peephole).
    w: Same type as `x`; the weight matrix.
    wci: Same type as `x`; input-gate peephole weights.
    wcf: Same type as `x`; forget-gate peephole weights.
    wco: Same type as `x`; output-gate peephole weights.
    b: Same type as `x`; the bias vector.
    i: Same type as `x`; the input gate over the whole time sequence.
    cs: Same type as `x`; the cell state before the tanh over the whole time
      sequence.
    f: Same type as `x`; the forget gate over the whole time sequence.
    o: Same type as `x`; the output gate over the whole time sequence.
    ci: Same type as `x`; the cell input over the whole time sequence.
    co: Same type as `x`; the cell after the tanh over the whole time
      sequence.
    h: Same type as `x`; the output h vector over the whole time sequence.
    cs_grad: Same type as `x`; the current gradient of cs.
    h_grad: Same type as `x`; the gradient of the h vector.
    use_peephole: A `bool`. Whether to use peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (x_grad, cs_prev_grad, h_prev_grad, w_grad,
    wci_grad, wcf_grad, wco_grad, b_grad), each with the same type as `x`:
    the gradients to be back-propped for x, cs_prev, h_prev, w, wci, wcf,
    wco and b respectively.
  """
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  ctx = _context.context()
  if ctx.in_graph_mode():
    # Graph mode: build the node, then read attrs back off the created op.
    _, _, op = _op_def_lib._apply_op_helper(
        "BlockLSTMGrad", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,
        h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f,
        o=o, ci=ci, co=co, h=h, cs_grad=cs_grad, h_grad=h_grad,
        use_peephole=use_peephole, name=name)
    outputs = op.outputs[:]
    flat_inputs = op.inputs
    attrs = ("use_peephole", op.get_attr("use_peephole"),
             "T", op.get_attr("T"))
  else:
    # Eager mode: unify the float inputs' dtype, convert seq_len_max, run.
    attr_t, matched = _execute.args_to_matching_eager(
        [x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h,
         cs_grad, h_grad], ctx)
    (x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h,
     cs_grad, h_grad) = matched
    attr_t = attr_t.as_datatype_enum
    seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
    flat_inputs = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i,
                   cs, f, o, ci, co, h, cs_grad, h_grad]
    attrs = ("use_peephole", use_peephole, "T", attr_t)
    outputs = _execute.execute(b"BlockLSTMGrad", 8, inputs=flat_inputs,
                               attrs=attrs, ctx=ctx, name=name)
  _execute.record_gradient("BlockLSTMGrad", flat_inputs, attrs, outputs,
                           name)
  return _BlockLSTMGradOutput._make(outputs)
def xla_launch(constants, args, resources, Tresults, function, name=None):
  r"""XLA Launch Op. For use by the XLA JIT only.

  Args:
    constants: A list of `Tensor` objects.
    args: A list of `Tensor` objects.
    resources: A list of `Tensor` objects with type `resource`.
    Tresults: A list of `tf.DTypes`.
    function: A function decorated with @Defun.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tresults`.
  """
  # NOTE: machine-generated op wrapper (TF op-gen style). The ordering of the
  # fast-path arguments, the exception chain, and the attr tuple below are all
  # load-bearing; edit with care.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path: hand everything to the C layer in one call.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "XlaLaunch", name, _ctx.post_execution_callbacks, constants, args,
        resources, "Tresults", Tresults, "function", function)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slow-path eager helper.
      try:
        return xla_launch_eager_fallback(
            constants, args, resources, Tresults=Tresults, function=function,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Give registered dispatchers (e.g. for custom types) a chance.
        result = _dispatch.dispatch(
              xla_launch, constants=constants, args=args,
              resources=resources, Tresults=Tresults, function=function,
              name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Re-raise C-level status errors with the op name appended for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph-construction path: validate list-valued attrs, then add an op node.
  if not isinstance(resources, (list, tuple)):
    raise TypeError("Expected list for 'resources' argument to "
                    "'xla_launch' Op, not %r." % resources)
  _attr_Nresources = len(resources)
  if not isinstance(Tresults, (list, tuple)):
    raise TypeError("Expected list for 'Tresults' argument to "
                    "'xla_launch' Op, not %r." % Tresults)
  Tresults = [_execute.make_type(_t, "Tresults") for _t in Tresults]
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "XlaLaunch", constants=constants, args=args, resources=resources,
        Tresults=Tresults, function=function, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_launch, constants=constants, args=args, resources=resources,
          Tresults=Tresults, function=function, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  # An op with zero outputs returns the Operation itself.
  if not _result:
    return _op
  _inputs_flat = _op.inputs
  _attrs = ("Tconstants", _op.get_attr("Tconstants"), "Targs",
            _op.get_attr("Targs"), "Nresources", _op.get_attr("Nresources"),
            "Tresults", _op.get_attr("Tresults"), "function",
            _op.get_attr("function"))
  _execute.record_gradient("XlaLaunch", _inputs_flat, _attrs, _result, name)
  return _result
def collective_reduce(input, group_size, group_key, instance_key, merge_op, final_op, subdiv_offsets, wait_for=[], name=None):
  r"""Mutually reduces multiple tensors of identical type and shape.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `half`,
      `float64`, `int32`, `int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    merge_op: A `string` from: `"Min", "Max", "Mul", "Add"`.
    final_op: A `string` from: `"Id", "Div"`.
    subdiv_offsets: A list of `ints`.
    wait_for: An optional list of `ints`. Defaults to `[]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # NOTE: machine-generated op wrapper. `wait_for=[]` is the generated-code
  # convention for an optional list attr; the default is never mutated.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path: raw attr values are passed straight to the C layer.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "CollectiveReduce", name, _ctx._post_execution_callbacks, input,
        "group_size", group_size, "group_key", group_key, "instance_key",
        instance_key, "merge_op", merge_op, "final_op", final_op,
        "subdiv_offsets", subdiv_offsets, "wait_for", wait_for)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the slow-path eager helper.
      try:
        return collective_reduce_eager_fallback(
            input, group_size=group_size, group_key=group_key,
            instance_key=instance_key, merge_op=merge_op, final_op=final_op,
            subdiv_offsets=subdiv_offsets, wait_for=wait_for, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise C-level status errors with the op name appended for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph-construction path: canonicalize every attr, then add an op node.
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  merge_op = _execute.make_str(merge_op, "merge_op")
  final_op = _execute.make_str(final_op, "final_op")
  if not isinstance(subdiv_offsets, (list, tuple)):
    raise TypeError("Expected list for 'subdiv_offsets' argument to "
                    "'collective_reduce' Op, not %r." % subdiv_offsets)
  subdiv_offsets = [_execute.make_int(_i, "subdiv_offsets") for _i in subdiv_offsets]
  if wait_for is None:
    wait_for = []
  if not isinstance(wait_for, (list, tuple)):
    raise TypeError("Expected list for 'wait_for' argument to "
                    "'collective_reduce' Op, not %r." % wait_for)
  wait_for = [_execute.make_int(_i, "wait_for") for _i in wait_for]
  _, _, _op = _op_def_lib._apply_op_helper(
      "CollectiveReduce", input=input, group_size=group_size,
      group_key=group_key, instance_key=instance_key, merge_op=merge_op,
      final_op=final_op, subdiv_offsets=subdiv_offsets, wait_for=wait_for,
      name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "group_size",
            _op.get_attr("group_size"), "group_key",
            _op.get_attr("group_key"), "instance_key",
            _op.get_attr("instance_key"), "merge_op",
            _op.get_attr("merge_op"), "final_op", _op.get_attr("final_op"),
            "subdiv_offsets", _op.get_attr("subdiv_offsets"), "wait_for",
            _op.get_attr("wait_for"))
  _execute.record_gradient("CollectiveReduce", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def _ctc_beam_search_decoder(inputs, sequence_length, beam_width, top_paths, merge_repeated=True, name=None):
  r"""Performs beam search decoding on the logits given in input.

  A note about the attribute merge_repeated: For the beam search decoder,
  this means that if consecutive entries in a beam are the same, only
  the first of these is emitted.  That is, when the top path is "A B B B B",
  "A B" is returned if merge_repeated = True but "A B B B B" is
  returned if merge_repeated = False.

  Args:
    inputs: A `Tensor` of type `float32`.
      3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    sequence_length: A `Tensor` of type `int32`.
      A vector containing sequence lengths, size `(batch)`.
    beam_width: An `int` that is `>= 1`.
      A scalar >= 0 (beam search beam width).
    top_paths: An `int` that is `>= 1`.
      A scalar >= 0, <= beam_width (controls output size).
    merge_repeated: An optional `bool`. Defaults to `True`.
      If true, merge repeated classes in output.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (decoded_indices, decoded_values, decoded_shape, log_probability).

    decoded_indices: A list of `top_paths` `Tensor` objects with type `int64`.
      A list (length: top_paths) of indices matrices.  Matrix j,
      size `(total_decoded_outputs[j] x 2)`, has indices of a
      `SparseTensor<int64, 2>`.  The rows store: [batch, time].
    decoded_values: A list of `top_paths` `Tensor` objects with type `int64`.
      A list (length: top_paths) of values vectors.  Vector j,
      size `(length total_decoded_outputs[j])`, has the values of a
      `SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j.
    decoded_shape: A list of `top_paths` `Tensor` objects with type `int64`.
      A list (length: top_paths) of shape vector.  Vector j,
      size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
      Its values are: `[batch_size, max_decoded_length[j]]`.
    log_probability: A `Tensor` of type `float32`.
      A matrix, shaped: `(batch_size x top_paths)`.  The
      sequence log-probabilities.
  """
  # NOTE: machine-generated op wrapper (older graph-first style using
  # `_ctx.in_graph_mode()`).
  beam_width = _execute.make_int(beam_width, "beam_width")
  top_paths = _execute.make_int(top_paths, "top_paths")
  if merge_repeated is None:
    merge_repeated = True
  merge_repeated = _execute.make_bool(merge_repeated, "merge_repeated")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    # Graph mode: add an op node; attrs are read back off the created op.
    _, _, _op = _op_def_lib._apply_op_helper(
        "CTCBeamSearchDecoder", inputs=inputs,
        sequence_length=sequence_length, beam_width=beam_width,
        top_paths=top_paths, merge_repeated=merge_repeated, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("beam_width", _op.get_attr("beam_width"), "top_paths",
              _op.get_attr("top_paths"), "merge_repeated",
              _op.get_attr("merge_repeated"))
  else:
    # Eager mode: the op has 3 * top_paths + 1 outputs
    # (indices/values/shape per path, plus log_probability).
    inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
    sequence_length = _ops.convert_to_tensor(sequence_length, _dtypes.int32)
    _inputs_flat = [inputs, sequence_length]
    _attrs = ("beam_width", beam_width, "top_paths", top_paths,
              "merge_repeated", merge_repeated)
    _result = _execute.execute(b"CTCBeamSearchDecoder", top_paths +
                               top_paths + top_paths + 1,
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
  _execute.record_gradient("CTCBeamSearchDecoder", _inputs_flat, _attrs, _result, name)
  # Regroup the flat output list into
  # [indices_list, values_list, shape_list, log_probability]: each slice of
  # `top_paths` tensors is collapsed into one nested list element.
  _result = [_result[:top_paths]] + _result[top_paths:]
  _result = _result[:1] + [_result[1:1 + top_paths]] + _result[1 + top_paths:]
  _result = _result[:2] + [_result[2:2 + top_paths]] + _result[2 + top_paths:]
  _result = _CTCBeamSearchDecoderOutput._make(_result)
  return _result
def collective_gather(input, group_size, group_key, instance_key, shape, name=None):
  r"""Mutually accumulates multiple tensors of identical type and shape.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `half`,
      `float64`, `int32`, `int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # NOTE: machine-generated op wrapper.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "CollectiveGather", name, _ctx._post_execution_callbacks, input,
        "group_size", group_size, "group_key", group_key, "instance_key",
        instance_key, "shape", shape)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the slow-path eager helper.
      try:
        return collective_gather_eager_fallback(
            input, group_size=group_size, group_key=group_key,
            instance_key=instance_key, shape=shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise C-level status errors with the op name appended for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph-construction path.
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  _, _, _op = _op_def_lib._apply_op_helper(
      "CollectiveGather", input=input, group_size=group_size,
      group_key=group_key, instance_key=instance_key, shape=shape,
      name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "group_size",
            _op.get_attr("group_size"), "group_key",
            _op.get_attr("group_key"), "instance_key",
            _op.get_attr("instance_key"), "shape", _op.get_attr("shape"))
  _execute.record_gradient("CollectiveGather", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def _ctc_loss(inputs, labels_indices, labels_values, sequence_length, preprocess_collapse_repeated=False, ctc_merge_repeated=True, ignore_longer_outputs_than_inputs=False, name=None):
  r"""Calculates the CTC Loss (log probability) for each batch entry.  Also calculates

  the gradient.  This class performs the softmax operation for you, so inputs
  should be e.g. linear projections of outputs by an LSTM.

  Args:
    inputs: A `Tensor` of type `float32`.
      3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    labels_indices: A `Tensor` of type `int64`.
      The indices of a `SparseTensor<int32, 2>`.
      `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
      `(batch b, time t)`.
    labels_values: A `Tensor` of type `int32`.
      The values (labels) associated with the given batch and time.
    sequence_length: A `Tensor` of type `int32`.
      A vector containing sequence lengths (batch).
    preprocess_collapse_repeated: An optional `bool`. Defaults to `False`.
      Scalar, if true then repeated labels are
      collapsed prior to the CTC calculation.
    ctc_merge_repeated: An optional `bool`. Defaults to `True`.
      Scalar.  If set to false, *during* CTC calculation
      repeated non-blank labels will not be merged and are interpreted as
      individual labels.  This is a simplified version of CTC.
    ignore_longer_outputs_than_inputs: An optional `bool`. Defaults to `False`.
      Scalar. If set to true, during CTC
      calculation, items that have longer output sequences than input sequences
      are skipped: they don't contribute to the loss term and have zero-gradient.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (loss, gradient).

    loss: A `Tensor` of type `float32`. A vector (batch) containing log-probabilities.
    gradient: A `Tensor` of type `float32`. The gradient of `loss`.  3-D, shape:
      `(max_time x batch_size x num_classes)`.
  """
  # NOTE: machine-generated op wrapper (older graph-first style).
  # Canonicalize the three optional bool attrs first.
  if preprocess_collapse_repeated is None:
    preprocess_collapse_repeated = False
  preprocess_collapse_repeated = _execute.make_bool(
      preprocess_collapse_repeated, "preprocess_collapse_repeated")
  if ctc_merge_repeated is None:
    ctc_merge_repeated = True
  ctc_merge_repeated = _execute.make_bool(ctc_merge_repeated, "ctc_merge_repeated")
  if ignore_longer_outputs_than_inputs is None:
    ignore_longer_outputs_than_inputs = False
  ignore_longer_outputs_than_inputs = _execute.make_bool(
      ignore_longer_outputs_than_inputs, "ignore_longer_outputs_than_inputs")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    # Graph mode: add an op node; attrs are read back off the created op.
    _, _, _op = _op_def_lib._apply_op_helper(
        "CTCLoss", inputs=inputs, labels_indices=labels_indices,
        labels_values=labels_values, sequence_length=sequence_length,
        preprocess_collapse_repeated=preprocess_collapse_repeated,
        ctc_merge_repeated=ctc_merge_repeated,
        ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("preprocess_collapse_repeated",
              _op.get_attr("preprocess_collapse_repeated"),
              "ctc_merge_repeated", _op.get_attr("ctc_merge_repeated"),
              "ignore_longer_outputs_than_inputs",
              _op.get_attr("ignore_longer_outputs_than_inputs"))
  else:
    # Eager mode: convert inputs to the fixed dtypes and execute directly.
    # The op has exactly 2 outputs (loss, gradient).
    inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
    labels_indices = _ops.convert_to_tensor(labels_indices, _dtypes.int64)
    labels_values = _ops.convert_to_tensor(labels_values, _dtypes.int32)
    sequence_length = _ops.convert_to_tensor(sequence_length, _dtypes.int32)
    _inputs_flat = [inputs, labels_indices, labels_values, sequence_length]
    _attrs = ("preprocess_collapse_repeated", preprocess_collapse_repeated,
              "ctc_merge_repeated", ctc_merge_repeated,
              "ignore_longer_outputs_than_inputs",
              ignore_longer_outputs_than_inputs)
    _result = _execute.execute(b"CTCLoss", 2, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("CTCLoss", _inputs_flat, _attrs, _result, name)
  _result = _CTCLossOutput._make(_result)
  return _result
def kmeans_plus_plus_initialization(points, num_to_sample, seed, num_retries_per_sample, name=None):
  r"""Selects num_to_sample rows of input using the KMeans++ criterion.

  Rows of points are assumed to be input points. One row is selected at random.
  Subsequent rows are sampled with probability proportional to the squared L2
  distance from the nearest row selected thus far till num_to_sample rows have
  been sampled.

  Args:
    points: A `Tensor` of type `float32`.
      Matrix of shape (n, d). Rows are assumed to be input points.
    num_to_sample: A `Tensor` of type `int64`.
      Scalar. The number of rows to sample. This value must not be
      larger than n.
    seed: A `Tensor` of type `int64`.
      Scalar. Seed for initializing the random number generator.
    num_retries_per_sample: A `Tensor` of type `int64`.
      Scalar. For each row that is sampled, this parameter
      specifies the number of additional points to draw from the current
      distribution before selecting the best. If a negative value is specified, a
      heuristic is used to sample O(log(num_to_sample)) additional points.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  # NOTE: machine-generated op wrapper (newer TF2 style: `pywrap_tfe`,
  # `must_record_gradient`). Exception order in the fast path is deliberate:
  # status errors are re-raised immediately, fallback errors fall through to
  # the slow-path call below.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "KmeansPlusPlusInitialization", name, points, num_to_sample,
        seed, num_retries_per_sample)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    # Slow-path eager execution.
    try:
      return kmeans_plus_plus_initialization_eager_fallback(
          points, num_to_sample, seed, num_retries_per_sample, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "KmeansPlusPlusInitialization", points=points,
      num_to_sample=num_to_sample, seed=seed,
      num_retries_per_sample=num_retries_per_sample, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Op has no attrs; record an empty attr tuple.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient("KmeansPlusPlusInitialization", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def _tensor_summary(tensor, description="", labels=[], display_name="", name=None):
  r"""Outputs a `Summary` protocol buffer with a tensor.

  This op is being phased out in favor of TensorSummaryV2, which lets callers pass
  a tag as well as a serialized SummaryMetadata proto string that contains
  plugin-specific data. We will keep this op to maintain backwards compatibility.

  Args:
    tensor: A `Tensor`. A tensor to serialize.
    description: An optional `string`. Defaults to `""`.
      A json-encoded SummaryDescription proto.
    labels: An optional list of `strings`. Defaults to `[]`.
      An unused list of strings.
    display_name: An optional `string`. Defaults to `""`. An unused string.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  # NOTE: machine-generated op wrapper. `labels=[]` is a mutable default but
  # is never mutated (it is re-bound below), matching the generated-code
  # convention for optional list attrs.
  if description is None:
    description = ""
  description = _execute.make_str(description, "description")
  if labels is None:
    labels = []
  if not isinstance(labels, (list, tuple)):
    raise TypeError("Expected list for 'labels' argument to "
                    "'tensor_summary' Op, not %r." % labels)
  labels = [_execute.make_str(_s, "labels") for _s in labels]
  if display_name is None:
    display_name = ""
  display_name = _execute.make_str(display_name, "display_name")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    # Graph mode: add an op node; attrs are read back off the created op.
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorSummary", tensor=tensor, description=description,
        labels=labels, display_name=display_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "description",
              _op.get_attr("description"), "labels", _op.get_attr("labels"),
              "display_name", _op.get_attr("display_name"))
  else:
    # Eager mode: infer the T attr from the input tensor and execute directly.
    _attr_T, (tensor, ) = _execute.args_to_matching_eager([tensor], _ctx)
    _inputs_flat = [tensor]
    _attrs = ("T", _attr_T, "description", description, "labels", labels,
              "display_name", display_name)
    _result = _execute.execute(b"TensorSummary", 1, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("TensorSummary", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def nccl_all_reduce(input, reduction, num_devices, shared_name, name=None):
  r"""Outputs a tensor containing the reduction across all input tensors passed to ops

  within the same `shared_name`.

  The graph should be constructed so if one op runs with shared_name value `c`,
  then `num_devices` ops will run with shared_name value `c`.  Failure to do so
  will cause the graph execution to fail to complete.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`,
      `float64`, `int32`, `int64`.
      the input to the reduction
    reduction: A `string` from: `"min", "max", "prod", "sum"`.
      the reduction operation to perform.
    num_devices: An `int`.
      The number of devices participating in this reduction.
    shared_name: A `string`.
      Identifier that shared between ops of the same reduction.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
    the value of the reduction across all `num_devices` devices.
  """
  # NOTE: machine-generated op wrapper (graph-path-first variant using
  # `_ctx._eager_context`).
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph-construction path: canonicalize attrs, then add an op node.
    reduction = _execute.make_str(reduction, "reduction")
    num_devices = _execute.make_int(num_devices, "num_devices")
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "NcclAllReduce", input=input, reduction=reduction,
        num_devices=num_devices, shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("reduction", _op.get_attr("reduction"), "T",
              _op.get_attr("T"), "num_devices",
              _op.get_attr("num_devices"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient("NcclAllReduce", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element list.
    _result, = _result
    return _result
  else:
    # Eager fast path, with a slow-path fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "NcclAllReduce", name, _ctx._post_execution_callbacks, input,
        "reduction", reduction, "num_devices", num_devices, "shared_name",
        shared_name)
      return _result
    except _core._FallbackException:
      return nccl_all_reduce_eager_fallback(
          input, reduction=reduction, num_devices=num_devices,
          shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise C-level status errors with the op name appended for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def audio_summary(tag, tensor, sample_rate, max_outputs=3, name=None):
  r"""Outputs a `Summary` protocol buffer with audio.

  The summary has up to `max_outputs` summary values containing audio. The
  audio is built from `tensor` which must be 3-D with shape `[batch_size,
  frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
  assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.

  The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
  build the `tag` of the summary values:

  *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
  *  If `max_outputs` is greater than 1, the summary value tags are
     generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

  Args:
    tag: A `Tensor` of type `string`.
      Scalar. Used to build the `tag` attribute of the summary values.
    tensor: A `Tensor` of type `float32`. 2-D of shape `[batch_size, frames]`.
    sample_rate: A `float`. The sample rate of the signal in hertz.
    max_outputs: An optional `int` that is `>= 1`. Defaults to `3`.
      Max number of batch elements to generate audio for.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  # NOTE: machine-generated op wrapper (TF 1.15-era style: `tld.op_callbacks`,
  # `must_record_gradient`).
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "AudioSummary", name,
        tld.op_callbacks, tag, tensor, "sample_rate", sample_rate,
        "max_outputs", max_outputs)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the slow-path eager helper.
      try:
        return audio_summary_eager_fallback(
            tag, tensor, sample_rate=sample_rate, max_outputs=max_outputs,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # Graph-construction path.
  sample_rate = _execute.make_float(sample_rate, "sample_rate")
  if max_outputs is None:
    max_outputs = 3
  max_outputs = _execute.make_int(max_outputs, "max_outputs")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "AudioSummary", tag=tag, tensor=tensor, sample_rate=sample_rate,
      max_outputs=max_outputs, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("sample_rate", _op.get_attr("sample_rate"), "max_outputs",
              _op._get_attr_int("max_outputs"))
    _inputs_flat = _op.inputs
    _execute.record_gradient("AudioSummary", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def _ctc_greedy_decoder(inputs, sequence_length, merge_repeated=False, name=None):
  r"""Performs greedy decoding on the logits given in inputs.

  A note about the attribute merge_repeated: if enabled, when
  consecutive logits' maximum indices are the same, only the first of
  these is emitted.  Labeling the blank '*', the sequence "A B B * B B"
  becomes "A B B" if merge_repeated = True and "A B B B B" if
  merge_repeated = False.

  Regardless of the value of merge_repeated, if the maximum index of a given
  time and batch corresponds to the blank, index `(num_classes - 1)`, no new
  element is emitted.

  Args:
    inputs: A `Tensor` of type `float32`.
      3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    sequence_length: A `Tensor` of type `int32`.
      A vector containing sequence lengths, size `(batch_size)`.
    merge_repeated: An optional `bool`. Defaults to `False`.
      If True, merge repeated classes in output.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (decoded_indices, decoded_values, decoded_shape, log_probability).

    decoded_indices: A `Tensor` of type `int64`.
      Indices matrix, size `(total_decoded_outputs x 2)`,
      of a `SparseTensor<int64, 2>`.  The rows store: [batch, time].
    decoded_values: A `Tensor` of type `int64`.
      Values vector, size: `(total_decoded_outputs)`,
      of a `SparseTensor<int64, 2>`.  The vector stores the decoded classes.
    decoded_shape: A `Tensor` of type `int64`.
      Shape vector, size `(2)`, of the decoded SparseTensor.
      Values are: `[batch_size, max_decoded_length]`.
    log_probability: A `Tensor` of type `float32`.
      Matrix, size `(batch_size x 1)`, containing sequence
      log-probabilities.
  """
  # NOTE: machine-generated op wrapper (older graph-first style).
  if merge_repeated is None:
    merge_repeated = False
  merge_repeated = _execute.make_bool(merge_repeated, "merge_repeated")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    # Graph mode: add an op node; attrs are read back off the created op.
    _, _, _op = _op_def_lib._apply_op_helper(
        "CTCGreedyDecoder", inputs=inputs, sequence_length=sequence_length,
        merge_repeated=merge_repeated, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("merge_repeated", _op.get_attr("merge_repeated"))
  else:
    # Eager mode: the op has exactly 4 outputs
    # (decoded_indices, decoded_values, decoded_shape, log_probability).
    inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
    sequence_length = _ops.convert_to_tensor(sequence_length, _dtypes.int32)
    _inputs_flat = [inputs, sequence_length]
    _attrs = ("merge_repeated", merge_repeated)
    _result = _execute.execute(b"CTCGreedyDecoder", 4, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("CTCGreedyDecoder", _inputs_flat, _attrs, _result, name)
  _result = _CTCGreedyDecoderOutput._make(_result)
  return _result
def trt_engine_op(in_tensor, serialized_segment, OutT, workspace_size_bytes, precision_mode, segment_func="", max_cached_engines_count=1, calibration_data="", use_calibration=True, segment_funcdef_name="", cached_engine_batches=[], fixed_input_size=True, input_shapes=[], output_shapes=[], static_engine=True, name=None):
  r"""TODO: add doc.

  Args:
    in_tensor: A list of `Tensor` objects with types from: `int8`, `half`,
      `float32`, `int32`.
    serialized_segment: A `string`.
    OutT: A list of `tf.DTypes` from: `tf.int8, tf.half, tf.float32, tf.int32`
      that has length `>= 1`.
    workspace_size_bytes: An `int`.
    precision_mode: A `string` from: `"FP32", "FP16", "INT8"`.
    segment_func: An optional function decorated with @Defun. Defaults to `""`.
    max_cached_engines_count: An optional `int`. Defaults to `1`.
    calibration_data: An optional `string`. Defaults to `""`.
    use_calibration: An optional `bool`. Defaults to `True`.
    segment_funcdef_name: An optional `string`. Defaults to `""`.
    cached_engine_batches: An optional list of `ints`. Defaults to `[]`.
    fixed_input_size: An optional `bool`. Defaults to `True`.
    input_shapes: An optional list of shapes (each a `tf.TensorShape` or list
      of `ints`). Defaults to `[]`.
    output_shapes: An optional list of shapes (each a `tf.TensorShape` or list
      of `ints`). Defaults to `[]`.
    static_engine: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `OutT`.
  """
  # NOTE: machine-generated op wrapper (TF-TRT). The attr order in the fast
  # path and attr tuple below matches the registered op def; keep it intact.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "TRTEngineOp", name, _ctx.post_execution_callbacks, in_tensor,
        "serialized_segment", serialized_segment, "segment_func",
        segment_func, "OutT", OutT, "max_cached_engines_count",
        max_cached_engines_count, "workspace_size_bytes",
        workspace_size_bytes, "precision_mode", precision_mode,
        "calibration_data", calibration_data, "use_calibration",
        use_calibration, "segment_funcdef_name", segment_funcdef_name,
        "cached_engine_batches", cached_engine_batches, "fixed_input_size",
        fixed_input_size, "input_shapes", input_shapes, "output_shapes",
        output_shapes, "static_engine", static_engine)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the slow-path eager helper.
      try:
        return trt_engine_op_eager_fallback(
            in_tensor, serialized_segment=serialized_segment,
            segment_func=segment_func, OutT=OutT,
            max_cached_engines_count=max_cached_engines_count,
            workspace_size_bytes=workspace_size_bytes,
            precision_mode=precision_mode,
            calibration_data=calibration_data,
            use_calibration=use_calibration,
            segment_funcdef_name=segment_funcdef_name,
            cached_engine_batches=cached_engine_batches,
            fixed_input_size=fixed_input_size, input_shapes=input_shapes,
            output_shapes=output_shapes, static_engine=static_engine,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Give registered dispatchers (e.g. for custom types) a chance.
        result = _dispatch.dispatch(
              trt_engine_op, in_tensor=in_tensor,
              serialized_segment=serialized_segment, OutT=OutT,
              workspace_size_bytes=workspace_size_bytes,
              precision_mode=precision_mode, segment_func=segment_func,
              max_cached_engines_count=max_cached_engines_count,
              calibration_data=calibration_data,
              use_calibration=use_calibration,
              segment_funcdef_name=segment_funcdef_name,
              cached_engine_batches=cached_engine_batches,
              fixed_input_size=fixed_input_size, input_shapes=input_shapes,
              output_shapes=output_shapes, static_engine=static_engine,
              name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Re-raise C-level status errors with the op name appended for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph-construction path: canonicalize every attr, then add an op node.
  serialized_segment = _execute.make_str(serialized_segment, "serialized_segment")
  if not isinstance(OutT, (list, tuple)):
    raise TypeError("Expected list for 'OutT' argument to "
                    "'trt_engine_op' Op, not %r." % OutT)
  OutT = [_execute.make_type(_t, "OutT") for _t in OutT]
  workspace_size_bytes = _execute.make_int(workspace_size_bytes, "workspace_size_bytes")
  precision_mode = _execute.make_str(precision_mode, "precision_mode")
  if segment_func is None:
    segment_func = ""
  if max_cached_engines_count is None:
    max_cached_engines_count = 1
  max_cached_engines_count = _execute.make_int(max_cached_engines_count, "max_cached_engines_count")
  if calibration_data is None:
    calibration_data = ""
  calibration_data = _execute.make_str(calibration_data, "calibration_data")
  if use_calibration is None:
    use_calibration = True
  use_calibration = _execute.make_bool(use_calibration, "use_calibration")
  if segment_funcdef_name is None:
    segment_funcdef_name = ""
  segment_funcdef_name = _execute.make_str(segment_funcdef_name, "segment_funcdef_name")
  if cached_engine_batches is None:
    cached_engine_batches = []
  if not isinstance(cached_engine_batches, (list, tuple)):
    raise TypeError(
        "Expected list for 'cached_engine_batches' argument to "
        "'trt_engine_op' Op, not %r." % cached_engine_batches)
  cached_engine_batches = [_execute.make_int(_i, "cached_engine_batches") for _i in cached_engine_batches]
  if fixed_input_size is None:
    fixed_input_size = True
  fixed_input_size = _execute.make_bool(fixed_input_size, "fixed_input_size")
  if input_shapes is None:
    input_shapes = []
  if not isinstance(input_shapes, (list, tuple)):
    raise TypeError("Expected list for 'input_shapes' argument to "
                    "'trt_engine_op' Op, not %r." % input_shapes)
  input_shapes = [_execute.make_shape(_s, "input_shapes") for _s in input_shapes]
  if output_shapes is None:
    output_shapes = []
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError("Expected list for 'output_shapes' argument to "
                    "'trt_engine_op' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if static_engine is None:
    static_engine = True
  static_engine = _execute.make_bool(static_engine, "static_engine")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TRTEngineOp", in_tensor=in_tensor,
        serialized_segment=serialized_segment, OutT=OutT,
        workspace_size_bytes=workspace_size_bytes,
        precision_mode=precision_mode, segment_func=segment_func,
        max_cached_engines_count=max_cached_engines_count,
        calibration_data=calibration_data, use_calibration=use_calibration,
        segment_funcdef_name=segment_funcdef_name,
        cached_engine_batches=cached_engine_batches,
        fixed_input_size=fixed_input_size, input_shapes=input_shapes,
        output_shapes=output_shapes, static_engine=static_engine, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          trt_engine_op, in_tensor=in_tensor,
          serialized_segment=serialized_segment, OutT=OutT,
          workspace_size_bytes=workspace_size_bytes,
          precision_mode=precision_mode, segment_func=segment_func,
          max_cached_engines_count=max_cached_engines_count,
          calibration_data=calibration_data, use_calibration=use_calibration,
          segment_funcdef_name=segment_funcdef_name,
          cached_engine_batches=cached_engine_batches,
          fixed_input_size=fixed_input_size, input_shapes=input_shapes,
          output_shapes=output_shapes, static_engine=static_engine,
          name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("serialized_segment", _op.get_attr("serialized_segment"),
            "segment_func", _op.get_attr("segment_func"), "InT",
            _op.get_attr("InT"), "OutT", _op.get_attr("OutT"),
            "max_cached_engines_count",
            _op.get_attr("max_cached_engines_count"),
            "workspace_size_bytes", _op.get_attr("workspace_size_bytes"),
            "precision_mode", _op.get_attr("precision_mode"),
            "calibration_data", _op.get_attr("calibration_data"),
            "use_calibration", _op.get_attr("use_calibration"),
            "segment_funcdef_name", _op.get_attr("segment_funcdef_name"),
            "cached_engine_batches", _op.get_attr("cached_engine_batches"),
            "fixed_input_size", _op.get_attr("fixed_input_size"),
            "input_shapes", _op.get_attr("input_shapes"), "output_shapes",
            _op.get_attr("output_shapes"), "static_engine",
            _op.get_attr("static_engine"))
  _execute.record_gradient("TRTEngineOp", _inputs_flat, _attrs, _result, name)
  # List-output op (length OutT): return the full output list.
  return _result
def _image_summary(tag, tensor, max_images=3,
                   # NOTE: this default is evaluated once at import time; the
                   # body also re-applies the same default when None is passed.
                   bad_color=_execute.make_tensor(
                       """dtype: DT_UINT8 tensor_shape { dim { size: 4 } } int_val: 255 int_val: 0 int_val: 0 int_val: 255""",
                       "bad_color"),
                   name=None):
  r"""Outputs a `Summary` protocol buffer with images.

  The summary has up to `max_images` summary values containing images. The
  images are built from `tensor` which must be 4-D with shape
  `[batch_size, height, width, channels]` and where `channels` can be:

  *  1: `tensor` is interpreted as Grayscale.
  *  3: `tensor` is interpreted as RGB.
  *  4: `tensor` is interpreted as RGBA.

  The images have the same number of channels as the input tensor. For float
  input, the values are normalized one image at a time to fit in the range
  `[0, 255]`.  `uint8` values are unchanged.  The op uses two different
  normalization algorithms:

  *  If the input values are all positive, they are rescaled so the largest
     one is 255.
  *  If any input value is negative, the values are shifted so input value
     0.0 is at 127.  They are then rescaled so that either the smallest value
     is 0, or the largest one is 255.

  The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
  build the `tag` of the summary values:

  *  If `max_images` is 1, the summary value tag is '*tag*/image'.
  *  If `max_images` is greater than 1, the summary value tags are generated
     sequentially as '*tag*/image/0', '*tag*/image/1', etc.

  The `bad_color` argument is the color to use in the generated images for
  non-finite input values.  It is a `uint8` 1-D tensor of length `channels`.
  Each element must be in the range `[0, 255]` (it represents the value of a
  pixel in the output image).  Non-finite values in the input tensor are
  replaced by this tensor in the output image.  The default value is the
  color red.

  Args:
    tag: A `Tensor` of type `string`. Scalar. Used to build the `tag`
      attribute of the summary values.
    tensor: A `Tensor`. Must be one of the following types: `uint8`,
      `float32`, `half`, `float64`. 4-D of shape
      `[batch_size, height, width, channels]` where `channels` is 1, 3, or 4.
    max_images: An optional `int` that is `>= 1`. Defaults to `3`. Max number
      of batch elements to generate images for.
    bad_color: An optional `tf.TensorProto`. Defaults to a 4-element uint8
      tensor holding `[255, 0, 0, 255]` (red). Color to use for pixels with
      non-finite values.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  # Normalize Python-level attr values into the forms the op expects.
  if max_images is None:
    max_images = 3
  max_images = _execute.make_int(max_images, "max_images")
  if bad_color is None:
    bad_color = _execute.make_tensor(
        """dtype: DT_UINT8 tensor_shape { dim { size: 4 } } int_val: 255 int_val: 0 int_val: 0 int_val: 255""",
        "bad_color")
  # Idempotent re-application: also validates/converts caller-supplied values.
  bad_color = _execute.make_tensor(bad_color, "bad_color")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    # Graph mode: add an ImageSummary node and read attrs back off the op.
    _, _, _op = _op_def_lib._apply_op_helper(
        "ImageSummary", tag=tag, tensor=tensor, max_images=max_images,
        bad_color=bad_color, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("max_images", _op.get_attr("max_images"), "T",
              _op.get_attr("T"), "bad_color", _op.get_attr("bad_color"))
  else:
    # Eager mode: resolve the T attr from the tensor argument (defaulting to
    # float32) and execute the kernel immediately.
    _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx,
                                                         _dtypes.float32)
    tag = _ops.convert_to_tensor(tag, _dtypes.string)
    _inputs_flat = [tag, tensor]
    _attrs = ("max_images", max_images, "T", _attr_T, "bad_color", bad_color)
    _result = _execute.execute(b"ImageSummary", 1, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("ImageSummary", _inputs_flat, _attrs, _result,
                           name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def resampler_grad(data, warp, grad_output, name=None):
  r"""Resampler Grad op.

  Args:
    data: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
      `float32`, `float64`.
    warp: A `Tensor`. Must have the same type as `data`.
    grad_output: A `Tensor`. Must have the same type as `data`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (grad_data, grad_warp).

    grad_data: A `Tensor`. Has the same type as `data`.
    grad_warp: A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: execute through the C API directly.
      # NOTE(review): other wrappers in this file pass
      # _ctx._post_execution_callbacks here; confirm which attribute this
      # TF version exposes.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._thread_local_data.device_name,
          "ResamplerGrad", name, _ctx.post_execution_callbacks, data, warp,
          grad_output)
      _result = _ResamplerGradOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the slower eager fallback.
      try:
        return resampler_grad_eager_fallback(
            data, warp, grad_output, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Let registered dispatchers (e.g. for custom tensor-like types)
        # handle the call before re-raising.
        result = _dispatch.dispatch(
            resampler_grad, data=data, warp=warp, grad_output=grad_output,
            name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Surface kernel errors as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ResamplerGrad", data=data, warp=warp, grad_output=grad_output,
        name=name)
  except (TypeError, ValueError):
    # Same dispatcher escape hatch for graph construction.
    result = _dispatch.dispatch(
        resampler_grad, data=data, warp=warp, grad_output=grad_output,
        name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "ResamplerGrad", _inputs_flat, _attrs, _result, name)
  # Wrap the two outputs (grad_data, grad_warp) in the named tuple.
  _result = _ResamplerGradOutput._make(_result)
  return _result
def reduce_slice_sum(data, indices, axis, name=None):
  r"""Dynamically sum over the first dimension of a tensor according to start and end

  indices specified at 'index'.

  For example:

  ```prettyprint
  # if 'data' is [[   1,   2,   3]
                  [  40,  50,  60]
                  [ 700, 800, 900]
                  [1000,2000,3000]],

  and 'indices' is
                 [[0,1]
                  [1,1]
                  [0,2]],

  the output will be
                 [[ 1, 2, 3]
                  [ 0, 0, 0]
                  [41,52,63]].
  ```

  The data must be at least rank 1. The indices must be of shape (?,2) where
  the first column is start indices and the second column is end indices. The
  end indices are not included in the reduce operation, which means, if you
  want to do a reduce over indices 0,1,2, then you should have start index 0
  and end index 3. If end index is smaller than or equal to start, the result
  will be zero. If end index is out of bounds, then the reduce operation will
  automatically stop at the bound, so feel free to put a large number as your
  end of your index if you want to do the reduction until the bound.

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`,
      `half`, `uint32`, `uint64`.
      The source of data where the computation will be taken from.
    indices: A `Tensor`. Must be one of the following types: `int32`,
      `int64`. start, end indices that controls which part to be included.
    axis: A `Tensor` of type `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`. the computed sum values.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: execute through the C API directly.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._thread_local_data.device_name,
          "ReduceSliceSum", name, _ctx._post_execution_callbacks, data,
          indices, axis)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the slower eager fallback.
      try:
        return reduce_slice_sum_eager_fallback(
            data, indices, axis, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Let registered dispatchers (e.g. for custom tensor-like types)
        # handle the call before re-raising.
        result = _dispatch.dispatch(
            reduce_slice_sum, data=data, indices=indices, axis=axis,
            name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Surface kernel errors as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ReduceSliceSum", data=data, indices=indices, axis=axis, name=name)
  except (TypeError, ValueError):
    # Same dispatcher escape hatch for graph construction.
    result = _dispatch.dispatch(
        reduce_slice_sum, data=data, indices=indices, axis=axis, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
  _execute.record_gradient(
      "ReduceSliceSum", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def adjust_hsv_in_yiq(images, delta_h, scale_s, scale_v, name=None):
  r"""Adjust the YIQ hue of one or more images.

  `images` is a tensor of at least 3 dimensions.  The last dimension is
  interpreted as channels, and must be three.

  We used linear transformation described in:
  beesbuzz.biz/code/hsv_color_transforms.php
  The input image is considered in the RGB colorspace. Conceptually, the RGB
  colors are first mapped into YIQ space, rotated around the Y channel by
  delta_h in radians, multiplying the chrominance channels (I, Q) by scale_s,
  multiplying all channels (Y, I, Q) by scale_v, and then remapped back to
  RGB colorspace. Each operation described above is a linear transformation.

  Args:
    images: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
      `int16`, `int32`, `int64`, `half`, `float32`, `float64`.
      Images to adjust. At least 3-D.
    delta_h: A `Tensor` of type `float32`.
      A float scale that represents the hue rotation amount, in radians.
      Although delta_h can be any float value.
    scale_s: A `Tensor` of type `float32`.
      A float scale that represents the factor to multiply the saturation by.
      scale_s needs to be non-negative.
    scale_v: A `Tensor` of type `float32`.
      A float scale that represents the factor to multiply the value by.
      scale_v needs to be non-negative.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `images`.
    The hsv-adjusted image or images. No clipping will be done in this op.
    The client can clip them using additional ops in their graph.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: execute through the C API directly.
      # NOTE(review): other wrappers in this file pass
      # _ctx._post_execution_callbacks here; confirm which attribute this
      # TF version exposes.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._thread_local_data.device_name,
          "AdjustHsvInYiq", name, _ctx.post_execution_callbacks, images,
          delta_h, scale_s, scale_v)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the slower eager fallback.
      try:
        return adjust_hsv_in_yiq_eager_fallback(
            images, delta_h, scale_s, scale_v, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Let registered dispatchers (e.g. for custom tensor-like types)
        # handle the call before re-raising.
        result = _dispatch.dispatch(
            adjust_hsv_in_yiq, images=images, delta_h=delta_h,
            scale_s=scale_s, scale_v=scale_v, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Surface kernel errors as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "AdjustHsvInYiq", images=images, delta_h=delta_h, scale_s=scale_s,
        scale_v=scale_v, name=name)
  except (TypeError, ValueError):
    # Same dispatcher escape hatch for graph construction.
    result = _dispatch.dispatch(
        adjust_hsv_in_yiq, images=images, delta_h=delta_h, scale_s=scale_s,
        scale_v=scale_v, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "AdjustHsvInYiq", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result