# NOTE(review): collapsed extraction fragment. This is the BODY of a tensor
# conversion function (presumably `_indexed_slices_to_tensor(value, dtype,
# name, as_ref)` — its `def` header lies before this view; confirm upstream).
# Only whitespace/formatting restored below; all code tokens are unchanged.
_ = as_ref  # as_ref is accepted for registry-signature compatibility but unused.
# Reject a conversion request whose dtype conflicts with the slices' dtype.
if dtype and not dtype.is_compatible_with(value.dtype):
  raise ValueError(
      "Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
      (dtype.name, value.dtype.name))
# A dense tensor cannot be built without knowing the dense shape.
if value.dense_shape is None:
  raise ValueError(
      "Tensor conversion requested for IndexedSlices without dense_shape: %s" %
      str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
if not context.executing_eagerly():
  # Graph mode: try to resolve the dense shape statically so we can warn
  # before materializing a huge dense tensor.
  dense_shape_value = tensor_util.constant_value(value.dense_shape)
  if dense_shape_value is not None:
    num_elements = np.prod(dense_shape_value)
    # _LARGE_SPARSE_NUM_ELEMENTS is defined elsewhere in this file (not
    # visible here) — the threshold above which densification is flagged.
    if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
      warnings.warn(
          "Converting sparse IndexedSlices to a dense Tensor with %d "
          "elements. This may consume a large amount of memory." %
          num_elements)
  else:
    # Shape not statically known — warn unconditionally.
    warnings.warn(
        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
        "This may consume a large amount of memory.")
# Densify: scatter-add the slice values into a zero tensor of the dense
# shape. unsorted_segment_sum handles duplicate indices by summing them.
return math_ops.unsorted_segment_sum(
    value.values, value.indices, value.dense_shape[0], name=name)


# Module-level side effect: make the converter above the registered
# Tensor-conversion path for IndexedSlices.
tensor_conversion_registry.register_tensor_conversion_function(
    IndexedSlices, _indexed_slices_to_tensor)
# NOTE(review): collapsed extraction fragment — formatting restored, code
# tokens unchanged. The leading `return` belongs to a function whose header
# is before this view; confirm against the full file.
return TPUEncodedUInt8


class TPUEncodedUInt8(composite_tensor.CompositeTensor):
  # Composite tensor that wraps a TPU-encoded uint8 payload alongside the
  # original (pre-encoding) shape.
  # encoded        — the encoded tensor payload (assumed to have a .shape;
  #                  presumably a tf.Tensor — TODO confirm).
  # original_shape — shape of the data before encoding; stored as given and
  #                  also normalized into the spec via tf.TensorShape.

  def __init__(self, encoded, shape):
    self.encoded = encoded
    self.original_shape = shape
    # TPUEncodedUInt8Spec is defined elsewhere in this file (not visible in
    # this view).
    self._spec = TPUEncodedUInt8Spec(encoded.shape, tf.TensorShape(shape))

  @property
  def _type_spec(self):
    # Required CompositeTensor hook: the TypeSpec describing this value.
    return self._spec


# Module-level side effect: converting a TPUEncodedUInt8 to a Tensor simply
# unwraps the encoded payload.
tensor_conversion_registry.register_tensor_conversion_function(
    TPUEncodedUInt8, lambda value, *unused_args, **unused_kwargs: value.encoded)


class TPUEncodedF32Spec(tf.TypeSpec):
  """Type specification for composite tensor TPUEncodedF32Spec."""

  # NOTE(review): this class appears truncated by the view boundary — a
  # tf.TypeSpec subclass normally also defines `_from_components` (and often
  # `value_type`); confirm the remainder in the full file.

  def __init__(self, encoded_shape, original_shape):
    # Single-component spec: the encoded payload is a float32 tensor of
    # `encoded_shape`; `original_shape` is carried as metadata.
    self._value_specs = (tf.TensorSpec(encoded_shape, tf.float32), )
    self.original_shape = original_shape

  @property
  def _component_specs(self):
    return self._value_specs

  def _to_components(self, value):
    # Decompose a composite value into its single encoded component.
    return (value.encoded, )