def from_tensor(cls, tensor):
    """Convert a traced (composite) tensor to a representative KerasTensor.

    Args:
      tensor: A `tf.Tensor` or composite tensor produced while tracing in a
        scratch graph.

    Returns:
      An instance of `cls` wrapping the tensor's `TypeSpec`. For small int32
      tensors that may represent shapes, any statically inferable value is
      attached as `inferred_value`.
    """
    name = getattr(tensor, "name", None)
    type_spec = tf.type_spec_from_value(tensor)
    if isinstance(tensor, tf.Tensor):
        inferred_value = None
        if (type_spec.dtype == tf.int32 and type_spec.shape.rank is not None
                and type_spec.shape.rank < 2):
            # If this tensor might be representing shape information,
            # (dtype=int32, rank of 0 or 1, not too large to represent a
            # shape) we attempt to capture any value information
            # tensorflow's shape handling can extract from the current
            # scratch graph.
            #
            # Even though keras layers each trace in their own scratch
            # graph, this shape value info extraction allows us to capture a
            # sizable and useful subset of the C++ shape value inference TF
            # can do if all tf ops appear in the same graph when using shape
            # ops.
            #
            # Examples of things this cannot infer concrete dimensions for
            # that the full single-graph C++ shape inference sometimes can
            # are:
            # * cases where the shape tensor is cast out of int32 before
            #   being manipulated w/ floating point numbers then converted
            #   back
            # * cases where int32 tensors w/ rank >= 2 are manipulated
            #   before being used as a shape tensor
            # * cases where int32 tensors too large to represent shapes are
            #   manipulated to a smaller size before being used as a shape
            #   tensor
            inferred_value = tf.ones(shape=tensor).shape
            if inferred_value.dims:
                inferred_value = inferred_value.as_list()
                if len(inferred_value) > _MAX_TENSOR_RANK:
                    inferred_value = None
            else:
                inferred_value = None
        # BUGFIX: use `cls` rather than a hard-coded `KerasTensor` so that
        # subclasses round-trip through `from_tensor` as themselves; this
        # also matches the fallback branch below.
        return cls(type_spec, inferred_value=inferred_value, name=name)
    else:
        # Fallback to the generic arbitrary-typespec KerasTensor
        return cls(type_spec, name=name)
def type_spec_from_value(value):
    """Grab type_spec without converting array-likes to tensors."""
    # Extension types already carry their own spec.
    if is_extension_type(value):
        return value._type_spec  # pylint: disable=protected-access
    # Array-like objects expose `shape` and `dtype`; build a TensorSpec
    # directly from those so the data is never converted to a Tensor.
    looks_array_like = hasattr(value, 'shape') and hasattr(value, 'dtype')
    if looks_array_like:
        return tf.TensorSpec(value.shape, value.dtype)
    # Everything else: defer to TensorFlow's generic spec extraction.
    return tf.type_spec_from_value(value)
def type_spec_from_value(value):
    """Grab type_spec without converting array-likes to tensors."""
    if is_extension_type(value):
        # Extension types provide their spec directly.
        return value._type_spec
    if hasattr(value, "shape") and hasattr(value, "dtype"):
        # Get a TensorSpec for array-like data without converting the
        # data to a Tensor.
        return tf.TensorSpec(value.shape, value.dtype)
    # Fall back to TensorFlow's own spec inference for all other values.
    return tf.type_spec_from_value(value)
def get_json_type(obj):
    """Serializes any object to a JSON-serializable structure.

    Args:
      obj: the object to serialize

    Returns:
      JSON-serializable structure representing `obj`.

    Raises:
      TypeError: if `obj` cannot be serialized.
      ValueError: if `obj` is a TypeSpec whose class is unregistered.
    """
    # if obj is a serializable Keras class instance
    # e.g. optimizer, layer
    if hasattr(obj, 'get_config'):
        serialized = generic_utils.serialize_keras_object(obj)
        serialized['__passive_serialization__'] = True
        return serialized

    # if obj is any numpy type
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()

    # misc functions (e.g. loss function)
    if callable(obj):
        return obj.__name__

    # if obj is a python 'type'
    if type(obj).__name__ == type.__name__:
        return obj.__name__

    if isinstance(obj, tf.compat.v1.Dimension):
        return obj.value

    if isinstance(obj, tf.TensorShape):
        return obj.as_list()

    if isinstance(obj, tf.DType):
        return obj.name

    if isinstance(obj, collections.abc.Mapping):
        return dict(obj)

    if obj is Ellipsis:
        return {'class_name': '__ellipsis__'}

    if isinstance(obj, wrapt.ObjectProxy):
        return obj.__wrapped__

    if isinstance(obj, tf.TypeSpec):
        try:
            type_spec_name = type_spec.get_name(type(obj))
            return {
                'class_name': 'TypeSpec',
                'type_spec': type_spec_name,
                'serialized': obj._serialize()
            }  # pylint: disable=protected-access
        except ValueError as e:
            # BUGFIX: chain the original lookup error (PEP 3134) so the
            # underlying registration failure stays visible in tracebacks.
            raise ValueError(
                f'Unable to serialize {obj} to JSON, because the TypeSpec '
                f'class {type(obj)} has not been registered.') from e

    if isinstance(obj, tf.__internal__.CompositeTensor):
        spec = tf.type_spec_from_value(obj)
        tensors = []
        # Flatten to component tensors; each is recorded as (dtype name,
        # nested-list value) so the result is plain JSON data.
        for tensor in tf.nest.flatten(obj, expand_composites=True):
            tensors.append((tensor.dtype.name, tensor.numpy().tolist()))
        return {
            'class_name': 'CompositeTensor',
            'spec': get_json_type(spec),
            'tensors': tensors
        }

    if isinstance(obj, enum.Enum):
        return obj.value

    raise TypeError(
        f'Unable to serialize {obj} to JSON. Unrecognized type {type(obj)}.')
def _calc_unbatched_spec(x):
    """Return the unbatched TypeSpec for `x` (None for distributions)."""
    # Distributions carry no tensor spec to unbatch.
    if isinstance(x, tfp.distributions.Distribution):
        return None
    spec = tf.type_spec_from_value(x)
    # NOTE: `outer_ndim` is a free variable captured from the enclosing
    # scope — this helper is meant to be defined inside a function that
    # provides it.
    return nest_utils.remove_singleton_batch_spec_dim(
        spec, outer_ndim=outer_ndim)
def remove_singleton_batch_spec_dim(t):
    """Drop the singleton batch dimension from the type spec of `t`."""
    # Convert the value to its TypeSpec first, then strip the outermost
    # (batch) dimension via the nest_utils helper.
    return nest_utils.remove_singleton_batch_spec_dim(
        tf.type_spec_from_value(t), outer_ndim=1)