Example #1
def type_spec_from_nested(x):
    if isinstance(x, dict):
        return {k: type_spec_from_nested(v) for k, v in x.items()}
    elif isinstance(x, tuple):
        return tuple(type_spec_from_nested(v) for v in x)
    else:
        return tf.type_spec_from_value(x)
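
A minimal usage sketch (assuming TensorFlow 2.x imported as tf, with the function above in scope):

import tensorflow as tf

nested = {"a": tf.constant([1, 2]), "b": (tf.constant(1.0),)}
type_spec_from_nested(nested)
# {'a': TensorSpec(shape=(2,), dtype=tf.int32, name=None),
#  'b': (TensorSpec(shape=(), dtype=tf.float32, name=None),)}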
Example #2
 def convert(self, tensor: TensorAlike) -> List[pa.Array]:
     """Converts the given TensorAlike to pa.Arrays after validating its spec."""
     if tf.__version__ < "2":
         if isinstance(tensor, np.ndarray):
             actual_spec = tf.TensorSpec(tensor.shape,
                                         tf.dtypes.as_dtype(tensor.dtype))
         elif isinstance(tensor, tf.compat.v1.SparseTensorValue):
             actual_spec = tf.SparseTensorSpec(tensor.dense_shape,
                                               tensor.values.dtype)
         elif isinstance(tensor, tf.compat.v1.ragged.RaggedTensorValue):
             actual_spec = tf.RaggedTensorSpec(
                 tensor.shape,
                 tensor.values.dtype,
                 ragged_rank=tensor.ragged_rank,
                 row_splits_dtype=tensor.row_splits.dtype)
         else:
             raise TypeError(
                 "Only ndarrays, SparseTensorValues and "
                 "RaggedTensorValues are supported with TF 1.x, "
                 "got {}".format(type(tensor)))
     else:
         actual_spec = tf.type_spec_from_value(tensor)
     if not self._type_spec.is_compatible_with(actual_spec):
         raise TypeError("Expected {} but got {}".format(
             self._type_spec, actual_spec))
     return self._convert_internal(tensor)
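
For reference, on TF 2.x the else branch delegates to tf.type_spec_from_value, which handles composite tensors directly. A quick illustration with a SparseTensor:

st = tf.sparse.SparseTensor(indices=[[0, 0]], values=[1], dense_shape=[2, 2])
tf.type_spec_from_value(st)
# SparseTensorSpec(TensorShape([2, 2]), tf.int32)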
Example #3
 def _maybe_convert_to_spec(p):
     if isinstance(p, distribution_utils.Params):
         return _convert_to_spec_and_remove_singleton_batch_dim(
             p, outer_ndim)
     elif tf.is_tensor(p):
         return nest_utils.remove_singleton_batch_spec_dim(
             tf.type_spec_from_value(p), outer_ndim=outer_ndim)
     else:
         return p
Example #4
    @classmethod
    def from_tensor(cls, tensor):
        """Convert a traced (composite) tensor to a representative KerasTensor."""
        if isinstance(tensor, tf.Tensor):
            name = getattr(tensor, 'name', None)
            type_spec = tf.type_spec_from_value(tensor)
            inferred_value = None
            if (type_spec.dtype == tf.int32
                    and type_spec.shape.rank is not None
                    and type_spec.shape.rank < 2):
                # If this tensor might represent shape information
                # (dtype=int32, rank 0 or 1, not too large to represent a
                # shape), we attempt to capture any value information that
                # TensorFlow's shape handling can extract from the current
                # scratch graph.
                #
                # Even though Keras layers each trace in their own scratch
                # graph, this extraction captures a sizable and useful subset
                # of the value inference that TF's C++ shape inference can
                # perform when all shape-related tf ops appear in one graph.
                #
                # Examples where this cannot infer concrete dimensions, but
                # full single-graph C++ shape inference sometimes can:
                # * cases where the shape tensor is cast out of int32 before being
                #   manipulated w/ floating point numbers then converted back
                # * cases where int32 tensors w/ rank >= 2 are manipulated before being
                #   used as a shape tensor
                # * cases where int32 tensors too large to represent shapes are
                #   manipulated to a smaller size before being used as a shape tensor
                inferred_value = tf.ones(shape=tensor).shape
                if inferred_value.dims:
                    inferred_value = inferred_value.as_list()
                    if len(inferred_value) > _MAX_TENSOR_RANK:
                        inferred_value = None
                else:
                    inferred_value = None

            return KerasTensor(type_spec,
                               inferred_value=inferred_value,
                               name=name)
        else:
            # Fallback to the generic arbitrary-typespec KerasTensor
            name = getattr(tensor, 'name', None)
            type_spec = tf.type_spec_from_value(tensor)
            return cls(type_spec, name=name)
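
A sketch of the shape-value inference described in the comments above, run eagerly here for illustration (inside Keras it happens in a scratch FuncGraph):

x = tf.constant([2, 3])           # rank-1 int32 tensor that may encode a shape
tf.ones(shape=x).shape.as_list()  # [2, 3] -- the value recovered by shape inference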
Example #5
File: tf_utils.py  Project: z-a-f/keras-1
def type_spec_from_value(value):
  """Grab type_spec without converting array-likes to tensors."""
  if is_extension_type(value):
    return value._type_spec  # pylint: disable=protected-access
  # Get a TensorSpec for array-like data without
  # converting the data to a Tensor
  if hasattr(value, 'shape') and hasattr(value, 'dtype'):
    return tf.TensorSpec(value.shape, value.dtype)
  else:
    return tf.type_spec_from_value(value)
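
A minimal sketch of the array-like fast path (assuming NumPy is available and is_extension_type comes from the same tf_utils module); note that no conversion to a tf.Tensor takes place:

import numpy as np

type_spec_from_value(np.zeros((3, 4), dtype=np.float32))
# TensorSpec(shape=(3, 4), dtype=tf.float32, name=None)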
Example #6
 def get_callable_input_specs(fn):
     # TODO(b/170241499): Update after TF adds support for specs to model.
     input_specs = {}
     for input_name, input_tensor in zip(fn.input_names, fn.inputs):
         if hasattr(input_tensor, 'type_spec'):
             # "KerasTensor" types have type_spec attributes.
             type_spec = input_tensor.type_spec
         else:
             type_spec = tf.type_spec_from_value(input_tensor)
         input_specs[input_name] = type_spec
     return input_specs
Example #7
 def _calc_unbatched_spec(x):
     if isinstance(x, tfp.distributions.Distribution):
         parameters = distribution_utils.get_parameters(x)
         parameter_specs = _convert_to_spec_and_remove_singleton_batch_dim(
             parameters, outer_ndim=outer_ndim)
         return distribution_utils.DistributionSpecV2(
             event_shape=x.event_shape,
             dtype=x.dtype,
             parameters=parameter_specs)
     else:
         return nest_utils.remove_singleton_batch_spec_dim(
             tf.type_spec_from_value(x), outer_ndim=outer_ndim)
Example #8
def get_input_specs(
    model: Any,
    signature_name: Optional[Text] = None) -> Optional[Dict[Text, tf.TypeSpec]]:
  """Returns the input names and tensor specs associated with callable or None.

  Args:
    model: A model that is callable or contains a `signatures` attribute. If
      neither of these conditions is met, then None will be returned.
    signature_name: Optional name of signature to use. If not provided then
      either the default serving signature will be used (if model is not
      callable) or the model itself will be used (if the model is callable). If
      provided then model.signatures will be used regardless of whether the
      model is callable or not.

  Returns:
    Dict mapping input names to their associated tensor specs or None if no
    callable could be found.

  Raises:
    ValueError: If signature_name not found in model.signatures.
  """
  if not hasattr(model, 'signatures') and not is_callable_model(model):
    return None

  if not signature_name:
    # Special support for keras-based models.
    if is_callable_model(model):
      # TODO(b/170241499): Update after TF adds support for specs to model.
      input_specs = {}
      for input_name, input_tensor in zip(model.input_names, model.inputs):
        if hasattr(input_tensor, 'type_spec'):
          # "KerasTensor" types have type_spec attributes.
          type_spec = input_tensor.type_spec
        else:
          type_spec = tf.type_spec_from_value(input_tensor)
        input_specs[input_name] = type_spec
      return input_specs
    signature_name = get_default_signature_name(model)

  if signature_name not in model.signatures:
    raise ValueError('{} not found in model signatures: {}'.format(
        signature_name, model.signatures))
  signature = model.signatures[signature_name]

  # structured_input_signature is an (args, kwargs) tuple; we currently only
  # support named params passed as a dict (i.e. via kwargs).
  if (signature.structured_input_signature and
      len(signature.structured_input_signature) == 2 and
      isinstance(signature.structured_input_signature[1], dict)):
    return signature.structured_input_signature[1]

  return None
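
A hypothetical usage sketch with a small Keras model (exact input names vary by Keras version, and is_callable_model is assumed to recognize Keras models):

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
get_input_specs(model)
# e.g. {'dense_input': TensorSpec(shape=(None, 4), dtype=tf.float32,
#                                 name='dense_input')}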
Example #9
 def get_callable_input_specs(fn):
   # Look up the model input spec once instead of calling the helper twice.
   model_input_spec = _get_model_input_spec(fn)
   if isinstance(model_input_spec, dict):
     return model_input_spec
   else:
     input_specs = {}
     for input_name, input_tensor in zip(fn.input_names, fn.inputs):
       if hasattr(input_tensor, 'type_spec'):
         # "KerasTensor" types have type_spec attributes.
         type_spec = input_tensor.type_spec
       else:
         type_spec = tf.type_spec_from_value(input_tensor)
       input_specs[input_name] = type_spec
     return input_specs
Example #10
    def output_type_specs(self) -> Dict[Text, common_types.TensorTypeSpec]:
        """Returns the tf.TypeSpecs of the decoded tensors.

    Returns:
      A dict whose keys are the same as keys of the dict returned by
      `decode_record()` and values are the tf.TypeSpec of the corresponding
      (composite) tensor.
    """
        return {
            k: tf.type_spec_from_value(v)
            for k, v in
            self._make_concrete_decode_function().structured_outputs.items()
        }
Example #11
 def get_callable_input_specs(fn):
     # TODO(b/170241499): Update after TF adds support for specs to model.
     if hasattr(fn, '_get_save_spec') and isinstance(
             fn._get_save_spec(), dict):  # pylint: disable=protected-access
         return fn._get_save_spec()  # pylint: disable=protected-access
     else:
         input_specs = {}
         for input_name, input_tensor in zip(fn.input_names, fn.inputs):
             if hasattr(input_tensor, 'type_spec'):
                 # "KerasTensor" types have type_spec attributes.
                 type_spec = input_tensor.type_spec
             else:
                 type_spec = tf.type_spec_from_value(input_tensor)
             input_specs[input_name] = type_spec
         return input_specs
Example #12
        def _calc_unbatched_spec(x):
            """Build Network output spec by removing previously added batch dimension.

      Args:
        x: tfp.distributions.Distribution or Tensor.
      Returns:
        Specs without batch dimension representing x.
      """
            if isinstance(x, tfp.distributions.Distribution):
                parameters = distribution_utils.get_parameters(x)
                parameter_specs = _convert_to_spec_and_remove_singleton_batch_dim(
                    parameters, outer_ndim=1)
                return distribution_utils.DistributionSpecV2(
                    event_shape=x.event_shape,
                    dtype=x.dtype,
                    parameters=parameter_specs)
            else:
                return tensor_spec.remove_outer_dims_nest(
                    tf.type_spec_from_value(x), num_outer_dims=1)
Example #13
    def testNonLegacyDistribution(self):
        if not tf.executing_eagerly():
            self.skipTest(
                'Skipping test: sequential networks not supported in TF1')

        actor_network = create_sequential_actor_net()
        action_spec = {'my_action': self._action_spec}
        value_network = DummyValueNet()

        policy = ppo_policy.PPOPolicy(self._time_step_spec,
                                      action_spec,
                                      actor_network=actor_network,
                                      value_network=value_network)

        distribution_step = policy.distribution(self._time_step)
        self.assertIsInstance(distribution_step.action['my_action'],
                              tfp.distributions.TransformedDistribution)

        expected_info_spec = {
            'dist_params': {
                'my_action': {
                    'bijector': {
                        'bijectors:0': {},
                        'bijectors:1': {},
                        'bijectors:2': {}
                    },
                    'distribution': {
                        'scale': tf.TensorSpec([1], tf.float32),
                        'loc': tf.TensorSpec([1], tf.float32)
                    },
                }
            },
            'value_prediction': tf.TensorSpec([1, 1], tf.float32)
        }

        tf.nest.map_structure(
            lambda v, s: self.assertEqual(tf.type_spec_from_value(v), s),
            distribution_step.info, expected_info_spec)
Example #14
 def remove_singleton_batch_spec_dim(t):
     # Convert tensor to its type-spec, and remove the batch dimension
     # from the spec.
     spec = tf.type_spec_from_value(t)
     return nest_utils.remove_singleton_batch_spec_dim(spec,
                                                       outer_ndim=1)
Example #15
File: network.py  Project: panwliu/agents
def create_variables(module: typing.Union[Network, tf.keras.layers.Layer],
                     input_spec: typing.Optional[types.NestedTensorSpec] = None,
                     **kwargs: typing.Any) -> types.NestedTensorSpec:
  """Create variables in `module` given `input_spec`; return `output_spec`.

  Here `module` can be a `Network`, and we will soon also support Keras
  layers (and possibly Sonnet layers).

  Args:
    module: The instance we would like to create layers on.
    input_spec: The input spec (excluding batch dimensions).
    **kwargs: Extra arguments to `module.__call__`, e.g. `training=True`.

  Returns:
    Output specs, a nested `tf.TypeSpec` describing the output signature.
  """
  # NOTE(ebrevdo): As a side effect, for generic keras Layers (not Networks)
  # this method stores new hidden properties in `module`:
  # `_network_output_spec`, `_network_state_spec`, `_merged_output_and_state`
  # - which internal TF-Agents libraries make use of.
  if isinstance(module, Network):
    return module.create_variables(input_spec, **kwargs)

  # Generic keras layer
  if input_spec is None:
    raise ValueError(
        "Module is a Keras layer; an input_spec is required but saw "
        "None: {}".format(module))

  maybe_spec = getattr(module, "_network_output_spec", None)
  if maybe_spec is not None:
    return maybe_spec

  # Has state outputs - so expect that a state input is required,
  # and output[1:] are output states.
  recurrent_layer = getattr(module, "get_initial_state", None) is not None

  # Required input rank
  outer_ndim = _get_input_outer_ndim(module, input_spec)

  random_input = tensor_spec.sample_spec_nest(
      input_spec, outer_dims=(1,) * outer_ndim)

  if recurrent_layer:
    state = module.get_initial_state(random_input)
    state_spec = tf.nest.map_structure(
        lambda s: nest_utils.remove_singleton_batch_spec_dim(  # pylint: disable=g-long-lambda
            tf.type_spec_from_value(s),
            outer_ndim=1),
        state)
    outputs = module(random_input, state, **kwargs)
    # tf.keras.layers.{LSTM,RNN,GRU} with return_state==True
    # return outputs of the form [output, state1, state2, ...]
    #
    # While tf.keras.layers.{LSTMCell, ...} return
    # (output, [state1, state2,...]).
    layer_config = module.get_config()
    merged_output_and_state = layer_config.get("return_state", False)
    if isinstance(module, recurrent.RNN):
      if not merged_output_and_state:
        # This is an RNN layer that doesn't return state.  Excludes individual
        # cells.
        raise ValueError(
            "Provided a Keras RNN layer with return_state==False. "
            "This configuration is not supported.  Layer: {}".format(module))
      if not layer_config.get("return_sequences", False):
        raise ValueError(
            "Provided a Keras RNN layer with return_sequences==False. "
            "This configuration is not supported.  Layer: {}".format(module))
    output = outputs[0]
  else:
    output = module(random_input, **kwargs)
    state_spec = ()
    merged_output_and_state = False

  def _calc_unbatched_spec(x):
    if isinstance(x, tfp.distributions.Distribution):
      return None
    else:
      return nest_utils.remove_singleton_batch_spec_dim(
          tf.type_spec_from_value(x), outer_ndim=outer_ndim)

  # pylint: disable=protected-access
  module._network_output_spec = tf.nest.map_structure(_calc_unbatched_spec,
                                                      output)
  module._network_state_spec = state_spec
  module._merged_output_and_state = merged_output_and_state

  return module._network_output_spec
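
A hypothetical usage sketch for the plain-Keras-layer path (the shape shown is what the singleton-batch round trip would suggest; not verified against a specific TF-Agents release):

layer = tf.keras.layers.Dense(8)
output_spec = create_variables(layer, input_spec=tf.TensorSpec([3], tf.float32))
# output_spec is roughly TensorSpec(shape=(8,), dtype=tf.float32): the layer is
# called on a sampled (1, 3) input and the singleton batch dim is removed again.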
Example #16
 def _calc_unbatched_spec(x):
     if isinstance(x, tfp.distributions.Distribution):
         return None
     else:
         return nest_utils.remove_singleton_batch_spec_dim(
             tf.type_spec_from_value(x), outer_ndim=1)
Example #17
 def convert(self, tensor: TensorAlike) -> List[pa.Array]:
   if not self._type_spec.is_compatible_with(tensor):
     raise TypeError("Expected {} but got {}".format(
         self._type_spec, tf.type_spec_from_value(tensor)))
   return self._convert_internal(tensor)
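
Note that TypeSpec.is_compatible_with accepts concrete values as well as specs, which is why the TensorAlike can be passed in directly:

spec = tf.TensorSpec([None, 3], tf.float32)
spec.is_compatible_with(tf.zeros([2, 3]))  # True: the value matches the spec
spec.is_compatible_with(tf.zeros([2, 4]))  # False: last dimension mismatches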