Example #1
  def _unshard_shape(self, shape):
    """Return the unsharded shape that would generate a given sharded shape.

    Args:
      shape: the sharded shape to unshard

    Returns:
      The unsharded shape.

    Raises:
      ValueError: if shape is unknown or does not contain
        self.shard_dimension
      TypeError: if shape is not convertible to a TensorShape
    """
    shape = tensor_shape.as_shape(shape)
    if self._number_of_shards == 1:
      # Don't do anything when there's only one shard.
      return shape
    ndims = shape.ndims
    if ndims is None:
      raise ValueError("shape must be a specified shape not Unknown")
    if ndims <= self._shard_dimension:
      raise ValueError("shape %s does not contain shard_dimension %d" %
                       (shape.as_list(), self._shard_dimension))
    dims = shape.as_list()
    dims[self._shard_dimension] *= self._number_of_shards
    return tensor_shape.as_shape(dims)
  def compute_output_shape(self, input_shape):
    if isinstance(input_shape, list):
      input_shape = input_shape[0]

    if _is_multiple_state(self.cell.state_size):
      state_size = self.cell.state_size
    else:
      state_size = [self.cell.state_size]

    if getattr(self.cell, 'output_size', None) is not None:
      output_dim = tensor_shape.as_shape(self.cell.output_size).as_list()
    else:
      # Note that state_size[0] could be a tensor_shape or int.
      output_dim = tensor_shape.as_shape(state_size[0]).as_list()

    if self.return_sequences:
      output_shape = tuple([input_shape[0], input_shape[1]] + output_dim)
    else:
      output_shape = tuple([input_shape[0]] + output_dim)

    if self.return_state:
      state_shape = [
          tuple([input_shape[0]] + tensor_shape.as_shape(dim).as_list())
          for dim in state_size
      ]
      return [output_shape] + state_shape
    else:
      return output_shape
def _concat(prefix, suffix, static=False):
  """Concat that enables int, Tensor, or TensorShape values.

  This function takes a size specification, which can be an integer, a
  TensorShape, or a Tensor, and converts it into a concatenated Tensor
  (if static = False) or a list of integers (if static = True).

  Args:
    prefix: The prefix; usually the batch size (and/or time step size).
      (TensorShape, int, or Tensor.)
    suffix: TensorShape, int, or Tensor.
    static: If `True`, return a python list with possibly unknown dimensions.
      Otherwise return a `Tensor`.

  Returns:
    shape: the concatenation of prefix and suffix.

  Raises:
    ValueError: if `suffix` is not a scalar or vector (or TensorShape).
    ValueError: if prefix or suffix was `None` and asked for dynamic
      Tensors out.
  """
  if isinstance(prefix, ops.Tensor):
    p = prefix
    p_static = tensor_util.constant_value(prefix)
    if p.shape.ndims == 0:
      p = array_ops.expand_dims(p, 0)
    elif p.shape.ndims != 1:
      raise ValueError("prefix tensor must be either a scalar or vector, "
                       "but saw tensor: %s" % p)
  else:
    p = tensor_shape.as_shape(prefix)
    p_static = p.as_list() if p.ndims is not None else None
    p = (constant_op.constant(p.as_list(), dtype=dtypes.int32)
         if p.is_fully_defined() else None)
  if isinstance(suffix, ops.Tensor):
    s = suffix
    s_static = tensor_util.constant_value(suffix)
    if s.shape.ndims == 0:
      s = array_ops.expand_dims(s, 0)
    elif s.shape.ndims != 1:
      raise ValueError("suffix tensor must be either a scalar or vector, "
                       "but saw tensor: %s" % s)
  else:
    s = tensor_shape.as_shape(suffix)
    s_static = s.as_list() if s.ndims is not None else None
    s = (constant_op.constant(s.as_list(), dtype=dtypes.int32)
         if s.is_fully_defined() else None)

  if static:
    shape = tensor_shape.as_shape(p_static).concatenate(s_static)
    shape = shape.as_list() if shape.ndims is not None else None
  else:
    if p is None or s is None:
      raise ValueError("Provided a prefix or suffix of None: %s and %s"
                       % (prefix, suffix))
    shape = array_ops.concat((p, s), 0)
  return shape
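For illustration, a minimal sketch (not part of the snippet above) of the two output modes using only public TensorFlow APIs; the batch and state sizes are made up:

import tensorflow as tf

# Static mode: a plain Python list, possibly containing None dimensions.
static_shape = tf.TensorShape([32]).concatenate(tf.TensorShape([10])).as_list()
# static_shape -> [32, 10]

# Dynamic mode: a 1-D int32 Tensor built with tf.concat, mirroring the
# array_ops.concat((p, s), 0) call above.
dynamic_shape = tf.concat(([32], [10]), 0)
# dynamic_shape -> tensor [32, 10]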
Example #4
def make_attr(attr_type, value):
  if attr_type == pywrap_tensorflow.TF_ATTR_TYPE:
    return dtypes.as_dtype(value)
  elif attr_type == [pywrap_tensorflow.TF_ATTR_TYPE]:
    return [dtypes.as_dtype(v) for v in value]
  elif attr_type == pywrap_tensorflow.TF_ATTR_SHAPE:
    return tensor_shape.as_shape(value).as_proto()
  elif attr_type == [pywrap_tensorflow.TF_ATTR_SHAPE]:
    return [tensor_shape.as_shape(v).as_proto() for v in value]
  return value
 def _duplicated_test(self,
                      init,
                      shape=None,
                      dtype=dtypes.float32):
   if shape is None:
     shape = [100]
   t1 = self.evaluate(init(shape, dtype))
   t2 = self.evaluate(init(shape, dtype))
   self.assertEqual(tensor_shape.as_shape(shape), t1.shape)
   self.assertEqual(tensor_shape.as_shape(shape), t2.shape)
   self.assertFalse(np.allclose(t1, t2, rtol=1e-15, atol=1e-15))
Example #6
  def get_sharded_shape(self, shape, shard_index=None):
    """Returns the shape of a shard of a full Tensor.

    When given the shape of a 'full-size' Tensor, returns the shape of
    the sub-Tensor after it has been sharded. Freezes the policy if it
    has not yet been frozen.

    Args:
      shape: The shape of the full-size Tensor to be sharded.
      shard_index: The index of the shard whose shape should be returned.
        shard_index can be None for sharding policies that use the same
        shape for every shard.
    Returns:
      The shape of the sharded version of the Tensor.

    Raises:
      ValueError: If shard_index is None when shards are of different
        shapes; or shard_index is not None and
        !(0<=shard_index<number_of_shards); or shape does not have at
        least self.shard_dimension+1 dimensions; or the value of
        shape's shard dimension is not a multiple of
        self.number_of_shards
    """
    if self._shard_dimension is None or self._number_of_shards is None:
      # Don't raise an error if the config is unset.
      return None
    if shard_index is not None:
      if shard_index < 0 or shard_index >= self.number_of_shards:
        raise ValueError("shard_index %d, but must be in [0,%d)." %
                         (shard_index, self._number_of_shards))
    shape = tensor_shape.as_shape(shape)
    if self._number_of_shards == 1:
      # Don't do anything when there's only one shard.
      return shape
    ndims = shape.ndims
    if ndims is None:
      raise ValueError("shape must be a specified shape not Unknown")
    if ndims <= self._shard_dimension:
      raise ValueError("shape %s does not contain shard_dimension %d" %
                       (shape.as_list(), self._shard_dimension))
    dims = shape.as_list()
    if dims[self._shard_dimension] is None:
      raise ValueError("shape %s must have a fixed size for dimension %d "
                       "that is known at graph construction time." %
                       (shape.as_list(), self._shard_dimension))
    if (dims[self._shard_dimension] % self._number_of_shards) != 0:
      raise ValueError("shape %s cannot be sharded %d ways along dimension %d" %
                       (shape.as_list(), self._number_of_shards,
                        self._shard_dimension))
    dims[self._shard_dimension] //= self._number_of_shards
    return tensor_shape.as_shape(dims)
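As a quick illustration of the arithmetic above (a hedged sketch using the public tf.TensorShape API, not part of the policy class): with 2 shards along dimension 0, a full shape [16, 16] shards to [8, 16], and _unshard_shape reverses this by multiplying the shard dimension back up.

import tensorflow as tf

full = tf.TensorShape([16, 16])
dims = full.as_list()
dims[0] //= 2                    # shard 2 ways along dimension 0
sharded = tf.TensorShape(dims)   # TensorShape([8, 16])

dims = sharded.as_list()
dims[0] *= 2                     # unshard: recover the full shape
restored = tf.TensorShape(dims)  # TensorShape([16, 16])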
  def testConvertFromProto(self):
    proto = tensor_util.MakeTensorShapeProto([])
    self.assertEqual(tensor_shape.TensorShape([]),
                     tensor_shape.TensorShape(proto))
    self.assertEqual(tensor_shape.TensorShape([]),
                     tensor_shape.as_shape(proto))

    proto = tensor_util.MakeTensorShapeProto([1, 37, 42])
    self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
                     tensor_shape.TensorShape(proto))
    self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
                     tensor_shape.as_shape(proto))
 def _identical_test(self,
                     init1,
                     init2,
                     assertion,
                     shape=None,
                     dtype=dtypes.float32):
   if shape is None:
     shape = [100]
   t1 = self.evaluate(init1(shape, dtype))
   t2 = self.evaluate(init2(shape, dtype))
   self.assertEqual(tensor_shape.as_shape(shape), t1.shape)
   self.assertEqual(tensor_shape.as_shape(shape), t2.shape)
   self.assertEqual(assertion, np.allclose(t1, t2, rtol=1e-15, atol=1e-15))
Example #9
def _as_shape_list(shapes, dtypes, unknown_dim_allowed=False,
                   unknown_rank_allowed=False):
  """Convert shapes to a list of tuples of int (or None)."""
  if unknown_dim_allowed:
    if (not isinstance(shapes, collections.abc.Sequence)
        or not shapes
        or any(shape is None or isinstance(shape, int) for shape in shapes)):
      raise ValueError(
          "When providing partial shapes, a list of shapes must be provided.")
  if shapes is None: return None
  if isinstance(shapes, tensor_shape.TensorShape):
    shapes = [shapes]
  if not isinstance(shapes, (tuple, list)):
    raise TypeError(
        "shapes must be a TensorShape or a list or tuple of TensorShapes.")
  if all(shape is None or isinstance(shape, int) for shape in shapes):
    # We have a single shape.
    shapes = [shapes]
  shapes = [tensor_shape.as_shape(shape) for shape in shapes]
  if not unknown_dim_allowed:
    if any([not shape.is_fully_defined() for shape in shapes]):
      raise ValueError("All shapes must be fully defined: %s" % shapes)
  if not unknown_rank_allowed:
    if any([shape.dims is None for shape in shapes]):
      raise ValueError("All shapes must have a defined rank: %s" % shapes)

  return shapes
 def _merge_batch_beams(self, t, s=None):
     """Merges the tensor from a batch of beams into a batch by beams.
     More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
     reshape this into [batch_size*beam_width, s]
     Args:
       t: Tensor of dimension [batch_size, beam_width, s]
       s: (Possibly known) depth shape.
     Returns:
       A reshaped version of t with dimension [batch_size * beam_width, s].
     """
     if isinstance(s, ops.Tensor):
         s = tensor_shape.as_shape(tensor_util.constant_value(s))
     else:
         s = tensor_shape.TensorShape(s)
     t_shape = tf.shape(t)
     static_batch_size = tensor_util.constant_value(self._batch_size)
     batch_size_beam_width = (
         None if static_batch_size is None
         else static_batch_size * self._beam_width)
     reshaped_t = tf.reshape(
         t, tf.concat(
             ([self._batch_size * self._beam_width], t_shape[2:]), 0))
     reshaped_t.set_shape(
         (tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
     return reshaped_t
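A small self-contained sketch of the same reshape (assuming eager TF 2.x; the sizes are illustrative):

import tensorflow as tf

t = tf.zeros([4, 3, 7])          # [batch_size, beam_width, depth]
merged = tf.reshape(
    t, tf.concat(([4 * 3], tf.shape(t)[2:]), 0))
# merged.shape -> (12, 7), i.e. [batch_size * beam_width, depth]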
  def testUnknownInputChannels(self):
    images = random_ops.random_uniform((5, 7, 9, 4))
    images._shape = tensor_shape.as_shape((5, 7, 9, None))
    layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(images)

    images = random_ops.random_uniform((5, 4, 7, 9))
    images._shape = tensor_shape.as_shape((5, None, 7, 9))
    layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(images)
  def testUnknownInputChannelsConv1D(self):
    data = random_ops.random_uniform((5, 4, 7))
    data._shape = tensor_shape.as_shape((5, 4, None))
    layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(data)

    data = random_ops.random_uniform((5, 7, 4))
    data._shape = tensor_shape.as_shape((5, None, 4))
    layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(data)
def _default_getter(name, shape, dtype, initializer=None,
                    partition_info=None, **kwargs):
  """A pared-down version of get_variable which does not reuse variables."""
  dtype = dtypes.as_dtype(dtype)
  shape_object = tensor_shape.as_shape(shape)
  with ops.init_scope():
    if initializer is None:
      initializer, initializing_from_value = (
          variable_scope._get_default_variable_store()._get_default_initializer(  # pylint: disable=protected-access
              name=name, shape=shape_object, dtype=dtype))
    else:
      initializing_from_value = not callable(initializer)
    # Same logic as get_variable
    variable_dtype = dtype.base_dtype
    if initializing_from_value:
      if shape is not None:
        raise ValueError("If initializer is a constant, do not specify shape.")
      initial_value = initializer
    else:
      # Instantiate initializer if provided initializer is a type object.
      if isinstance(initializer, type(init_ops.Initializer)):
        initializer = initializer(dtype=dtype)
      def initial_value():
        return initializer(
            shape_object.as_list(), dtype=dtype, partition_info=partition_info)
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value,
        name=name,
        dtype=variable_dtype,
        **kwargs
    )
  def __init__(self,
               initial_value=None,
               name=None,
               trainable=True,
               collections=None,
               dtype=None,
               shape=None):
    """Creates a variable.

    Args:
      initial_value: A `Tensor` or Python object convertible to a `Tensor`
        representing the initial value of this variable.
      name: The name of this variable. Automatically uniquified.
      trainable: Whether the global read of this variable will be used for
        training.
      collections: Additional collections to which the `read` operation for
        this variable is to be added. Defaults to [].
      dtype: The type of this variable. Can be omitted if it can be deduced
        from the initial_value. If different from the type of the initial
        value it will be cast to this type.
      shape: The shape of this variable. Only specify if there is no initial
        value but shape inference is desired.
    """
    if initial_value is not None:
      initial_value = ops.convert_to_tensor(initial_value)
    if dtype is None:
      assert initial_value is not None, ("Trying to create a resource variable "
                                         "with no dtype or initial value. At"
                                         " least one of these must be set.")
      dtype = initial_value.dtype
    elif initial_value is not None:
      initial_value = math_ops.cast(initial_value, dtype)
    if shape is None:
      if initial_value is not None:
        shape = initial_value.get_shape().as_proto()
      else:
        shape = tensor_shape.unknown_shape()
    else:
      shape = tensor_shape.as_shape(shape)

    self._dtype = dtype
    with ops.name_scope(name, "Variable", [initial_value]) as name:
      self._handle = var_handle_op(shared_name=name,
                                   name=name,
                                   dtype=dtype,
                                   shape=shape)

      with ops.name_scope("IsInitialized"):
        self._is_initialized_op = var_is_initialized_op(self._handle)
      if initial_value is not None:
        with ops.name_scope("Create"):
          self._initialize_op = create_variable_op(self._handle, initial_value)
        resources.register_resource(self._handle,
                                    self._initialize_op,
                                    self._is_initialized_op)

      with ops.name_scope("Read"):
        self._value = read_variable_op(self._handle, dtype=self._dtype)
      _register_dense_variable_read(
          self._value, trainable=trainable, collections=collections)
  def _test(self, kwargs, expected_values=None, expected_err=None):
    with self.test_session() as sess:
      if expected_err:
        with self.assertRaisesWithPredicateMatch(expected_err[0],
                                                 expected_err[1]):
          out = parsing_ops.parse_single_example_v2(**kwargs)
          sess.run(flatten_values_tensors_or_sparse(out.values()))
      else:
        # Returns dict w/ Tensors and SparseTensors.
        out = parsing_ops.parse_single_example_v2(**kwargs)
        # Check values.
        tf_result = sess.run(flatten_values_tensors_or_sparse(out.values()))
        _compare_output_to_expected(self, out, expected_values, tf_result)

      # Check shapes.
      for k, f in kwargs["features"].items():
        if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
          self.assertEqual(tuple(out[k].get_shape()),
                           tensor_shape.as_shape(f.shape))
        elif isinstance(f, parsing_ops.VarLenFeature):
          self.assertEqual(
              tuple(out[k].indices.get_shape().as_list()), (None, 1))
          self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
          self.assertEqual(
              tuple(out[k].dense_shape.get_shape().as_list()), (1,))
Example #16
def partial_shape_to_tensor(shape_like):
  """Returns a `tf.Tensor` that represents the given shape.

  Args:
    shape_like: A value that can be converted to a `tf.TensorShape` or a
      `tf.Tensor`.

  Returns:
    A 1-D `tf.Tensor` of `tf.int64` elements representing the given shape, where
    `-1` is substituted for any unknown dimensions.
  """
  try:
    # First attempt to convert the input to a shape, and return the
    # "canonical" tensor representation, which uses `-1` in place of
    # `None`.
    shape_like = tensor_shape.as_shape(shape_like)
    return ops.convert_to_tensor(
        [dim if dim is not None else -1 for dim in shape_like.as_list()],
        dtype=dtypes.int64)
  except (TypeError, ValueError):
    # The argument was not trivially convertible to a
    # `tf.TensorShape`, so fall back on the conversion to tensor
    # machinery.
    ret = ops.convert_to_tensor(shape_like, preferred_dtype=dtypes.int64)
    if ret.shape.dims is not None and len(ret.shape.dims) != 1:
      raise ValueError("The given shape %s must be a 1-D tensor of tf.int64 "
                       "values, but the shape was %s."
                       % (shape_like, ret.shape))
    if ret.dtype != dtypes.int64:
      raise TypeError("The given shape %s must be a 1-D tensor of tf.int64 "
                      "values, but the element type was %s."
                      % (shape_like, ret.dtype.name))

    return ret
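The "canonical" representation described above can be reproduced with public APIs (a sketch; partial_shape_to_tensor itself is internal to tf.data):

import tensorflow as tf

shape = tf.TensorShape([None, 37, 42])
canonical = tf.constant(
    [dim if dim is not None else -1 for dim in shape.as_list()],
    dtype=tf.int64)
# canonical -> [-1, 37, 42]; -1 stands in for the unknown dimension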
 def testUnknownInputChannelsConv3D(self):
   volumes = random_ops.random_uniform((5, 6, 7, 9, 9))
   volumes._shape = tensor_shape.as_shape((5, 6, 7, 9, None))
   layer = conv_layers.Conv3D(32, [3, 3, 3], activation=nn_ops.relu)
   with self.assertRaisesRegexp(ValueError,
                                'The channel dimension of the inputs '
                                'should be defined. Found `None`.'):
     _ = layer.apply(volumes)
 def testShapeEquals(self):
     t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
     self.assertTrue(tensor_util.ShapeEquals(t, [2, 2]))
     self.assertTrue(tensor_util.ShapeEquals(t, (2, 2)))
     self.assertTrue(tensor_util.ShapeEquals(t, tensor_shape.as_shape([2, 2]).as_proto()))
     self.assertFalse(tensor_util.ShapeEquals(t, [5, 3]))
     self.assertFalse(tensor_util.ShapeEquals(t, [1, 4]))
     self.assertFalse(tensor_util.ShapeEquals(t, [4]))
Example #19
def _merge_shapes(shape_list, enqueue_many):
  shape_list = [tensor_shape.as_shape(s) for s in shape_list]
  if enqueue_many:
    # We want the shapes without the leading batch dimension.
    shape_list = [s.with_rank_at_least(1)[1:] for s in shape_list]
  merged_shape = shape_list[0]
  for s in shape_list[1:]:
    merged_shape = merged_shape.merge_with(s)
  return merged_shape.as_list()
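For reference, TensorShape.merge_with fills in unknown dimensions and raises ValueError on incompatibility, which is what the loop above relies on; a minimal illustration:

import tensorflow as tf

merged = tf.TensorShape([None, 3]).merge_with(tf.TensorShape([2, None]))
# merged.as_list() -> [2, 3]

# Incompatible shapes raise:
# tf.TensorShape([2, 3]).merge_with(tf.TensorShape([4, 3]))  # ValueError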
Example #20
  def event_shape(self):
    """Shape of a single sample from a single batch as a `TensorShape`.

    May be partially defined or unknown.

    Returns:
      event_shape: `TensorShape`, possibly unknown.
    """
    return tensor_shape.as_shape(self._event_shape())
 def testFlatStructure(self, value_fn, expected_structure, expected_types,
                       expected_shapes):
   value = value_fn()
   s = structure.Structure.from_value(value)
   self.assertIsInstance(s, expected_structure)
   self.assertEqual(expected_types, s._flat_types)
   for expected, actual in zip(expected_shapes, s._flat_shapes):
     self.assertTrue(actual.is_compatible_with(expected))
     self.assertTrue(
         tensor_shape.as_shape(expected).is_compatible_with(actual))
Example #22
 def _shape_is_compatible_0dim(this, other):
   other = tensor_shape.as_shape(other)
   if this.ndims != other.ndims:
     return False
   for dim, (x_dim, y_dim) in enumerate(zip(this.dims, other.dims)):
     if dim == 0:
       continue
     if not x_dim.is_compatible_with(y_dim):
       return False
   return True
Example #23
  def testConvertFromProto(self):
    proto = tensor_util.MakeTensorShapeProto([])
    self.assertEqual(tensor_shape.TensorShape([]),
                     tensor_shape.TensorShape(proto))
    self.assertEqual(tensor_shape.TensorShape([]),
                     tensor_shape.as_shape(proto))

    proto = tensor_util.MakeTensorShapeProto([1, 37, 42])
    self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
                     tensor_shape.TensorShape(proto))
    self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
                     tensor_shape.as_shape(proto))

    partial_proto_shape = tensor_shape.as_shape(
        tensor_util.MakeTensorShapeProto([-1, 37, 42]))
    partial_shape = tensor_shape.TensorShape([None, 37, 42])
    self.assertNotEqual(partial_proto_shape, partial_shape)
    self.assertEqual(partial_proto_shape[0].value, None)
    self.assertEqual(partial_proto_shape[1].value, 37)
    self.assertEqual(partial_proto_shape[2].value, 42)
    self.assertTrue(partial_shape.is_compatible_with(partial_proto_shape))
Example #24
  def batch_shape(self):
    """Shape of a single sample from a single event index as a `TensorShape`.

    May be partially defined or unknown.

    The batch dimensions are indexes into independent, non-identical
    parameterizations of this distribution.

    Returns:
      batch_shape: `TensorShape`, possibly unknown.
    """
    return tensor_shape.as_shape(self._batch_shape())
Example #25
def call_cpp_shape_fn(op, input_tensors_needed=None, debug_python_shape_fn=None):
    """A shape function that delegates to the registered C++ shape function.

    Args:
      op: the node in the graph for which to compute output shapes.
      input_tensors_needed: a list of input tensor indices for which to compute
        the input tensor's value and pass to the C++ shape function.
      debug_python_shape_fn: For testing only during migration to using
        call_cpp_shape_fn. Do not submit calls that set this,
        as the comparison is slow. If non-None, the python shape function;
        this function will be called and its output compared to that of
        the C++ shape function.

    Returns:
      A TensorShape list of the output shapes of the op, as computed using the
      C++ shape inference function registered for the op.

    Raises:
      ValueError: If the C++ shape function returned an error (e.g. because the
        shapes of the inputs are of the wrong rank or otherwise incompatible
        according to the shape function).
    """
    node_def_str = op.node_def.SerializeToString()
    input_shapes = [i.get_shape().as_proto().SerializeToString() for i in op.inputs]

    input_tensors = [None for i in input_shapes]
    if input_tensors_needed:
        for idx in input_tensors_needed:
            input_tensors[idx] = tensor_util.constant_value(op.inputs[idx])
            if input_tensors[idx] is not None:
                input_tensors[idx] = np.asarray(input_tensors[idx])

    try:
        with errors.raise_exception_on_not_ok_status() as status:
            output_shapes = pywrap_tensorflow.RunCppShapeInference(node_def_str, input_shapes, input_tensors, status)
    except errors.InvalidArgumentError as err:
        raise ValueError(err.message)

    # Convert TensorShapeProto values in output_shapes.
    result = [tensor_shape.TensorShape(tensor_shape_pb2.TensorShapeProto.FromString(s)) for s in output_shapes]

    if debug_python_shape_fn:
        try:
            python_result = [tensor_shape.as_shape(s) for s in debug_python_shape_fn(op)]
        except Exception as err:
            raise AssertionError("Python shape function return error but " "C++ shape functon did not: %s" % str(err))
        if str(result) != str(python_result):
            raise ValueError(
                ("Python vs CPP shape mismatch.  " "CPP: %s vs python: %s on node %s " "with input shapes %s")
                % (str(result), str(python_result), str(op.node_def), ",".join([str(i.get_shape()) for i in op.inputs]))
            )

    return result
 def __init__(self, resource, dtype, name, shape):
   self._handle = resource
   self._graph_shape = tensor_shape.as_shape(shape)
   self._handle_device = resource.device
   self._handle_name = name
   self._cached_value = None
   self._initializer_op = None
   self._caching_device = None
   self._dtype = dtype
   self._constraint = None
   self._in_graph_mode = context.in_graph_mode()
   if self._in_graph_mode:
     self._graph_element = self.read_value()
 def _shape_is_compatible_0dim(this, other):
   """Checks that shapes are compatible skipping dim 0."""
   other = tensor_shape.as_shape(other)
   # If shapes are None (unknown) they may be compatible.
   if this.dims is None or other.dims is None:
     return True
   if this.ndims != other.ndims:
     return False
   for dim, (x_dim, y_dim) in enumerate(zip(this.dims, other.dims)):
     if dim == 0:
       continue
     if not x_dim.is_compatible_with(y_dim):
       return False
   return True
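A short sketch contrasting the helper above with plain is_compatible_with (the shapes are illustrative):

import tensorflow as tf

a = tf.TensorShape([32, 10])
b = tf.TensorShape([64, 10])
print(a.is_compatible_with(b))   # False: dimension 0 differs
# The helper above skips dimension 0, so it would treat a and b as compatible.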
  def testConvertFromProto(self):
    def make_tensor_shape_proto(shape):
      return tensor_shape_pb2.TensorShapeProto(
          dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=x) for x in shape])
    proto = make_tensor_shape_proto([])
    self.assertEqual(tensor_shape.TensorShape([]),
                     tensor_shape.TensorShape(proto))
    self.assertEqual(tensor_shape.TensorShape([]),
                     tensor_shape.as_shape(proto))

    proto = make_tensor_shape_proto([1, 37, 42])
    self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
                     tensor_shape.TensorShape(proto))
    self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
                     tensor_shape.as_shape(proto))

    partial_proto_shape = tensor_shape.as_shape(
        make_tensor_shape_proto([-1, 37, 42]))
    partial_shape = tensor_shape.TensorShape([None, 37, 42])
    self.assertNotEqual(partial_proto_shape, partial_shape)
    self.assertEqual(tensor_shape.dimension_value(partial_proto_shape[0]), None)
    self.assertEqual(tensor_shape.dimension_value(partial_proto_shape[1]), 37)
    self.assertEqual(tensor_shape.dimension_value(partial_proto_shape[2]), 42)
    self.assertTrue(partial_shape.is_compatible_with(partial_proto_shape))
Example #29
def _as_shape_list(shapes, dtypes):
  """Convert shapes to a list of tuples of int (or None)."""
  if shapes is None: return None
  if isinstance(shapes, tensor_shape.TensorShape):
    shapes = [shapes]
  if not isinstance(shapes, (tuple, list)):
    raise TypeError(
        "shapes must be a TensorShape or a list or tuple of TensorShapes.")
  if all(isinstance(shape, int) for shape in shapes):
    # We have a single shape.
    shapes = [shapes]
  shapes = [tensor_shape.as_shape(shape) for shape in shapes]
  if any(not shape.is_fully_defined() for shape in shapes):
    raise ValueError("All shapes must be fully defined.")
  return shapes
def _MakeShape(v, arg_name):
  """Convert v into a TensorShapeProto."""
  # Args:
  #   v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
  #   arg_name: String, for error messages.

  # Returns:
  #   A TensorShapeProto.
  if isinstance(v, tensor_shape_pb2.TensorShapeProto):
    for d in v.dim:
      if d.name:
        logging.warning("Warning: TensorShapeProto with a named dimension: %s",
                        str(v))
        break
    return v
  return tensor_shape.as_shape(v).as_proto()
Example #31
def _MakeShape(v, arg_name):
    """Convert v into a TensorShapeProto."""
    # Args:
    #   v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
    #   arg_name: String, for error messages.

    # Returns:
    #   A TensorShapeProto.
    if isinstance(v, tensor_shape_pb2.TensorShapeProto):
        for d in v.dim:
            if d.name:
                logging.warning(
                    "Warning: TensorShapeProto with a named dimension: %s",
                    str(v))
                break
        return v
    try:
        return tensor_shape.as_shape(v).as_proto()
    except TypeError as e:
        raise TypeError("Error converting %s to a TensorShape: %s" %
                        (arg_name, e))
    except ValueError as e:
        raise ValueError("Error converting %s to a TensorShape: %s" %
                         (arg_name, e))
Example #32
  def set_tuple_shapes(self, tuple_shapes):
    """Sets the shape of each element of the queue.

    tuple_shapes must be a list of length
    self.number_of_tuple_elements, and each element must be
    convertible to a TensorShape.

    Args:
      tuple_shapes: the shapes of each queue element.

    Raises:
      ValueError: if tuple_shapes is not of length
        self.number_of_tuple_elements.
      TypeError: if an element of tuple_shapes cannot be converted to
        a TensorShape.
    """
    if len(tuple_shapes) != self.number_of_tuple_elements:
      raise ValueError("tuple_shapes is %s, but must be a list of length %d" %
                       (str(tuple_shapes), self.number_of_tuple_elements))
    try:
      tuple_shapes = [tensor_shape.as_shape(shape) for shape in tuple_shapes]
    except (ValueError, TypeError) as e:
      raise TypeError(
          "tuple_shapes is %s, but must be a list of elements each "
          "convertible to TensorShape: got error %s" % (str(tuple_shapes),
                                                        str(e)))
    if self._frozen:
      for (frozen, updated) in zip(self._tuple_shapes, tuple_shapes):
        if frozen != updated:
          raise ValueError(
              "Trying to update InfeedQueue with frozen configuration with an "
              "incompatible shape. Frozen shapes are %s, updated shapes are %s"
              % (str(self._tuple_shapes), str(tuple_shapes)))
    else:
      self._tuple_shapes = tuple_shapes
    self._validate()
Example #33
def _state_size_with_prefix(state_size, prefix=None):
    result_state_size = tensor_shape.as_shape(state_size).as_list()
    if prefix is not None:
        result_state_size = prefix + result_state_size
    return result_state_size
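A hedged usage sketch: as_shape accepts a plain int state size (treated as a single dimension) as well as lists or TensorShapes, and the prefix typically carries the batch dimension:

import tensorflow as tf

state_size = tf.TensorShape(10).as_list()   # [10]
prefixed = [None] + state_size              # [None, 10], unknown batch size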
Example #34
 def __init__(self, x_shape, x_dtype, y_shape, y_dtype, color="red"):
     self.x_shape = tensor_shape.as_shape(x_shape)
     self.x_dtype = dtypes.as_dtype(x_dtype)
     self.y_shape = tensor_shape.as_shape(y_shape)
     self.y_dtype = dtypes.as_dtype(y_dtype)
     self.color = color
def set_shape(self, shape):
  if 'shape' in self.attr and len(self.attr['shape'].shape.dim) != len(shape):
    raise ValueError('dimension mismatch')
  shape_proto = _tensor_shape.as_shape(shape).as_proto()
  self.attr['shape'].shape.CopyFrom(shape_proto)
Example #36
def _maybe_tensor_shape_from_tensor(shape):
    if isinstance(shape, ops.Tensor):
        return tensor_shape.as_shape(tensor_util.constant_value(shape))
    else:
        return shape
    def test_gradient(self,
                      shape,
                      rt_value,
                      rt_grad,
                      default_value,
                      default_grad,
                      output_value,
                      output_grad,
                      ragged_rank=None):
        """Tests that ragged_to_dense generates the right gradient.

        Args:
          shape: The `shape` arg for `ragged_to_dense`.
          rt_value: The `rt_input` arg for `ragged_to_dense`.
          rt_grad: The expected gradient for `rt_value`.  Corresponds 1:1 with
            `rt_value`.
          default_value: The `default_value` arg for `ragged_to_dense`.
          default_grad: The expected gradient for `default_value`.  Corresponds 1:1
            with `default_value`.
          output_value: The expected output of `ragged_to_dense`.
          output_grad: The gradient for the output (used to generate the gradients
            `rt_grad` and `default_grad`).  Corresponds 1:1 with `output_value`.
          ragged_rank: Ragged rank for `rt_value`.
        """
        if context.executing_eagerly():
            return

        rt_value = ragged_factory_ops.constant(rt_value,
                                               dtype=dtypes.float32,
                                               ragged_rank=ragged_rank)
        rt_grad = ragged_factory_ops.constant(rt_grad,
                                              dtype=dtypes.float32,
                                              ragged_rank=ragged_rank)
        default_value = constant_op.constant(default_value,
                                             dtype=dtypes.float32)
        default_grad = constant_op.constant(default_grad, dtype=dtypes.float32)
        output_value = constant_op.constant(output_value,
                                            dtype=dtypes.float32,
                                            shape=shape)
        output_grad = constant_op.constant(output_grad,
                                           dtype=dtypes.float32,
                                           shape=shape)
        shape = tensor_shape.as_shape(shape)

        # There are different code paths for ragged_to_dense, depending on whether
        # the RaggedTensor was created from row_splits or value_rowids.  Make sure
        # that we test both.
        for partition_type in ['row_splits', 'value_rowids']:

            # There are different code paths when computing the gradient for
            # default_value, depending on whether shape info is statically available;
            # make sure that we test all code paths.
            for shape_info in ['known', 'unknown_dims', 'unknown_rank']:
                rt_val = self.rt_with_partition_type(rt_value, partition_type)
                rt_val = self.wrap_in_placeholder(rt_val, shape_info)
                default_val = self.wrap_in_placeholder(default_value,
                                                       shape_info)
                shape_val = self.wrap_in_placeholder(shape, shape_info)
                out = ragged_conversion_ops.ragged_to_dense(
                    rt_val, default_val, shape_val)
                self.assertAllClose(out, output_value)

                actual_flat_values_grad, actual_default_grad = gradients_impl.gradients(
                    ys=out,
                    xs=(rt_value.flat_values, default_value),
                    grad_ys=output_grad)
                self.assertIsInstance(actual_flat_values_grad,
                                      indexed_slices.IndexedSlices)
                actual_flat_values_grad = ops.convert_to_tensor(
                    actual_flat_values_grad)
                actual_values_grad = rt_value.with_flat_values(
                    actual_flat_values_grad)
                self.assertAllClose(actual_values_grad, rt_grad)
                self.assertAllClose(actual_default_grad, default_grad)
def set_attr_shape(node, key, value):
    try:
        node.attr[key].CopyFrom(
            attr_value_pb2.AttrValue(shape=tensor_shape.as_shape(value).as_proto()))
    except KeyError:
        pass
Example #39
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False,
                      allow_broadcast=False):
  """Create a TensorProto.

  In TensorFlow 2.0, representing tensors as protos should no longer be a
  common workflow. That said, this utility function is still useful for
  generating TF Serving request protos:

    request = tensorflow_serving.apis.predict_pb2.PredictRequest()
    request.model_spec.name = "my_model"
    request.model_spec.signature_name = "serving_default"
    request.inputs["images"].CopyFrom(tf.make_tensor_proto(X_new))

  make_tensor_proto accepts "values" of a python scalar, a python list, a
  numpy ndarray, or a numpy scalar.

  If "values" is a python scalar or a python list, make_tensor_proto
  first converts it to a numpy ndarray. If dtype is None, the
  conversion tries its best to infer the right numpy data
  type. Otherwise, the resulting numpy array has a compatible data
  type with the given dtype.

  In either case above, the numpy ndarray (either caller-provided or
  auto-converted) must have a type compatible with dtype.

  make_tensor_proto then converts the numpy array to a tensor proto.

  If "shape" is None, the resulting tensor proto represents the numpy
  array precisely.

  Otherwise, "shape" specifies the tensor's shape and the numpy array
  can not have more elements than what "shape" specifies.

  Args:
    values:         Values to put in the TensorProto.
    dtype:          Optional tensor_pb2 DataType value.
    shape:          List of integers representing the dimensions of tensor.
    verify_shape:   Boolean that enables verification of a shape of values.
    allow_broadcast:  Boolean that enables allowing scalars and length-1
        vectors to be broadcast. Cannot be true when verify_shape is true.

  Returns:
    A `TensorProto`. Depending on the type, it may contain data in the
    "tensor_content" attribute, which is not directly useful to Python programs.
    To access the values you should convert the proto back to a numpy ndarray
    with `tf.make_ndarray(proto)`.

    If `values` is a `TensorProto`, it is immediately returned; `dtype` and
    `shape` are ignored.

  Raises:
    TypeError:  if unsupported types are provided.
    ValueError: if arguments have inappropriate values or if verify_shape is
      True and the shape of values is not equal to the given shape argument.

  """
  if allow_broadcast and verify_shape:
    raise ValueError("allow_broadcast and verify_shape are not both allowed.")
  if isinstance(values, tensor_pb2.TensorProto):
    return values

  if dtype:
    dtype = dtypes.as_dtype(dtype)

  is_quantized = (
      dtype in [
          dtypes.qint8, dtypes.quint8, dtypes.qint16, dtypes.quint16,
          dtypes.qint32
      ])

  if _is_array_like(values):
    values = np.asarray(values)

  # We first convert value to a numpy array or scalar.
  if isinstance(values, (np.ndarray, np.generic)):
    if dtype and dtype.is_numpy_compatible:
      nparray = values.astype(dtype.as_numpy_dtype)
    else:
      nparray = values
  else:
    if values is None:
      raise ValueError("None values not supported.")
    # if dtype is provided, forces numpy array to be the type
    # provided if possible.
    if dtype and dtype.is_numpy_compatible:
      np_dt = dtype.as_numpy_dtype
    else:
      np_dt = None
    # If shape is None, numpy.prod returns None when dtype is not set, but
    # raises exception when dtype is set to np.int64
    if shape is not None and np.prod(shape, dtype=np.int64) == 0:
      nparray = np.empty(shape, dtype=np_dt)
    else:
      _AssertCompatible(values, dtype)
      nparray = np.array(values, dtype=np_dt)
      # We need to pass in quantized values as tuples, so don't apply the shape
      # check to them.
      if (list(nparray.shape) != _GetDenseDimensions(values) and
          not is_quantized):
        raise ValueError("""Argument must be a dense tensor: %s"""
                         """ - got shape %s, but wanted %s.""" %
                         (values, list(nparray.shape),
                          _GetDenseDimensions(values)))

    # python/numpy default float type is float64. We prefer float32 instead.
    if (nparray.dtype == np.float64) and dtype is None:
      nparray = nparray.astype(np.float32)
    # python/numpy default int type is int64. We prefer int32 instead.
    elif (nparray.dtype == np.int64) and dtype is None:
      downcasted_array = nparray.astype(np.int32)
      # Do not down cast if it leads to precision loss.
      if np.array_equal(downcasted_array, nparray):
        nparray = downcasted_array

  # if dtype is provided, it must be compatible with what numpy
  # conversion says.
  numpy_dtype = dtypes.as_dtype(nparray.dtype)
  if numpy_dtype is None:
    raise TypeError("Unrecognized data type: %s" % nparray.dtype)

  # If dtype was specified and is a quantized type, we convert
  # numpy_dtype back into the quantized version.
  if is_quantized:
    numpy_dtype = dtype

  if dtype is not None and (not hasattr(dtype, "base_dtype") or
                            dtype.base_dtype != numpy_dtype.base_dtype):
    raise TypeError("Incompatible types: %s vs. %s. Value is %s" %
                    (dtype, nparray.dtype, values))

  # If shape is not given, get the shape from the numpy array.
  if shape is None:
    shape = nparray.shape
    is_same_size = True
    shape_size = nparray.size
  else:
    shape = [int(dim) for dim in shape]
    shape_size = np.prod(shape, dtype=np.int64)
    is_same_size = shape_size == nparray.size

    if allow_broadcast:
      if nparray.shape == (1,) or nparray.shape == tuple():
        pass
      elif nparray.size != shape_size:
        raise TypeError("Expected Tensor's shape: %s, got %s." %
                        (tuple(shape), nparray.shape))

    else:
      if verify_shape and nparray.shape != tuple(shape):
        raise TypeError("Expected Tensor's shape: %s, got %s." %
                        (tuple(shape), nparray.shape))

      if nparray.size > shape_size:
        raise ValueError(
            "Too many elements provided. Needed at most %d, but received %d" %
            (shape_size, nparray.size))

  tensor_proto = tensor_pb2.TensorProto(
      dtype=numpy_dtype.as_datatype_enum,
      tensor_shape=tensor_shape.as_shape(shape).as_proto())

  if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
    if nparray.size * nparray.itemsize >= (1 << 31):
      raise ValueError(
          "Cannot create a tensor proto whose content is larger than 2GB.")
    tensor_proto.tensor_content = nparray.tobytes()
    return tensor_proto

  # If we were not given values as a numpy array, compute the proto_values
  # from the given values directly, to avoid numpy trimming nulls from the
  # strings. Since values could be a list of strings, or a multi-dimensional
  # list of lists that might or might not correspond to the given shape,
  # we flatten it conservatively.
  if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
    proto_values = _FlattenToStrings(values)

    # At this point, values may be a list of objects that we could not
    # identify a common type for (hence it was inferred as
    # np.object/dtypes.string).  If we are unable to convert it to a
    # string, we raise a more helpful error message.
    #
    # Ideally, we'd be able to convert the elements of the list to a
    # common type, but this type inference requires some thinking and
    # so we defer it for now.
    try:
      str_values = [compat.as_bytes(x) for x in proto_values]
    except TypeError:
      raise TypeError("Failed to convert object of type %s to Tensor. "
                      "Contents: %s. Consider casting elements to a "
                      "supported type." % (type(values), values))
    tensor_proto.string_val.extend(str_values)
    return tensor_proto

  # TensorFlow expects C order (a.k.a., eigen row major).
  proto_values = nparray.ravel()

  append_fn = GetNumpyAppendFn(proto_values.dtype)
  if append_fn is None:
    raise TypeError(
        "Element type not supported in TensorProto: %s" % numpy_dtype.name)
  append_fn(tensor_proto, proto_values)

  return tensor_proto
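A brief round-trip sketch using the public wrappers (assuming TF 2.x, where these helpers are exposed as tf.make_tensor_proto and tf.make_ndarray):

import numpy as np
import tensorflow as tf

proto = tf.make_tensor_proto(np.arange(6, dtype=np.float32), shape=[2, 3])
array = tf.make_ndarray(proto)   # back to a (2, 3) float32 ndarray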
 def create_zeros(unnested_state_size):
     flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
     init_state_size = [batch_size_tensor] + flat_dims
     return array_ops.zeros(init_state_size, dtype=dtype)
Example #41
def make_tensor_proto(values, dtype=None, shape=None):
    """Create a TensorProto.

    Args:
      values:    Values to put in the TensorProto.
      dtype:     Optional tensor_pb2 DataType value.
      shape:     List of integers representing the dimensions of tensor.

    Returns:
      A TensorProto. Depending on the type, it may contain data in the
      "tensor_content" attribute, which is not directly useful to Python programs.
      To access the values you should convert the proto back to a numpy ndarray
      with tensor_util.MakeNdarray(proto).

    Raises:
      TypeError:  if unsupported types are provided.
      ValueError: if arguments have inappropriate values.

    make_tensor_proto accepts "values" of a python scalar, a python list, a
    numpy ndarray, or a numpy scalar.

    If "values" is a python scalar or a python list, make_tensor_proto
    first converts it to a numpy ndarray. If dtype is None, the
    conversion tries its best to infer the right numpy data
    type. Otherwise, the resulting numpy array has a compatible data
    type with the given dtype.

    In either case above, the numpy ndarray (either caller-provided or
    auto-converted) must have a type compatible with dtype.

    make_tensor_proto then converts the numpy array to a tensor proto.

    If "shape" is None, the resulting tensor proto represents the numpy
    array precisely.

    Otherwise, "shape" specifies the tensor's shape and the numpy array
    can not have more elements than what "shape" specifies.

    """
    if dtype:
        dtype = dtypes.as_dtype(dtype)

    # We first convert value to a numpy array or scalar.
    if isinstance(values, (np.ndarray, np.generic)):
        if dtype:
            nparray = values.astype(dtype.as_numpy_dtype)
        else:
            nparray = values
    else:
        if values is None:
            raise ValueError("None values not supported.")
        # if dtype is provided, forces numpy array to be the type
        # provided if possible.
        np_dt = dtype.as_numpy_dtype if dtype else None
        if np.prod(shape) == 0:
            nparray = np.empty(shape, dtype=np_dt)
        else:
            _AssertCompatible(values, dtype)
            nparray = np.array(values, dtype=np_dt)
            if list(nparray.shape) != _GetDenseDimensions(values):
                raise ValueError("Argument must be a dense tensor: %s" %
                                 values)
        # python/numpy default float type is float64. We prefer float32 instead.
        if (nparray.dtype == np.float64) and dtype is None:
            nparray = nparray.astype(np.float32)
        # python/numpy default int type is int64. We prefer int32 instead.
        elif (nparray.dtype == np.int64) and dtype is None:
            nparray = nparray.astype(np.int32)

    # if dtype is provided, it must be compatible with what numpy
    # conversion says.
    numpy_dtype = dtypes.as_dtype(nparray.dtype)
    if numpy_dtype is None:
        raise TypeError("Unrecognized data type: %s" % nparray.dtype)

    # If dtype was specified and is a quantized type, we convert
    # numpy_dtype back into the quantized version.
    if dtype in [dtypes.qint8, dtypes.quint8, dtypes.qint32]:
        numpy_dtype = dtype

    if dtype is not None and not dtype.base_dtype == numpy_dtype.base_dtype:
        raise TypeError("Incompatible types: %s vs. %s" %
                        (dtype, nparray.dtype))

    # If shape is not given, get the shape from the numpy array.
    if shape is None:
        shape = nparray.shape
        is_same_size = True
        shape_size = nparray.size
    else:
        shape = [int(dim) for dim in shape]
        shape_size = np.prod(shape)
        is_same_size = shape_size == nparray.size

        if nparray.size > shape_size:
            raise ValueError(
                "Too many elements provided. Needed at most %d, but received %d"
                % (shape_size, nparray.size))

    tensor_proto = tensor_pb2.TensorProto(
        dtype=numpy_dtype.as_datatype_enum,
        tensor_shape=tensor_shape.as_shape(shape).as_proto())

    if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
        if nparray.size * nparray.itemsize >= (1 << 31):
            raise ValueError(
                "Cannot create a tensor proto whose content is larger than 2GB."
            )
        tensor_proto.tensor_content = nparray.tobytes()
        return tensor_proto

    # If we were not given values as a numpy array, compute the proto_values
    # from the given values directly, to avoid numpy trimming nulls from the
    # strings. Since values could be a list of strings, or a multi-dimensional
    # list of lists that might or might not correspond to the given shape,
    # we flatten it conservatively.
    if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
        proto_values = _FlattenToStrings(values)
        tensor_proto.string_val.extend(
            [compat.as_bytes(x) for x in proto_values])
        return tensor_proto

    # TensorFlow expects C order (a.k.a., eigen row major).
    proto_values = nparray.ravel()

    append_fn = GetNumpyAppendFn(proto_values.dtype)
    if append_fn is None:
        raise TypeError("Element type not supported in TensorProto: %s" %
                        numpy_dtype.name)
    append_fn(tensor_proto, proto_values)

    return tensor_proto
  def _create_variable(self, next_creator, **kwargs):
    """Implements StrategyExtendedV2._create_variable.

    Creates a `Variable` or a `ShardedVariable`. A `ShardedVariable` will be
    created if satisfying all the following criteria:
      1. `self._variable_partitioner` results in more than one partition on the
         first axis.
      2. variable's rank is greater than 0.
      3. variable is not colocated with another variable.
    Otherwise a `Variable` will be created.

    Args:
      next_creator: See `variable_scope.variable_creator_scope`; the next
        creator in the chain.
      **kwargs: Passed through to the next creator.

    Returns:
      A `Variable` or `ShardedVariable`.
    """

    if "colocate_with" in kwargs:  # Never partition colocated_with variables.
      colocate_with = kwargs["colocate_with"]
      # Clear the variable scope to avoid possible conflicts between device
      # scope and colocation scope.
      with ops.device(None):
        with ops.colocate_with(colocate_with):
          var = next_creator(**kwargs)
          logging.debug(
              "Creating variable (name:%s, shape:%r) that colocates with %s",
              var.name, var.shape, kwargs["colocate_with"].name)
          return var

    if self._variable_partitioner is None:
      return self._create_variable_round_robin(next_creator, **kwargs)

    name = kwargs.get("name", None)
    initial_value = kwargs.get("initial_value", None)
    if initial_value is None:
      raise ValueError("initial_value must be specified.")

    # Two cases where initial_value can be a callable:
    #   1. initial_value is passed as a callable, e.g, an `initializer` class.
    #   2. restoring from checkpoint, initial_value is a
    #     "CheckpointInitialValueCallable".
    init_from_fn = callable(initial_value)

    dtype = kwargs.get("dtype", None)
    shape = kwargs.get("shape", None)
    if init_from_fn and (shape is None or dtype is None):
      init_from_fn = False
      initial_value = initial_value()
    if not init_from_fn:
      # The initial_value is created on coordinator, it will need to be sent to
      # ps for variable initialization, which can be inefficient and can
      # potentially hit the 2GB limit on protobuf serialization.
      initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)
      dtype = initial_value.dtype
      shape = initial_value.shape
    else:
      shape = tensor_shape.as_shape(shape)

    if shape.rank == 0:  # Skip partitioning rank-0 variable.
      return self._create_variable_round_robin(next_creator, **kwargs)

    num_partitions = self._variable_partitioner(shape=shape, dtype=dtype)
    if not num_partitions or num_partitions[0] == 0 or any(
        v != 1 for v in num_partitions[1:]):
      raise ValueError(
          "variable_partitioner must return a list/tuple whose elements are 1"
          " besides the first element (non-zero), got: %r" % num_partitions)

    if num_partitions[0] == 1:  # no partition
      return self._create_variable_round_robin(next_creator, **kwargs)

    # Use "div" partition strategy to partition the variable.
    num_partitions = min(num_partitions[0], shape[0])
    base = shape[0] // num_partitions
    extra = shape[0] % num_partitions
    # An example: num_partitions=4, shape[0]=10, partitions: [3, 3, 2, 2]
    # offsets: [0, 3, 6, 8, 10]
    offsets = []
    for i in range(num_partitions):
      if i == 0:
        offsets.append(0)
      else:
        prev_shard_size = base + (1 if i - 1 < extra else 0)
        offsets.append(offsets[i - 1] + prev_shard_size)
    offsets.append(shape[0])

    def init_shard_fn(shard_index):
      if not init_from_fn:
        logging.log_if(
            logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and
            shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)
        return initial_value[offsets[shard_index]:offsets[shard_index + 1]]
      arg_spec = tf_inspect.getfullargspec(initial_value)
      if ("shard_info" not in arg_spec.args and
          "shard_info" not in arg_spec.kwonlyargs):
        # `initial_value` is a callable that doesn't accept `shard_info`.
        logging.log_if(
            logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and
            shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)
        full_value = initial_value()
        return full_value[offsets[shard_index]:offsets[shard_index + 1]]
      else:
        # Memory-efficient way of initializing sharded variable. It requires
        # the `init_fn` to accept a namedtuple `shard_info`.
        component_shape = (offsets[shard_index + 1] -
                           offsets[shard_index],) + shape[1:]
        offsets_all_axes = (offsets[shard_index],) + (0,) * len(shape[1:])
        return initial_value(
            shard_info=trackable.ShardInfo(
                shape=tensor_shape.as_shape(component_shape),
                offset=offsets_all_axes))

    var_list = []
    for i in range(num_partitions):
      kwargs["shape"] = (offsets[i + 1] - offsets[i],) + shape[1:]
      kwargs["initial_value"] = lambda: init_shard_fn(i)
      if name is not None:
        kwargs["name"] = "{}/part_{}".format(name, i)
      var_list.append(self._create_variable_round_robin(next_creator, **kwargs))

    result = sharded_variable.ShardedVariable(var_list)
    return result
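The offset computation for the "div" strategy can be sketched standalone (pure Python; div_offsets is a made-up helper name, and the numbers match the example comment above: shape[0]=10, 4 partitions):

def div_offsets(dim0, num_partitions):
  base, extra = divmod(dim0, num_partitions)
  offsets = [0]
  for i in range(1, num_partitions):
    prev_shard_size = base + (1 if i - 1 < extra else 0)
    offsets.append(offsets[-1] + prev_shard_size)
  offsets.append(dim0)
  return offsets

print(div_offsets(10, 4))   # [0, 3, 6, 8, 10]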
Example #43
    def _create_variable(self, next_creator, **kwargs):
        """Implements StrategyExtendedV2._create_variable.

        Creates a `Variable` or a `ShardedVariable`. A `ShardedVariable` will be
        created if satisfying all the following criteria:
          1. `self._variable_partitioner` results in more than one partition on the
             first axis.
          2. variable's rank is greater than 0.
          3. variable is not colocated with another variable.
        Otherwise a `Variable` will be created.

        Args:
          next_creator: See `variable_scope.variable_creator_scope`; the next
            creator in the chain.
          **kwargs: Passed through to the next creator.

        Returns:
          A `Variable` or `ShardedVariable`.
        """

        var_creator = self._create_var_creator(next_creator, **kwargs)
        if "colocate_with" in kwargs:  # Never partition colocated_with variables.
            colocate_with = kwargs["colocate_with"]
            # Clear the variable scope to avoid possible conflicts between device
            # scope and colocation scope.
            with ops.device(None):
                with ops.colocate_with(colocate_with):
                    var = var_creator(**kwargs)
                    logging.debug(
                        "Creating variable (name:%s, shape:%r) that colocates with %s",
                        var.name, var.shape, kwargs["colocate_with"].name)
                    return var

        if self._variable_partitioner is None:
            return self._create_variable_round_robin(var_creator, **kwargs)

        name = kwargs.get("name", None)
        dtype = kwargs.get("dtype", None)
        shape = kwargs.get("shape", None)
        initial_value = kwargs.get("initial_value", None)
        if initial_value is None:
            # If we are loading, next_creator will return an UninitializedVariable
            v = next_creator(**kwargs)
            if not isinstance(v, resource_variable_ops.UninitializedVariable):
                raise ValueError(
                    "It looks like you are using `ParameterServerStrategy` with a "
                    "`variable_partitioner`, and trying to create a variable without "
                    "specifying `initial_value`. This is not allowed. Please specify the "
                    "`initial_value`.")
            elif shape is None or dtype is None:
                raise ValueError(
                    "It looks like you are trying to load a `SavedModel` using "
                    "`tf.saved_model.load` within a `ParameterServerStrategy` scope, "
                    "but the `SavedModel` is missing shape or dtype information."
                )
            else:

                def initializer(shape, dtype, **kwargs):
                    if "partition_shape" in kwargs:
                        shape = kwargs["partition_shape"]
                    return array_ops.zeros(shape, dtype)

                initial_value = functools.partial(initializer,
                                                  shape=shape,
                                                  dtype=dtype)

        # Two cases where initial_value can be a callable:
        #   1. initial_value is passed as a callable, e.g, an `initializer` class.
        #   2. restoring from checkpoint, initial_value is a
        #     "CheckpointInitialValueCallable".
        init_from_fn = callable(initial_value)

        if init_from_fn and (shape is None or dtype is None):
            init_from_fn = False
            initial_value = initial_value()
        if not init_from_fn:
            # The initial_value is created on the coordinator and will need to be
            # sent to the ps for variable initialization, which can be inefficient
            # and can potentially hit the 2GB limit on protobuf serialization.
            initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)
            dtype = initial_value.dtype
            shape = initial_value.shape
        else:
            shape = tensor_shape.as_shape(shape)

        if shape.rank == 0:  # Skip partitioning rank-0 variable.
            return self._create_variable_round_robin(var_creator, **kwargs)

        num_partitions = self._variable_partitioner(shape=shape, dtype=dtype)
        if not num_partitions or num_partitions[0] == 0 or any(
                v != 1 for v in num_partitions[1:]):
            raise ValueError(
                "variable_partitioner must return a list/tuple whose elements are 1"
                " besides the first element (non-zero), got: %r" %
                num_partitions)

        if num_partitions[0] == 1:  # no partition
            return self._create_variable_round_robin(var_creator, **kwargs)

        # Use "div" partition strategy to partition the variable.
        num_partitions = min(num_partitions[0], shape[0])
        base = shape[0] // num_partitions
        extra = shape[0] % num_partitions
        # An example: num_partitions=4, shape[0]=10, partitions: [3, 3, 2, 2]
        # offsets: [0, 3, 6, 8, 10]
        offsets = []
        for i in range(num_partitions):
            if i == 0:
                offsets.append(0)
            else:
                prev_shard_size = base + (1 if i - 1 < extra else 0)
                offsets.append(offsets[i - 1] + prev_shard_size)
        offsets.append(shape[0])

        def init_shard_fn(shard_index):
            if not init_from_fn:
                logging.log_if(
                    logging.WARN, _INEFFICIENT_INIT_WARNING % name,
                    shard_index == 0
                    and shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)
                return initial_value[offsets[shard_index]:offsets[shard_index +
                                                                  1]]
            partition_shape = (offsets[shard_index + 1] -
                               offsets[shard_index], ) + shape[1:]
            partition_offset = (
                offsets[shard_index], ) + (0, ) * len(shape[1:])
            arg_spec = tf_inspect.getfullargspec(initial_value)
            if ("shard_info" not in arg_spec.args
                    and "shard_info" not in arg_spec.kwonlyargs):
                try:
                    value = initial_value(partition_shape=partition_shape,
                                          partition_offset=partition_offset)
                except (TypeError, ValueError):
                    # TypeError: Initializer doesn't accept kwargs
                    # ValueError: Initializer doesn't accept partition kwargs
                    # In both cases we go ahead creating the full value and then slice.
                    value = initial_value()

                if value.shape == partition_shape:
                    # Initializer supports partition: value is the partition value.
                    return value
                else:
                    # Initializer doesn't support partition: value is the full value
                    # and needs to be sliced to get the partition value.
                    logging.log_if(
                        logging.WARN, _INEFFICIENT_INIT_WARNING % name,
                        shard_index == 0 and
                        shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)
                    return value[offsets[shard_index]:offsets[shard_index + 1]]
            else:
                # For compatibility with `CheckpointInitialValueCallable`.
                return initial_value(shard_info=trackable.ShardInfo(
                    shape=tensor_shape.as_shape(partition_shape),
                    offset=partition_offset))

        var_list = []
        for i in range(num_partitions):
            kwargs["shape"] = (offsets[i + 1] - offsets[i], ) + shape[1:]
            kwargs["initial_value"] = lambda: init_shard_fn(i)
            if name is not None:
                kwargs["name"] = "{}/part_{}".format(name, i)
            var_list.append(
                self._create_variable_round_robin(var_creator, **kwargs))

        result = sharded_variable.ShardedVariable(var_list)
        return result
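The `variable_partitioner` consumed above is simply a callable taking `shape` and `dtype` and returning per-axis partition counts, where only the first axis may be split. A hedged sketch of such a callable (the name `fixed_first_axis_partitioner` is made up; recent TF releases ship `tf.distribute.experimental.partitioners.FixedShardsPartitioner` as a built-in equivalent):

```python
def fixed_first_axis_partitioner(num_shards):
  def partitioner(shape, dtype):
    # `shape` is assumed to be a fully defined tf.TensorShape; only the first
    # axis is split, all remaining axes must stay at 1 per the check above.
    del dtype
    return [min(num_shards, shape[0])] + [1] * (shape.rank - 1)
  return partitioner

# For a (10, 4) variable and num_shards=4 this returns [4, 1]; the code above
# then creates shards of sizes [3, 3, 2, 2] along axis 0.
```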
Example #44
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
    """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-0 or rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.

  Raises:
    ValueError: If the shape is rank-0 and is not statically known to be -1.
  """
    if isinstance(tensor, ops.EagerTensor):
        return tensor_shape.as_shape(
            [dim if dim != -1 else None for dim in tensor.numpy()])

    if tensor.get_shape().ndims == 0:
        value = constant_value(tensor)
        if value is None:
            raise ValueError(
                "Received a scalar with unknown value as shape; require a statically "
                "known scalar with value '-1' to describe an unknown shape.")
        if value != -1:
            raise ValueError(
                "Received a scalar value '%s' as shape; require a statically known "
                "scalar with value '-1' to describe an unknown shape." % value)
        return tensor_shape.unknown_shape()

    shape = tensor.get_shape().with_rank(1)
    if shape == [0]:
        return tensor_shape.TensorShape([])
    elif tensor.op.type == "Cast":
        pre_cast = constant_value_as_shape(tensor.op.inputs[0])
        if pre_cast.dims is None:
            # the input to cast has a totally undefined shape; just return that.
            return pre_cast
        cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
        if cast_dtype not in (dtypes.int32, dtypes.int64):
            return tensor_shape.unknown_shape(shape.dims[0].value)
        dest_dtype_shape_array = np.array([
            x if x is not None else -1 for x in pre_cast.as_list()
        ]).astype(cast_dtype.as_numpy_dtype)
        return tensor_shape.TensorShape(
            [x if x >= 0 else None for x in dest_dtype_shape_array])
    elif tensor.op.type == "Shape":
        return tensor.op.inputs[0].get_shape()
    elif tensor.op.type == "Pack":
        ret = tensor_shape.TensorShape([])  # Empty list.
        # Since we expect rank 1 inputs, Pack's axis must be zero, otherwise it
        # would not be rank 1.
        assert tensor.op.get_attr("axis") == 0
        for pack_input in tensor.op.inputs:
            # `pack_input` must be a scalar. Attempt to evaluate it, and append it
            # to `ret`.
            pack_input_val = constant_value(pack_input)
            if pack_input_val is None or pack_input_val < 0:
                new_dim = tensor_shape.Dimension(None)
            else:
                new_dim = tensor_shape.Dimension(pack_input_val)
            ret = ret.concatenate([new_dim])
        return ret
    elif tensor.op.type == "Concat":
        # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
        # the only legal value when concatenating vectors, and it will
        # have been checked by a previous shape function.
        ret = tensor_shape.TensorShape([])  # Empty list.
        for concat_input in tensor.op.inputs[1:]:
            # `concat_input` must be a vector. Attempt to evaluate it as a shape,
            # and concatenate it with `ret`.
            ret = ret.concatenate(constant_value_as_shape(concat_input))
        return ret
    elif tensor.op.type == "ConcatV2":
        # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
        # the only legal value when concatenating vectors, and it will
        # have been checked by a previous shape function.
        ret = tensor_shape.TensorShape([])  # Empty list.
        for concat_input in tensor.op.inputs[:-1]:
            # `concat_input` must be a vector. Attempt to evaluate it as a shape,
            # and concatenate it with `ret`.
            ret = ret.concatenate(constant_value_as_shape(concat_input))
        return ret
    elif tensor.op.type == "StridedSlice":
        try:
            begin = constant_value(tensor.op.inputs[1])
            end = constant_value(tensor.op.inputs[2])
            strides = constant_value(tensor.op.inputs[3])
            if begin is not None and end is not None and strides is not None:
                begin = begin[0]
                end = end[0]
                strides = strides[0]
                begin_mask = tensor.op.get_attr("begin_mask")
                if begin_mask == 1:
                    begin = None
                end_mask = tensor.op.get_attr("end_mask")
                if end_mask == 1:
                    end = None

                ellipsis_mask = tensor.op.get_attr("ellipsis_mask")
                new_axis_mask = tensor.op.get_attr("new_axis_mask")
                shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask")
                valid_attributes = (not ellipsis_mask and not new_axis_mask
                                    and not shrink_axis_mask
                                    and (not begin_mask or (begin_mask == 1))
                                    and (not end_mask or (end_mask == 1)))
                if valid_attributes:  # additional inputs not supported
                    prev = constant_value_as_shape(tensor.op.inputs[0])
                    prev = prev[begin:end:strides]
                    ret = tensor_shape.TensorShape(prev)
                    return ret

        except ValueError:  # Could come from get_attr or slicing prev.
            pass
        except TypeError:  # Could come from slicing prev.
            pass
    elif (tensor.op.type == "Placeholder" and tensor.op.graph.building_function
          and hasattr(tensor.op.graph, "internal_captures")):
        # If we are inside a FuncGraph try to lookup the constant value of the
        # corresponding external capture. Note that we only look at captures and
        # not the fed inputs because those can be fed different values in different
        # instantiations of the function call or different iterations of a
        # tf.while_loop.
        for i, capture in enumerate(tensor.op.graph.internal_captures):
            if capture is tensor:
                external_capture = tensor.op.graph.external_captures[i]
                return constant_value_as_shape(external_capture)

    ret = tensor_shape.unknown_shape(shape.dims[0].value)
    value = constant_value(tensor)
    if value is not None:
        ret = ret.merge_with(
            tensor_shape.TensorShape([d if d >= 0 else None for d in value]))
    return ret
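A small demonstration of the `Pack` branch above, as a hedged sketch assuming a TF 2.x install (graph mode via the v1 compatibility module, since the function inspects symbolic ops): the constant element of a `tf.stack` shape argument survives into the static output shape.

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  x = tf.compat.v1.placeholder(tf.float32, shape=[None])  # unknown length
  n = tf.compat.v1.placeholder(tf.int32, shape=[])        # unknown scalar dim
  y = tf.reshape(x, tf.stack([3, n]))                     # shape tensor is a "Pack" op
  print(y.shape)  # expected: (3, None) -- the constant 3 is recovered statically
```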
Example #45
  def __init__(self, dtype, dense_shape):
    self._dtype = dtypes.as_dtype(dtype)
    self._dense_shape = tensor_shape.as_shape(dense_shape)
Example #46
 def _convert_to_shape(shape): 
     if isinstance(shape, ops.Tensor):
         return tensor_shape.as_shape(tensor_util.constant_value(shape))
     else:
         return shape
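The same conversion with only public TF 2.x APIs, as a hedged sketch: `tf.get_static_value` is the public counterpart of the `tensor_util.constant_value` call used above.

```python
import tensorflow as tf

shape_t = tf.constant([2, 3])              # a shape carried as a Tensor
static = tf.get_static_value(shape_t)      # -> array([2, 3], dtype=int32)
shape = tf.TensorShape(static)             # -> TensorShape([2, 3])
print(shape)
```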
Example #47
def accumulate_n_v2(inputs, shape=None, tensor_dtype=None, name=None):
    """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
  wait for all of its inputs to be ready before beginning to sum. This can
  save memory if inputs are ready at different times, since minimum temporary
  storage is proportional to the output size rather than the inputs size.

  Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.

  For example:

  ```python
  a = tf.constant([[1, 2], [3, 4]])
  b = tf.constant([[5, 0], [0, 6]])
  tf.accumulate_n_v2([a, b, a])  # [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n_v2([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
                                                                   # [[7,  4],
                                                                   #  [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
    _INPUTS_ERR_MSG = ValueError("inputs must be a list of at least one Tensor "
                                 "with the same dtype and shape")
    if not inputs or not isinstance(inputs, (list, tuple)):
        raise _INPUTS_ERR_MSG
    inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
    if not all(isinstance(x, ops.Tensor) for x in inputs):
        raise _INPUTS_ERR_MSG
    if not all(x.dtype == inputs[0].dtype for x in inputs):
        raise _INPUTS_ERR_MSG
    if shape is not None:
        shape = tensor_shape.as_shape(shape)
    else:
        shape = tensor_shape.unknown_shape()
    for input_tensor in inputs:
        if isinstance(input_tensor, ops.Tensor):
            shape = shape.merge_with(input_tensor.get_shape())

    # tensor_dtype is for safety only; operator's output type computed in C++
    if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
        raise TypeError("tensor_dtype is {}, but input is of type {}".format(
            tensor_dtype, inputs[0].dtype))

    if len(inputs) == 1 and name is None:
        return inputs[0]
    elif len(inputs) == 1 and name is not None:
        return array_ops.identity(inputs[0], name=name)
    elif context.in_eager_mode():
        # TemporaryVariable not currently supported in eager mode; fall back
        # onto AddN for now.
        # TODO(frreiss) remove this once the lifetime of eager variables gets
        # addressed
        return math_ops.add_n(inputs, name=name)
    else:
        return gen_math_ops._accumulate_nv2(inputs, name=name, shape=shape)
Example #48
def _parse_example_raw(serialized,
                       names=None,
                       sparse_keys=None,
                       sparse_types=None,
                       dense_keys=None,
                       dense_types=None,
                       dense_defaults=None,
                       dense_shapes=None,
                       name=None):
    """Parses `Example` protos.

  Args:
    serialized: A vector (1-D Tensor) of strings, a batch of binary
      serialized `Example` protos.
    names: A vector (1-D Tensor) of strings (optional), the names of
      the serialized protos.
    sparse_keys: A list of string keys in the examples' features.
      The results for these keys will be returned as `SparseTensor` objects.
    sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    dense_keys: A list of string keys in the examples' features.
      The results for these keys will be returned as `Tensor`s
    dense_types: A list of DTypes of the same length as `dense_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    dense_defaults: A dict mapping string keys to `Tensor`s.
      The keys of the dict must match the dense_keys of the feature.
    dense_shapes: A list of tuples with the same length as `dense_keys`.
      The shape of the data for each dense feature referenced by `dense_keys`.
      Required for any input tensors identified by `dense_keys` whose shapes are
      anything other than `[]` or `[1]`.
    name: A name for this operation (optional).

  Returns:
    A `dict` mapping keys to `Tensor`s and `SparseTensor`s.

  Raises:
    ValueError: If sparse and dense key sets intersect, or input lengths do not
      match up.
  """
    with ops.name_scope(name, "ParseExample", [serialized, names]):
        names = [] if names is None else names
        dense_defaults = {} if dense_defaults is None else dense_defaults
        sparse_keys = [] if sparse_keys is None else sparse_keys
        sparse_types = [] if sparse_types is None else sparse_types
        dense_keys = [] if dense_keys is None else dense_keys
        dense_types = [] if dense_types is None else dense_types
        dense_shapes = ([[]] * len(dense_keys)
                        if dense_shapes is None else dense_shapes)

        num_dense = len(dense_keys)
        num_sparse = len(sparse_keys)

        if len(dense_shapes) != num_dense:
            raise ValueError(
                "len(dense_shapes) != len(dense_keys): %d vs. %d" %
                (len(dense_shapes), num_dense))
        if len(dense_types) != num_dense:
            raise ValueError("len(dense_types) != len(num_dense): %d vs. %d" %
                             (len(dense_types), num_dense))
        if len(sparse_types) != num_sparse:
            raise ValueError(
                "len(sparse_types) != len(sparse_keys): %d vs. %d" %
                (len(sparse_types), num_sparse))
        if num_dense + num_sparse == 0:
            raise ValueError(
                "Must provide at least one sparse key or dense key")
        if not set(dense_keys).isdisjoint(set(sparse_keys)):
            raise ValueError(
                "Dense and sparse keys must not intersect; intersection: %s" %
                set(dense_keys).intersection(set(sparse_keys)))

        dense_defaults_vec = []
        for i, key in enumerate(dense_keys):
            default_value = dense_defaults.get(key)
            if default_value is None:
                default_value = constant_op.constant([], dtype=dense_types[i])
            elif not isinstance(default_value, ops.Tensor):
                key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
                default_value = ops.convert_to_tensor(default_value,
                                                      dtype=dense_types[i],
                                                      name=key_name)
                default_value = array_ops.reshape(default_value,
                                                  dense_shapes[i])

            dense_defaults_vec.append(default_value)

        dense_shapes = [
            tensor_shape.as_shape(shape).as_proto() for shape in dense_shapes
        ]

        # pylint: disable=protected-access
        outputs = gen_parsing_ops._parse_example(
            serialized=serialized,
            names=names,
            dense_defaults=dense_defaults_vec,
            sparse_keys=sparse_keys,
            sparse_types=sparse_types,
            dense_keys=dense_keys,
            dense_shapes=dense_shapes,
            name=name)
        # pylint: enable=protected-access

        (sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs

        sparse_tensors = [
            sparse_tensor.SparseTensor(ix, val, shape)
            for (ix, val,
                 shape) in zip(sparse_indices, sparse_values, sparse_shapes)
        ]

        return dict(
            zip(sparse_keys + dense_keys, sparse_tensors + dense_values))
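`_parse_example_raw` is the internal layer underneath the public parsing API; the sparse/dense keys, types, and shapes it takes are expressed there as feature specs. A hedged usage sketch with the public `tf.io` API (the feature names are made up):

```python
import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    "age": tf.train.Feature(int64_list=tf.train.Int64List(value=[42])),
    "tags": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"a", b"b"])),
}))
serialized = tf.constant([example.SerializeToString()])

parsed = tf.io.parse_example(serialized, features={
    "age": tf.io.FixedLenFeature([1], tf.int64),   # dense key: shape + dtype
    "tags": tf.io.VarLenFeature(tf.string),        # sparse key: dtype only
})
print(parsed["age"].numpy())  # dense Tensor
print(parsed["tags"])         # SparseTensor
```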
Example #49
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  For example:

  ```python
  # tensor 'a' is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  if tensor_dtype is None:
    if not inputs or not isinstance(inputs, (list, tuple)):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
    if not all(isinstance(x, ops.Tensor) for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    if not all(x.dtype == inputs[0].dtype for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    tensor_dtype = inputs[0].dtype
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
    for input_tensor in inputs:
      if isinstance(input_tensor, ops.Tensor):
        shape = shape.merge_with(input_tensor.get_shape())
  if not shape.is_fully_defined():
    # TODO(pbar): Make a version of assign_add that accepts an uninitialized
    # lvalue, and takes its shape from that? This would allow accumulate_n to
    # work in all situations that add_n currently works.
    raise ValueError("Cannot infer the shape of the accumulator for "
                     "accumulate_n. Pass the shape argument, or set the shape "
                     "of at least one of the inputs.")
  with ops.op_scope(inputs, name, "AccumulateN") as name:
    var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
    var_name = var.op.name
    var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
    update_ops = []
    for input_tensor in inputs:
      op = state_ops.assign_add(var, input_tensor, use_locking=True)
      update_ops.append(op)
    with ops.control_dependencies(update_ops):
      return gen_state_ops._destroy_temporary_variable(var,
                                                       var_name=var_name,
                                                       name=name)
Example #50
def _parse_single_sequence_example_raw(serialized,
                                       context_sparse_keys=None,
                                       context_sparse_types=None,
                                       context_dense_keys=None,
                                       context_dense_types=None,
                                       context_dense_defaults=None,
                                       context_dense_shapes=None,
                                       feature_list_sparse_keys=None,
                                       feature_list_sparse_types=None,
                                       feature_list_dense_keys=None,
                                       feature_list_dense_types=None,
                                       feature_list_dense_shapes=None,
                                       feature_list_dense_defaults=None,
                                       debug_name=None,
                                       name=None):
    """Parses a single `SequenceExample` proto.

  Args:
    serialized: A scalar (0-D Tensor) of type string, a single binary
      serialized `SequenceExample` proto.
    context_sparse_keys: A list of string keys in the `SequenceExample`'s
      features.  The results for these keys will be returned as
      `SparseTensor` objects.
    context_sparse_types: A list of `DTypes`, the same length as
      `context_sparse_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    context_dense_keys: A list of string keys in the examples' features.
      The results for these keys will be returned as `Tensor`s
    context_dense_types: A list of DTypes, same length as `context_dense_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    context_dense_defaults: A dict mapping string keys to `Tensor`s.
      The keys of the dict must match the context_dense_keys of the feature.
    context_dense_shapes: A list of tuples, same length as `context_dense_keys`.
      The shape of the data for each context_dense feature referenced by
      `context_dense_keys`.  Required for any input tensors identified by
      `context_dense_keys` whose shapes are anything other than `[]` or `[1]`.
    feature_list_sparse_keys: A list of string keys in the `SequenceExample`'s
      feature_lists.  The results for these keys will be returned as
      `SparseTensor` objects.
    feature_list_sparse_types: A list of `DTypes`, same length as
      `feature_list_sparse_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    feature_list_dense_keys: A list of string keys in the `SequenceExample`'s
      feature_lists. The results for these keys will be returned as `Tensor`s.
    feature_list_dense_types: A list of `DTypes`, same length as
      `feature_list_dense_keys`.  Only `tf.float32` (`FloatList`),
      `tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.
    feature_list_dense_shapes: A list of tuples, same length as
      `feature_list_dense_keys`.  The shape of the data for each
      `FeatureList` feature referenced by `feature_list_dense_keys`.
    feature_list_dense_defaults: A dict mapping key strings to values.
      The only currently allowed value is `None`.  Any key appearing
      in this dict with value `None` is allowed to be missing from the
      `SequenceExample`.  If missing, the key is treated as zero-length.
    debug_name: A scalar (0-D Tensor) of strings (optional), the name of
      the serialized proto.
    name: A name for this operation (optional).

  Returns:
    A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
    The first dict contains the context key/values.
    The second dict contains the feature_list key/values.

  Raises:
    ValueError: If context_sparse and context_dense key sets intersect,
      if input lengths do not match up, or if a value in
      feature_list_dense_defaults is not None.
    TypeError: if feature_list_dense_defaults is not either None or a dict.
  """
    with ops.name_scope(name, "ParseSingleSequenceExample", [serialized]):
        context_dense_defaults = ({} if context_dense_defaults is None else
                                  context_dense_defaults)
        context_sparse_keys = ([] if context_sparse_keys is None else
                               context_sparse_keys)
        context_sparse_types = ([] if context_sparse_types is None else
                                context_sparse_types)
        context_dense_keys = ([] if context_dense_keys is None else
                              context_dense_keys)
        context_dense_types = ([] if context_dense_types is None else
                               context_dense_types)
        context_dense_shapes = ([[]] * len(context_dense_keys)
                                if context_dense_shapes is None else
                                context_dense_shapes)
        feature_list_sparse_keys = ([] if feature_list_sparse_keys is None else
                                    feature_list_sparse_keys)
        feature_list_sparse_types = ([] if feature_list_sparse_types is None
                                     else feature_list_sparse_types)
        feature_list_dense_keys = ([] if feature_list_dense_keys is None else
                                   feature_list_dense_keys)
        feature_list_dense_types = ([] if feature_list_dense_types is None else
                                    feature_list_dense_types)
        feature_list_dense_shapes = ([[]] * len(feature_list_dense_keys)
                                     if feature_list_dense_shapes is None else
                                     feature_list_dense_shapes)
        feature_list_dense_defaults = (dict()
                                       if feature_list_dense_defaults is None
                                       else feature_list_dense_defaults)
        debug_name = "" if debug_name is None else debug_name

        # Internal
        feature_list_dense_missing_assumed_empty = []

        num_context_dense = len(context_dense_keys)
        num_feature_list_dense = len(feature_list_dense_keys)
        num_context_sparse = len(context_sparse_keys)
        num_feature_list_sparse = len(feature_list_sparse_keys)

        if len(context_dense_shapes) != num_context_dense:
            raise ValueError(
                "len(context_dense_shapes) != len(context_dense_keys): %d vs. %d"
                % (len(context_dense_shapes), num_context_dense))
        if len(context_dense_types) != num_context_dense:
            raise ValueError(
                "len(context_dense_types) != len(num_context_dense): %d vs. %d"
                % (len(context_dense_types), num_context_dense))
        if len(feature_list_dense_shapes) != num_feature_list_dense:
            raise ValueError(
                "len(feature_list_dense_shapes) != len(feature_list_dense_keys): "
                "%d vs. %d" %
                (len(feature_list_dense_shapes), num_feature_list_dense))
        if len(feature_list_dense_types) != num_feature_list_dense:
            raise ValueError(
                "len(feature_list_dense_types) != len(num_feature_list_dense):"
                "%d vs. %d" %
                (len(feature_list_dense_types), num_feature_list_dense))
        if len(context_sparse_types) != num_context_sparse:
            raise ValueError(
                "len(context_sparse_types) != len(context_sparse_keys): %d vs. %d"
                % (len(context_sparse_types), num_context_sparse))
        if len(feature_list_sparse_types) != num_feature_list_sparse:
            raise ValueError(
                "len(feature_list_sparse_types) != len(feature_list_sparse_keys): "
                "%d vs. %d" %
                (len(feature_list_sparse_types), num_feature_list_sparse))
        if (num_context_dense + num_context_sparse + num_feature_list_dense +
                num_feature_list_sparse) == 0:
            raise ValueError(
                "Must provide at least one context_sparse key, context_dense key, "
                ", feature_list_sparse key, or feature_list_dense key")
        if not set(context_dense_keys).isdisjoint(set(context_sparse_keys)):
            raise ValueError(
                "context_dense and context_sparse keys must not intersect; "
                "intersection: %s" %
                set(context_dense_keys).intersection(set(context_sparse_keys)))
        if not set(feature_list_dense_keys).isdisjoint(
                set(feature_list_sparse_keys)):
            raise ValueError(
                "feature_list_dense and feature_list_sparse keys must not intersect; "
                "intersection: %s" % set(feature_list_dense_keys).intersection(
                    set(feature_list_sparse_keys)))
        if not isinstance(feature_list_dense_defaults, dict):
            raise TypeError("feature_list_dense_defaults must be a dict")
        for k, v in feature_list_dense_defaults.items():
            if v is not None:
                raise ValueError(
                    "Value feature_list_dense_defaults[%s] must be None" % k)
            feature_list_dense_missing_assumed_empty.append(k)

        context_dense_defaults_vec = []
        for i, key in enumerate(context_dense_keys):
            default_value = context_dense_defaults.get(key)
            if default_value is None:
                default_value = constant_op.constant(
                    [], dtype=context_dense_types[i])
            elif not isinstance(default_value, ops.Tensor):
                key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
                default_value = ops.convert_to_tensor(
                    default_value, dtype=context_dense_types[i], name=key_name)
                default_value = array_ops.reshape(default_value,
                                                  context_dense_shapes[i])

            context_dense_defaults_vec.append(default_value)

        context_dense_shapes = [
            tensor_shape.as_shape(shape).as_proto()
            for shape in context_dense_shapes
        ]
        feature_list_dense_shapes = [
            tensor_shape.as_shape(shape).as_proto()
            for shape in feature_list_dense_shapes
        ]

        # pylint: disable=protected-access
        outputs = gen_parsing_ops._parse_single_sequence_example(
            serialized=serialized,
            debug_name=debug_name,
            context_dense_defaults=context_dense_defaults_vec,
            context_sparse_keys=context_sparse_keys,
            context_sparse_types=context_sparse_types,
            context_dense_keys=context_dense_keys,
            context_dense_shapes=context_dense_shapes,
            feature_list_sparse_keys=feature_list_sparse_keys,
            feature_list_sparse_types=feature_list_sparse_types,
            feature_list_dense_keys=feature_list_dense_keys,
            feature_list_dense_types=feature_list_dense_types,
            feature_list_dense_shapes=feature_list_dense_shapes,
            feature_list_dense_missing_assumed_empty=(
                feature_list_dense_missing_assumed_empty),
            name=name)
        # pylint: enable=protected-access

        (context_sparse_indices, context_sparse_values, context_sparse_shapes,
         context_dense_values, feature_list_sparse_indices,
         feature_list_sparse_values, feature_list_sparse_shapes,
         feature_list_dense_values) = outputs

        context_sparse_tensors = [
            sparse_tensor.SparseTensor(ix, val, shape)
            for (ix, val,
                 shape) in zip(context_sparse_indices, context_sparse_values,
                               context_sparse_shapes)
        ]

        feature_list_sparse_tensors = [
            sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape) in
            zip(feature_list_sparse_indices, feature_list_sparse_values,
                feature_list_sparse_shapes)
        ]

        context_output = dict(
            zip(context_sparse_keys + context_dense_keys,
                context_sparse_tensors + context_dense_values))
        feature_list_output = dict(
            zip(feature_list_sparse_keys + feature_list_dense_keys,
                feature_list_sparse_tensors + feature_list_dense_values))

        return (context_output, feature_list_output)
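A hedged usage sketch of the public wrapper around this raw function, `tf.io.parse_single_sequence_example`, with made-up feature names:

```python
import tensorflow as tf

seq = tf.train.SequenceExample(
    context=tf.train.Features(feature={
        "length": tf.train.Feature(int64_list=tf.train.Int64List(value=[2])),
    }),
    feature_lists=tf.train.FeatureLists(feature_list={
        "tokens": tf.train.FeatureList(feature=[
            tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"hello"])),
            tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"world"])),
        ]),
    }))

context, sequences = tf.io.parse_single_sequence_example(
    tf.constant(seq.SerializeToString()),
    context_features={"length": tf.io.FixedLenFeature([], tf.int64)},
    sequence_features={"tokens": tf.io.FixedLenSequenceFeature([], tf.string)})
print(context["length"].numpy())    # 2
print(sequences["tokens"].numpy())  # [b'hello' b'world']
```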
Example #51
    def _init_from_args(self,
                        initial_value=None,
                        trainable=True,
                        collections=None,
                        validate_shape=True,
                        caching_device=None,
                        name=None,
                        dtype=None,
                        expected_shape=None):
        """Creates a new variable from arguments.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called. In
        that case, `dtype` must be specified. (Note that initializer functions
        from init_ops.py must first be bound to a shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.VARIABLES]`.
      validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).
      expected_shape: A TensorShape. If set, initial_value is expected
        to have this shape.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
    """
        if initial_value is None:
            raise ValueError("initial_value must be specified.")
        init_from_fn = callable(initial_value)
        if init_from_fn and dtype is None:
            raise ValueError(
                "dtype must also be specified when initial_value is callable.")

        if collections is None:
            collections = [ops.GraphKeys.VARIABLES]
        if not isinstance(collections, (list, tuple, set)):
            raise ValueError(
                "collections argument to Variable constructor must be a list, tuple, "
                "or set. Got %s of type %s" % (collections, type(collections)))
        if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
            collections = list(collections) + [
                ops.GraphKeys.TRAINABLE_VARIABLES
            ]
        expected_shape = tensor_shape.as_shape(expected_shape)
        with ops.control_dependencies(None):
            with ops.name_scope(
                    name, "Variable",
                [] if init_from_fn else [initial_value]) as name:

                # Get the initial value from a callable function. The real shape of the
                # variable will be set later, since under the init_from_fn case, the
                # shape won't be known until after the function is invoked.
                #
                # NOTE: The current Variable OpKernel does not support
                # partially defined shapes, so we only set the shape if it is
                # fully defined. For historical reasons, we use the scalar
                # shape (`[]`) to represent an unknown or partially known
                # shape. A future version of the Variable ops will remove this
                # limitation.
                def full_shape_to_list(shape):
                    """Returns shape as a list if shape is fully defined."""
                    if shape and shape.is_fully_defined():
                        return shape.as_list()
                    else:
                        return []

                def assert_expected_shape():
                    """Asserts that the initial value has the expected shape."""
                    if expected_shape:
                        expected_shape.assert_is_compatible_with(
                            self._initial_value.get_shape())

                if init_from_fn:
                    expected_shape_list = full_shape_to_list(expected_shape)
                    set_shape = (validate_shape and
                                 expected_shape.is_fully_defined())
                    self._variable = state_ops.variable_op(expected_shape_list,
                                                           dtype.base_dtype,
                                                           set_shape=set_shape,
                                                           name=name)
                    with ops.colocate_with(self._variable.op):
                        with ops.name_scope("Initializer"):
                            # Colocate the tensors created by the initial_value() function
                            # with the variable itself.
                            self._initial_value = ops.convert_to_tensor(
                                initial_value(),
                                name="initial_value",
                                dtype=dtype)
                            assert_expected_shape()

                # Or get the initial value from a Tensor or Python object.
                else:
                    self._initial_value = ops.convert_to_tensor(
                        initial_value, name="initial_value", dtype=dtype)
                    assert_expected_shape()
                    set_shape = (
                        validate_shape
                        and self._initial_value.get_shape().is_fully_defined())
                    # In this case, the variable op can't be created until after the
                    # initial_value has been converted to a Tensor with a known type.
                    self._variable = state_ops.variable_op(
                        full_shape_to_list(self._initial_value.get_shape()),
                        self._initial_value.dtype.base_dtype,
                        set_shape=set_shape,
                        name=name)

                # Manually overrides the variable's shape with the initial value's.
                if validate_shape:
                    initial_value_shape = self._initial_value.get_shape()
                    if not initial_value_shape.is_fully_defined():
                        raise ValueError(
                            "initial_value must have a shape specified: %s" %
                            self._initial_value)
                    self._variable.set_shape(initial_value_shape)
                    # TODO(b/28152992): Remove the below hack modifying the node_def shape
                    # directly once set_shape() handles it.
                    self._variable.op.node_def.attr["shape"].shape.CopyFrom(
                        initial_value_shape.as_proto())

                # Assigns initial value.
                self._initializer_op = state_ops.assign(
                    self._variable,
                    self._initial_value,
                    validate_shape=validate_shape).op

                # TODO(vrv): Change this class to not take caching_device, but
                # to take the op to colocate the snapshot with, so we can use
                # colocation rather than devices.
                if caching_device is not None:
                    with ops.device(caching_device):
                        self._snapshot = array_ops.identity(self._variable,
                                                            name="read")
                else:
                    with ops.colocate_with(self._variable.op):
                        self._snapshot = array_ops.identity(self._variable,
                                                            name="read")

        ops.add_to_collections(collections, self)
        self._caching_device = caching_device
        self._save_slice_info = None
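The two code paths in `_init_from_args` (callable initializer vs. concrete initial value) mirror how `initial_value` can be supplied to the variable constructor. A minimal sketch with the modern `tf.Variable`, passing `dtype` alongside the callable as the docstring above requires:

```python
import tensorflow as tf

# Path 1: initial value from a zero-argument callable; dtype given explicitly.
v1 = tf.Variable(initial_value=lambda: tf.zeros([2, 3]), dtype=tf.float32)

# Path 2: initial value from a concrete tensor (dtype and shape are inferred).
v2 = tf.Variable(tf.ones([2, 3]))

print(v1.shape, v2.shape)  # (2, 3) (2, 3)
```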
Example #52
  def get_variable(self, name, shape=None, dtype=types.float32,
                   initializer=None, reuse=None, trainable=True,
                   collections=None):
    """Gets an existing variable with these parameters or create a new one.

    If a variable with the given name is already stored, we return the stored
    variable. Otherwise, we create a new one.

    Set `reuse` to `True` when you only want to reuse existing Variables.
    Set `reuse` to `False` when you only want to create new Variables.
    If `reuse` is `None` (the default), both new and existing variables are
    returned.

    If initializer is `None` (the default), the default initializer passed in
    the constructor is used. If that one is `None` too, we use a new
    `UniformUnitScalingInitializer`.

    Args:
      name: the name of the new or existing variable.
      shape: shape of the new or existing variable.
      dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
      initializer: initializer for the variable.
      reuse: a Boolean or `None`. Controls reuse or creation of variables.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see variables.Variable).
      collections: List of graph collections keys to add the Variable to.
        Defaults to `[GraphKeys.VARIABLES]` (see variables.Variable).

    Returns:
      The created or existing variable.

    Raises:
      ValueError: when creating a new variable and shape is not declared,
        when reusing a variable and specifying a conflicting shape,
        or when violating reuse during variable creation.
    """
    should_check = reuse is not None
    dtype = types.as_dtype(dtype)
    shape = tensor_shape.as_shape(shape)
    if name in self._vars:
      # Here we handle the case when returning an existing variable.
      if should_check and not reuse:
        raise ValueError("Over-sharing: Variable %s already exists, disallowed."
                         " Did you mean to set reuse=True in VarScope?" % name)
      found_var = self._vars[name]
      if not shape.is_compatible_with(found_var.get_shape()):
        raise ValueError("Trying to share variable %s, but specified shape %s"
                         " and found shape %s." % (name, str(shape),
                                                   str(found_var.get_shape())))
      if not dtype.is_compatible_with(found_var.dtype):
        dtype_str = dtype.name
        found_type_str = found_var.dtype.name
        raise ValueError("Trying to share variable %s, but specified dtype %s"
                         " and found dtype %s." % (name, str(dtype_str),
                                                   str(found_type_str)))
      return found_var

    # The code below handles only the case of creating a new variable.
    if should_check and reuse:
      raise ValueError("Under-sharing: Variable %s does not exist, disallowed."
                       " Did you mean to set reuse=None in VarScope?" % name)
    if not shape.is_fully_defined():
      raise ValueError("Shape of a new variable (%s) must be fully defined, "
                       "but instead was %s." % (name, shape))
    if initializer is None:
      initializer = init_ops.uniform_unit_scaling_initializer()
    with ops.name_scope(name + "/Initializer/"):
      init_val = initializer(shape.as_list(), dtype=dtype)
    v = variables.Variable(init_val, name=name, trainable=trainable,
                           collections=collections)
    self._vars[name] = v
    logging.info("Created variable %s with shape %s and init %s", v.name,
                 format(shape), str(initializer))
    return v
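The over-/under-sharing checks above are what the `reuse` flag of `tf.compat.v1.variable_scope` drives. A hedged sketch of both sides of the check, assuming graph-mode behaviour through the compat module:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  with tf.compat.v1.variable_scope("layer"):
    w = tf.compat.v1.get_variable("w", shape=[3, 4])        # creates layer/w

  with tf.compat.v1.variable_scope("layer", reuse=True):
    w_again = tf.compat.v1.get_variable("w", shape=[3, 4])  # returns the same object
  assert w is w_again

  with tf.compat.v1.variable_scope("layer", reuse=True):
    try:
      tf.compat.v1.get_variable("missing", shape=[1])       # under-sharing
    except ValueError as e:
      print(e)
```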
Example #53
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
  """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-0 or rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.

  Raises:
    ValueError: If the shape is rank-0 and is not statically known to be -1.
  """
  if isinstance(tensor, ops.EagerTensor):
    return tensor_shape.as_shape(
        [dim if dim != -1 else None for dim in tensor.numpy()])

  if tensor.get_shape().ndims == 0:
    value = constant_value(tensor)
    if value is None:
      raise ValueError(
          "Received a scalar with unknown value as shape; require a statically "
          "known scalar with value '-1' to describe an unknown shape.")
    if value != -1:
      raise ValueError(
          "Received a scalar value '%s' as shape; require a statically known "
          "scalar with value '-1' to describe an unknown shape." % value)
    return tensor_shape.unknown_shape()

  shape = tensor.get_shape().with_rank(1)
  if shape == [0]:
    return tensor_shape.scalar()
  elif tensor.op.type == "Shape":
    return tensor.op.inputs[0].get_shape()
  elif tensor.op.type == "Pack":
    ret = tensor_shape.scalar()  # Empty list.
    # Since we expect rank 1 inputs, Pack's axis must be zero, otherwise it
    # would not be rank 1.
    assert tensor.op.get_attr("axis") == 0
    for pack_input in tensor.op.inputs:
      # `pack_input` must be a scalar. Attempt to evaluate it, and append it
      # to `ret`.
      pack_input_val = constant_value(pack_input)
      if pack_input_val is None or pack_input_val < 0:
        new_dim = tensor_shape.Dimension(None)
      else:
        new_dim = tensor_shape.Dimension(pack_input_val)
      ret = ret.concatenate([new_dim])
    return ret
  elif tensor.op.type == "Concat":
    # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[1:]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  elif tensor.op.type == "ConcatV2":
    # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[:-1]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  elif tensor.op.type == "StridedSlice":
    try:
      begin = constant_value(tensor.op.inputs[1])
      end = constant_value(tensor.op.inputs[2])
      strides = constant_value(tensor.op.inputs[3])
      if begin is not None and end is not None and strides is not None:
        begin = begin[0]
        end = end[0]
        strides = strides[0]
        begin_mask = tensor.op.get_attr("begin_mask")
        if begin_mask == 1:
          begin = None
        end_mask = tensor.op.get_attr("end_mask")
        if end_mask == 1:
          end = None

        ellipsis_mask = tensor.op.get_attr("ellipsis_mask")
        new_axis_mask = tensor.op.get_attr("new_axis_mask")
        shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask")
        valid_attributes = (not ellipsis_mask and not new_axis_mask and
                            not shrink_axis_mask and (not begin_mask or
                                                      (begin_mask == 1)) and
                            (not end_mask or (end_mask == 1)))
        if valid_attributes:  # additional inputs not supported
          prev = constant_value_as_shape(tensor.op.inputs[0])
          prev = prev[begin:end:strides]
          ret = tensor_shape.TensorShape(prev)
          return ret

    except ValueError:  # Could come from get_attr or slicing prev.
      pass
    except TypeError:  # Could come from slicing prev.
      pass

  ret = tensor_shape.unknown_shape(shape.dims[0].value)
  value = constant_value(tensor)
  if value is not None:
    ret = ret.merge_with(
        tensor_shape.TensorShape([d if d >= 0 else None for d in value]))
  return ret
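A hedged sketch of the `Shape` and `StridedSlice` branches above, assuming a TF 2.x install and graph mode via the compat module: slicing the result of `tf.shape` keeps the statically known dimensions.

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  x = tf.compat.v1.placeholder(tf.float32, shape=[None, 28, 28])
  dims = tf.shape(x)[1:]        # a StridedSlice over a "Shape" op
  y = tf.fill(dims, 0.0)
  print(y.shape)  # expected: (28, 28) -- the sliced static dims are preserved
```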
Example #54
    def get_variable(self,
                     name,
                     shape=None,
                     dtype=dtypes.float32,
                     initializer=None,
                     regularizer=None,
                     reuse=None,
                     trainable=True,
                     collections=None,
                     caching_device=None):
        """Gets an existing variable with these parameters or create a new one.

        If a variable with the given name is already stored, we return the stored
        variable. Otherwise, we create a new one.

        Set `reuse` to `True` when you only want to reuse existing Variables.
        Set `reuse` to `False` when you only want to create new Variables.
        If `reuse` is `None` (the default), both new and existing variables are
        returned.

        If initializer is `None` (the default), the default initializer passed in
        the constructor is used. If that one is `None` too, we use a new
        `UniformUnitScalingInitializer`. If initializer is a Tensor, we use
        it as a value and derive the shape from the initializer.

        Args:
          name: the name of the new or existing variable.
          shape: shape of the new or existing variable.
          dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
          initializer: initializer for the variable.
          regularizer: a (Tensor -> Tensor or None) function; the result of
            applying it on a newly created variable will be added to the collection
            GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
          reuse: a Boolean or `None`. Controls reuse or creation of variables.
          trainable: If `True` also add the variable to the graph collection
            `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
          collections: List of graph collections keys to add the Variable to.
            Defaults to `[GraphKeys.VARIABLES]` (see tf.Variable).
          caching_device: Optional device string or function describing where the
            Variable should be cached for reading.  Defaults to the Variable's
            device.  If not `None`, caches on another device.  Typical use is to
            cache on the device where the Ops using the Variable reside, to
            deduplicate copying through `Switch` and other conditional statements.

        Returns:
          The created or existing variable.

        Raises:
          ValueError: when creating a new variable and shape is not declared,
            when reusing a variable and specifying a conflicting shape,
            or when violating reuse during variable creation.
        """
        # Set to true if initializer is a constant.
        initializing_from_value = False
        if initializer is not None and isinstance(initializer, ops.Tensor):
            initializing_from_value = True
        if shape is not None and initializing_from_value:
            raise ValueError(
                "If initializer is a constant, do not specify shape.")

        should_check = reuse is not None
        dtype = dtypes.as_dtype(dtype)
        shape = tensor_shape.as_shape(shape)

        if name in self._vars:
            # Here we handle the case when returning an existing variable.
            if should_check and not reuse:
                raise ValueError(
                    "Variable %s already exists, disallowed."
                    " Did you mean to set reuse=True in VarScope?" % name)
            found_var = self._vars[name]
            if not shape.is_compatible_with(found_var.get_shape()):
                raise ValueError(
                    "Trying to share variable %s, but specified shape %s"
                    " and found shape %s." %
                    (name, shape, found_var.get_shape()))
            if not dtype.is_compatible_with(found_var.dtype):
                dtype_str = dtype.name
                found_type_str = found_var.dtype.name
                raise ValueError(
                    "Trying to share variable %s, but specified dtype %s"
                    " and found dtype %s." % (name, dtype_str, found_type_str))
            return found_var

        # The code below handles only the case of creating a new variable.
        if should_check and reuse:
            raise ValueError("Variable %s does not exist, disallowed."
                             " Did you mean to set reuse=None in VarScope?" %
                             name)
        if not shape.is_fully_defined() and not initializing_from_value:
            raise ValueError(
                "Shape of a new variable (%s) must be fully defined, "
                "but instead was %s." % (name, shape))

        # Create the tensor to initialize the variable.
        if initializer is None:
            initializer = init_ops.uniform_unit_scaling_initializer()
        # Clear control dependencies while creating the initializer.
        with ops.control_dependencies(None):
            if initializing_from_value:
                init_val = initializer
            else:
                with ops.name_scope(name + "/Initializer/"):
                    init_val = initializer(shape.as_list(), dtype=dtype)

        # Create the variable.
        v = variables.Variable(init_val,
                               name=name,
                               trainable=trainable,
                               collections=collections,
                               caching_device=caching_device)
        self._vars[name] = v
        logging.info("Created variable %s with shape %s and init %s", v.name,
                     format(shape), initializer)

        # Run the regularizer if requested and save the resulting loss.
        if regularizer:
            with ops.name_scope(name + "/Regularizer/"):
                loss = regularizer(v)
            if loss:
                logging.info(
                    "Applied regularizer to %s and added the result %s to "
                    "REGULARIZATION_LOSSES.", v.name, loss.name)
                ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
                                      loss)

        return v
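A small sketch of the reuse semantics documented above. The enclosing class is not shown in this snippet, so `store` below is a hypothetical instance of that variable-store class; names and shapes are made up for illustration.

# `store` is assumed to be an instance of the (unshown) class defining get_variable.
v = store.get_variable("w", shape=[10, 20])      # created; reuse defaults to None
v_again = store.get_variable("w", reuse=True)    # returns the stored Variable
assert v is v_again

try:
    store.get_variable("w", shape=[10, 21], reuse=True)  # conflicting shape
except ValueError as e:
    print(e)  # "Trying to share variable w, but specified shape ..."

# reuse=True for a name that was never created also raises ValueError.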
def constant(value, dtype=None, shape=None, name="Const", verify_shape=False):
    """Creates a constant tensor.

  The resulting tensor is populated with values of type `dtype`, as
  specified by arguments `value` and (optionally) `shape` (see examples
  below).

  The argument `value` can be a constant value, or a list of values of type
  `dtype`. If `value` is a list, then the length of the list must be less
  than or equal to the number of elements implied by the `shape` argument (if
  specified). In the case where the list length is less than the number of
  elements specified by `shape`, the last element in the list will be used
  to fill the remaining entries.

  The argument `shape` is optional. If present, it specifies the dimensions of
  the resulting tensor. If not present, the shape of `value` is used.

  If the argument `dtype` is not specified, then the type is inferred from
  the type of `value`.

  For example:

  ```python
  # Constant 1-D Tensor populated with value list.
  tensor = tf.constant([1, 2, 3, 4, 5, 6, 7]) => [1 2 3 4 5 6 7]

  # Constant 2-D tensor populated with scalar value -1.
  tensor = tf.constant(-1.0, shape=[2, 3]) => [[-1. -1. -1.]
                                               [-1. -1. -1.]]
  ```

  Args:
    value:          A constant value (or list) of output type `dtype`.

    dtype:          The type of the elements of the resulting tensor.

    shape:          Optional dimensions of resulting tensor.

    name:           Optional name for the tensor.

    verify_shape:   Boolean that enables verification of the shape of `value`.

  Returns:
    A Constant Tensor.

  Raises:
    TypeError: if shape is incorrectly specified or unsupported.
  """
    ctx = context.context()
    if not ctx.in_graph_mode():
        t = convert_to_eager_tensor(value, ctx, dtype)
        if shape is None:
            return t
        shape = tensor_shape.as_shape(shape)
        if shape == t.shape:
            return t
        if verify_shape:
            raise TypeError("Expected Tensor's shape: %s, got %s." %
                            (tuple(shape), tuple(t.shape)))
        num_t = t.shape.num_elements()
        # TODO(josh11b): Implement shape -> eager tensor conversion.
        if num_t == shape.num_elements():
            return _eager_reshape(t, shape.as_list(), ctx)
        if num_t == 1:
            if t.dtype == dtypes.bool:
                # We don't have a Fill kernel for bool dtype on GPU. So we first run
                # Fill on CPU and then copy to GPU if needed.
                with ops.device("/device:CPU:0"):
                    x = _eager_fill(shape.as_list(), t.cpu(), ctx)
                return _eager_identity(x, ctx)
            else:
                return _eager_fill(shape.as_list(), t, ctx)
        raise TypeError(
            "Eager execution of tf.constant with unsupported shape "
            "(value has %d elements, shape is %s with %d elements)." %
            (num_t, shape, shape.num_elements()))
    g = ops.get_default_graph()
    tensor_value = attr_value_pb2.AttrValue()
    tensor_value.tensor.CopyFrom(
        tensor_util.make_tensor_proto(value,
                                      dtype=dtype,
                                      shape=shape,
                                      verify_shape=verify_shape))
    dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
    const_tensor = g.create_op("Const", [], [dtype_value.type],
                               attrs={
                                   "value": tensor_value,
                                   "dtype": dtype_value
                               },
                               name=name).outputs[0]
    return const_tensor
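A short sketch of the shape-filling behaviour described in the docstring, using the public tf.constant (which, as far as I can tell, corresponds to this function); expected output is shown in comments.

import tensorflow as tf

# A scalar value is broadcast to the requested shape (this works in both the
# eager fill path above and the graph-mode make_tensor_proto path).
print(tf.constant(7, shape=[2, 3]))
# [[7 7 7]
#  [7 7 7]]

# Padding a short list with its last element is a graph-mode/proto-path feature;
# the eager branch above raises TypeError for mismatched element counts.
# tf.constant([1, 2], shape=[5])   # -> [1 2 2 2 2] on the proto path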
def call_cpp_shape_fn(op,
                      input_tensors_needed=None,
                      input_tensors_as_shapes_needed=None,
                      debug_python_shape_fn=None,
                      require_shape_fn=True):
    """A shape function that delegates to the registered C++ shape function.

  Args:
    op: the node in the graph for which to compute output shapes.
    input_tensors_needed: a list of input tensor indices for which to compute
      the input tensor's value and pass to the C++ shape function.
    input_tensors_as_shapes_needed: a list of input tensor indices for which to
      compute the constant_value_as_shape and pass to the C++ shape function.
    debug_python_shape_fn: For testing only during migration to using
      call_cpp_shape_fn. Do not submit calls that set this,
      as the comparison is slow. If non-None, the python shape function;
      this function will be called and its output compared to that of
      the C++ shape function.
    require_shape_fn: If true, and the C++ shape function is not registered
      in the current binary then an exception is raised; otherwise, if the
      C++ shape function is not registered then unknown_shape is used.

  Returns:
    A dictionary with the following keys:
      shapes: A TensorShape list of the output shapes of the op, as computed
        using the C++ shape inference function registered for the op.
      handle_shapes: A TensorShape list of the shapes for handle outputs, if
         any.
      handle_dtypes: A list of DataType enums for the handle outputs, if any.

  Raises:
    ValueError: If the C++ shape function returned an error (e.g. because the
      shapes of the inputs are of the wrong rank or otherwise incompatible
      according to the shape function).
    RuntimeError: If the C++ shape function is not registered and
      <require_shape_fn> is True.
  """
    if op.type == "Const":
        # To avoid serializing large constants, we special-case constant
        # here, even though it has a C++ shape function.  When Python
        # calls the C / C-API directly, we should be able to remove this.
        return {
            "shapes":
            [tensor_shape.TensorShape(op.get_attr("value").tensor_shape)],
            "handle_shapes": [tensor_shape.TensorShape(None).as_proto()],
            "handle_dtypes": [types_pb2.DT_INVALID]
        }

    node_def_str = op.node_def.SerializeToString()

    def tensor_to_inference_result(t):
        r = cpp_shape_inference_pb2.CppShapeInferenceResult()
        r.shape.CopyFrom(t.get_shape().as_proto())
        # pylint: disable=protected-access
        r.handle_shape.CopyFrom(t._handle_shape)
        r.handle_dtype = t._handle_dtype
        # pylint: enable=protected-access
        return r.SerializeToString()

    input_shapes = [tensor_to_inference_result(i) for i in op.inputs]

    input_tensors = [None for i in input_shapes]
    if input_tensors_needed:
        for idx in input_tensors_needed:
            v = tensor_util.constant_value(op.inputs[idx])
            if v is not None:
                input_tensors[idx] = np.asarray(v)

    serialized_unknown_shape = (
        tensor_shape.TensorShape(None).as_proto().SerializeToString())
    arr = [serialized_unknown_shape for i in input_shapes]
    if input_tensors_as_shapes_needed:
        for idx in input_tensors_as_shapes_needed:
            s = tensor_util.constant_value_as_shape(op.inputs[idx])
            if s is not None:
                arr[idx] = s.as_proto().SerializeToString()
    input_tensors_as_shapes = arr

    missing_shape_fn = False
    try:
        with errors.raise_exception_on_not_ok_status() as status:
            output_shapes = pywrap_tensorflow.RunCppShapeInference(
                node_def_str, input_shapes, input_tensors,
                input_tensors_as_shapes, status)
    except errors.InvalidArgumentError as err:
        if err.message.startswith("No shape inference function exists for op"):
            missing_shape_fn = True
        else:
            raise ValueError(err.message)

    if missing_shape_fn:
        if require_shape_fn:
            raise RuntimeError(
                "No C++ shape function registered for standard op: %s" %
                op.type)
        return unknown_shape(op)

    # Convert TensorShapeProto values in output_shapes.
    result_protos = [
        cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
        for s in output_shapes
    ]
    result = [r.shape for r in result_protos]
    result_handle_shapes = [r.handle_shape for r in result_protos]
    result_handle_dtypes = [r.handle_dtype for r in result_protos]

    if debug_python_shape_fn:
        try:
            python_result = [
                tensor_shape.as_shape(s) for s in debug_python_shape_fn(op)
            ]
        except Exception as err:
            raise AssertionError("Python shape function returned an error "
                                 "but C++ shape function did not: %s" %
                                 str(err))
        result_as_shapes = [tensor_shape.as_shape(s) for s in result]
        if str(result_as_shapes) != str(python_result):
            raise ValueError(
                ("Python vs CPP shape mismatch.  "
                 "CPP: %s vs python: %s on node %s "
                 "with input shapes %s") %
                (str(result_as_shapes), str(python_result), str(op.node_def),
                 ",".join([str(i.get_shape()) for i in op.inputs])))

    return {
        "shapes": result,
        "handle_shapes": result_handle_shapes,
        "handle_dtypes": result_handle_dtypes
    }
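A minimal sketch that exercises only the `Const` special case at the top of the function (that branch reads the shape from the node's attrs and never reaches the C++ shape-inference call). It assumes a TF 1.x graph-mode build of roughly the same era as this snippet, where common_shapes is importable.

import tensorflow as tf
from tensorflow.python.framework import common_shapes

g = tf.Graph()
with g.as_default():
    c = tf.constant([[1.0, 2.0, 3.0]])

result = common_shapes.call_cpp_shape_fn(c.op)
print(result["shapes"])         # e.g. [TensorShape([1, 3])]
print(result["handle_dtypes"])  # [0]  (DT_INVALID, as returned by the Const branch)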
Example #57
0
class StructureTest(test_base.DatasetTestBase, parameterized.TestCase):

    # NOTE(mrry): The arguments must be lifted into lambdas because otherwise they
    # will be executed before the (eager- or graph-mode) test environment has been
    # set up.
    # pylint: disable=g-long-lambda,protected-access
    @parameterized.parameters(
        (lambda: constant_op.constant(37.0), structure.TensorStructure,
         [dtypes.float32], [[]]),
        (lambda: tensor_array_ops.TensorArray(
            dtype=dtypes.float32, element_shape=(3, ), size=0),
         structure.TensorArrayStructure, [dtypes.variant], [None, 3]),
        (lambda: sparse_tensor.SparseTensor(
            indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
         structure.SparseTensorStructure, [dtypes.variant], [None]),
        (lambda: (constant_op.constant(37.0), constant_op.constant([1, 2, 3])),
         structure.NestedStructure, [dtypes.float32, dtypes.int32], [[], [3]]),
        (lambda: {
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }, structure.NestedStructure, [dtypes.float32, dtypes.int32], [[], [3]
                                                                       ]),
        (lambda: {
            "a":
            constant_op.constant(37.0),
            "b":
            (sparse_tensor.
             SparseTensor(indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
             sparse_tensor.SparseTensor(
                 indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
        }, structure.NestedStructure,
         [dtypes.float32, dtypes.variant, dtypes.variant], [[], None, None]))
    def testFlatStructure(self, value_fn, expected_structure, expected_types,
                          expected_shapes):
        value = value_fn()
        s = structure.Structure.from_value(value)
        self.assertIsInstance(s, expected_structure)
        self.assertEqual(expected_types, s._flat_types)
        for expected, actual in zip(expected_shapes, s._flat_shapes):
            self.assertTrue(actual.is_compatible_with(expected))
            self.assertTrue(
                tensor_shape.as_shape(expected).is_compatible_with(actual))

    @parameterized.parameters(
        (lambda: constant_op.constant(37.0), lambda: [
            constant_op.constant(38.0),
            array_ops.placeholder(dtypes.float32),
            variables.Variable(100.0), 42.0,
            np.array(42.0, dtype=np.float32)
        ],
         lambda: [constant_op.constant([1.0, 2.0]),
                  constant_op.constant(37)]),
        (lambda: tensor_array_ops.TensorArray(
            dtype=dtypes.float32, element_shape=(3, ), size=0), lambda: [
                tensor_array_ops.TensorArray(
                    dtype=dtypes.float32, element_shape=(3, ), size=0),
                tensor_array_ops.TensorArray(
                    dtype=dtypes.float32, element_shape=(3, ), size=10)
            ], lambda: [
                tensor_array_ops.TensorArray(
                    dtype=dtypes.int32, element_shape=(3, ), size=0),
                tensor_array_ops.TensorArray(
                    dtype=dtypes.float32, element_shape=(), size=0)
            ]),
        (lambda: sparse_tensor.SparseTensor(
            indices=[[3, 4]], values=[-1], dense_shape=[4, 5]), lambda: [
                sparse_tensor.SparseTensor(indices=[[1, 1], [3, 4]],
                                           values=[10, -1],
                                           dense_shape=[4, 5]),
                sparse_tensor.SparseTensorValue(indices=[[1, 1], [3, 4]],
                                                values=[10, -1],
                                                dense_shape=[4, 5]),
                array_ops.sparse_placeholder(dtype=dtypes.int32),
                array_ops.sparse_placeholder(dtype=dtypes.int32,
                                             shape=[None, None])
            ], lambda: [
                constant_op.constant(37, shape=[4, 5]),
                sparse_tensor.SparseTensor(
                    indices=[[3, 4]], values=[-1], dense_shape=[5, 6]),
                array_ops.sparse_placeholder(dtype=dtypes.int32,
                                             shape=[None, None, None]),
                sparse_tensor.SparseTensor(
                    indices=[[3, 4]], values=[-1.0], dense_shape=[4, 5])
            ]),
        (lambda: {
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }, lambda: [{
            "a": constant_op.constant(15.0),
            "b": constant_op.constant([4, 5, 6])
        }], lambda: [{
            "a": constant_op.constant(15.0),
            "b": constant_op.constant([4, 5, 6, 7])
        }, {
            "a": constant_op.constant(15),
            "b": constant_op.constant([4, 5, 6])
        }, {
            "a":
            constant_op.constant(15),
            "b":
            sparse_tensor.SparseTensor(
                indices=[[0], [1], [2]], values=[4, 5, 6], dense_shape=[3])
        }, (constant_op.constant(15.0), constant_op.constant([4, 5, 6]))]),
    )
    @test_util.run_deprecated_v1
    def testIsCompatibleWithStructure(self, original_value_fn,
                                      compatible_values_fn,
                                      incompatible_values_fn):
        original_value = original_value_fn()
        compatible_values = compatible_values_fn()
        incompatible_values = incompatible_values_fn()
        s = structure.Structure.from_value(original_value)
        for compatible_value in compatible_values:
            self.assertTrue(
                s.is_compatible_with(
                    structure.Structure.from_value(compatible_value)))
        for incompatible_value in incompatible_values:
            self.assertFalse(
                s.is_compatible_with(
                    structure.Structure.from_value(incompatible_value)))

    @parameterized.parameters(
        (lambda: constant_op.constant(37.0), ),
        (lambda: sparse_tensor.SparseTensor(
            indices=[[3, 4]], values=[-1], dense_shape=[4, 5]), ),
        (lambda: tensor_array_ops.TensorArray(
            dtype=dtypes.float32, element_shape=(), size=1).write(0, 7), ),
        (lambda: {
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }, ),
        (lambda: {
            "a":
            constant_op.constant(37.0),
            "b":
            (sparse_tensor.
             SparseTensor(indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
             sparse_tensor.SparseTensor(
                 indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
        }, ),
    )
    def testRoundTripConversion(self, value_fn):
        value = value_fn()
        s = structure.Structure.from_value(value)

        def maybe_stack_ta(v):
            if isinstance(v, tensor_array_ops.TensorArray):
                return v.stack()
            else:
                return v

        before = self.evaluate(maybe_stack_ta(value))
        after = self.evaluate(
            maybe_stack_ta(s._from_tensor_list(s._to_tensor_list(value))))

        flat_before = nest.flatten(before)
        flat_after = nest.flatten(after)
        for b, a in zip(flat_before, flat_after):
            if isinstance(b, sparse_tensor.SparseTensorValue):
                self.assertAllEqual(b.indices, a.indices)
                self.assertAllEqual(b.values, a.values)
                self.assertAllEqual(b.dense_shape, a.dense_shape)
            else:
                self.assertAllEqual(b, a)

    # pylint: enable=g-long-lambda

    def testIncompatibleStructure(self):
        # Define three mutually incompatible values/structures, and assert that:
        # 1. Using one structure to flatten a value with an incompatible structure
        #    fails.
        # 2. Using one structure to restructure a flattened value with an
        #    incompatible structure fails.
        value_tensor = constant_op.constant(42.0)
        s_tensor = structure.Structure.from_value(value_tensor)
        flat_tensor = s_tensor._to_tensor_list(value_tensor)

        value_sparse_tensor = sparse_tensor.SparseTensor(indices=[[0, 0]],
                                                         values=[1],
                                                         dense_shape=[1, 1])
        s_sparse_tensor = structure.Structure.from_value(value_sparse_tensor)
        flat_sparse_tensor = s_sparse_tensor._to_tensor_list(
            value_sparse_tensor)

        value_nest = {
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }
        s_nest = structure.Structure.from_value(value_nest)
        flat_nest = s_nest._to_tensor_list(value_nest)

        with self.assertRaisesRegexp(
                ValueError,
                r"SparseTensor.* is not convertible to a tensor with "
                r"dtype.*float32.* and shape \(\)"):
            s_tensor._to_tensor_list(value_sparse_tensor)
        with self.assertRaisesRegexp(
                ValueError,
                r"Value \{.*\} is not convertible to a tensor with "
                r"dtype.*float32.* and shape \(\)"):
            s_tensor._to_tensor_list(value_nest)

        with self.assertRaisesRegexp(TypeError,
                                     "Input must be a SparseTensor"):
            s_sparse_tensor._to_tensor_list(value_tensor)

        with self.assertRaisesRegexp(TypeError,
                                     "Input must be a SparseTensor"):
            s_sparse_tensor._to_tensor_list(value_nest)

        with self.assertRaisesRegexp(
                ValueError,
                "Tensor.* not compatible with the nested structure "
                ".*TensorStructure.*TensorStructure"):
            s_nest._to_tensor_list(value_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.* not compatible with the nested structure "
                ".*TensorStructure.*TensorStructure"):
            s_nest._to_tensor_list(value_sparse_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                r"Cannot convert.*with dtype.*float32.* and shape \(\)"):
            s_tensor._from_tensor_list(flat_sparse_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "TensorStructure corresponds to a single tf.Tensor."):
            s_tensor._from_tensor_list(flat_nest)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensorStructure corresponds to a single tf.variant "
                "vector of length 3."):
            s_sparse_tensor._from_tensor_list(flat_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensorStructure corresponds to a single tf.variant "
                "vector of length 3."):
            s_sparse_tensor._from_tensor_list(flat_nest)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 1."):
            s_nest._from_tensor_list(flat_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 1."):
            s_nest._from_tensor_list(flat_sparse_tensor)

    def testIncompatibleNestedStructure(self):
        # Define three mutually incompatible nested values/structures, and assert
        # that:
        # 1. Using one structure to flatten a value with an incompatible structure
        #    fails.
        # 2. Using one structure to restructure a flattened value with an
        #    incompatible structure fails.

        value_0 = {
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }
        s_0 = structure.Structure.from_value(value_0)
        flat_s_0 = s_0._to_tensor_list(value_0)

        # `value_1` has compatible nested structure with `value_0`, but different
        # classes.
        value_1 = {
            "a":
            constant_op.constant(37.0),
            "b":
            sparse_tensor.SparseTensor(indices=[[0, 0]],
                                       values=[1],
                                       dense_shape=[1, 1])
        }
        s_1 = structure.Structure.from_value(value_1)
        flat_s_1 = s_1._to_tensor_list(value_1)

        # `value_2` has incompatible nested structure with `value_0` and `value_1`.
        value_2 = {
            "a":
            constant_op.constant(37.0),
            "b": (sparse_tensor.SparseTensor(indices=[[0, 0]],
                                             values=[1],
                                             dense_shape=[1, 1]),
                  sparse_tensor.SparseTensor(indices=[[3, 4]],
                                             values=[-1],
                                             dense_shape=[4, 5]))
        }
        s_2 = structure.Structure.from_value(value_2)
        flat_s_2 = s_2._to_tensor_list(value_2)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.* not compatible with the nested structure "
                ".*TensorStructure"):
            s_0._to_tensor_list(value_1)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.*SparseTensor.* not compatible with the "
                "nested structure .*TensorStructure"):
            s_0._to_tensor_list(value_2)

        with self.assertRaisesRegexp(
                ValueError,
                "Tensor.* not compatible with the nested structure "
                ".*SparseTensorStructure"):
            s_1._to_tensor_list(value_0)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.*SparseTensor.* not compatible with the "
                "nested structure .*TensorStructure"):
            s_0._to_tensor_list(value_2)

        # NOTE(mrry): The repr of the dictionaries is not sorted, so the regexp
        # needs to account for "a" coming before or after "b". It might be worth
        # adding a deterministic repr for these error messages (among other
        # improvements).
        with self.assertRaisesRegexp(
                ValueError,
                "Tensor.*Tensor.* not compatible with the nested structure "
                ".*(TensorStructure.*SparseTensorStructure.*SparseTensorStructure|"
                "SparseTensorStructure.*SparseTensorStructure.*TensorStructure)"
        ):
            s_2._to_tensor_list(value_0)

        with self.assertRaisesRegexp(
                ValueError, "(Tensor.*SparseTensor|SparseTensor.*Tensor).* "
                "not compatible with the nested structure .*"
                "(TensorStructure.*SparseTensorStructure.*SparseTensorStructure|"
                "SparseTensorStructure.*SparseTensorStructure.*TensorStructure)"
        ):
            s_2._to_tensor_list(value_1)

        with self.assertRaisesRegexp(
                ValueError,
                r"Cannot convert.*with dtype.*int32.* and shape \(3,\)"):
            s_0._from_tensor_list(flat_s_1)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 3."):
            s_0._from_tensor_list(flat_s_2)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensorStructure corresponds to a single tf.variant "
                "vector of length 3."):
            s_1._from_tensor_list(flat_s_0)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 3."):
            s_1._from_tensor_list(flat_s_2)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 3 flat values in NestedStructure but got 2."):
            s_2._from_tensor_list(flat_s_0)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 3 flat values in NestedStructure but got 2."):
            s_2._from_tensor_list(flat_s_1)

    @parameterized.named_parameters(
        ("Tensor", dtypes.float32, tensor_shape.scalar(), ops.Tensor,
         structure.TensorStructure(dtypes.float32, [])),
        ("SparseTensor", dtypes.int32, tensor_shape.matrix(
            2, 2), sparse_tensor.SparseTensor,
         structure.SparseTensorStructure(dtypes.int32, [2, 2])),
        ("TensorArray0", dtypes.int32, tensor_shape.as_shape(
            [None, True, 2, 2]), tensor_array_ops.TensorArray,
         structure.TensorArrayStructure(
             dtypes.int32, [2, 2], dynamic_size=None, infer_shape=True)),
        ("TensorArray1", dtypes.int32, tensor_shape.as_shape(
            [True, None, 2, 2]), tensor_array_ops.TensorArray,
         structure.TensorArrayStructure(
             dtypes.int32, [2, 2], dynamic_size=True, infer_shape=None)),
        ("TensorArray2", dtypes.int32,
         tensor_shape.as_shape([True, False, 2, 2
                                ]), tensor_array_ops.TensorArray,
         structure.TensorArrayStructure(
             dtypes.int32, [2, 2], dynamic_size=True, infer_shape=False)),
        ("Nest", {
            "a": dtypes.float32,
            "b": (dtypes.int32, dtypes.string)
        }, {
            "a": tensor_shape.scalar(),
            "b": (tensor_shape.matrix(2, 2), tensor_shape.scalar())
        }, {
            "a": ops.Tensor,
            "b": (sparse_tensor.SparseTensor, ops.Tensor)
        },
         structure.NestedStructure({
             "a":
             structure.TensorStructure(dtypes.float32, []),
             "b": (structure.SparseTensorStructure(dtypes.int32, [2, 2]),
                   structure.TensorStructure(dtypes.string, []))
         })),
    )
    def testConvertLegacyStructure(self, output_types, output_shapes,
                                   output_classes, expected_structure):
        actual_structure = structure.convert_legacy_structure(
            output_types, output_shapes, output_classes)
        self.assertTrue(
            expected_structure.is_compatible_with(actual_structure))
        self.assertTrue(
            actual_structure.is_compatible_with(expected_structure))

    def testNestedNestedStructure(self):
        # Although `Structure.from_value()` will not construct one, a nested
        # structure containing nested `NestedStructure` objects can occur if a
        # structure is constructed manually.
        s = structure.NestedStructure(
            (structure.TensorStructure(dtypes.int64, []),
             structure.NestedStructure(
                 (structure.TensorStructure(dtypes.float32, []),
                  structure.TensorStructure(dtypes.string, [])))))

        int64_t = constant_op.constant(37, dtype=dtypes.int64)
        float32_t = constant_op.constant(42.0)
        string_t = constant_op.constant("Foo")

        nested_tensors = (int64_t, (float32_t, string_t))

        tensor_list = s._to_tensor_list(nested_tensors)
        for expected, actual in zip([int64_t, float32_t, string_t],
                                    tensor_list):
            self.assertIs(expected, actual)

        (actual_int64_t, (actual_float32_t,
                          actual_string_t)) = s._from_tensor_list(tensor_list)
        self.assertIs(int64_t, actual_int64_t)
        self.assertIs(float32_t, actual_float32_t)
        self.assertIs(string_t, actual_string_t)

        (actual_int64_t,
         (actual_float32_t,
          actual_string_t)) = (s._from_compatible_tensor_list(tensor_list))
        self.assertIs(int64_t, actual_int64_t)
        self.assertIs(float32_t, actual_float32_t)
        self.assertIs(string_t, actual_string_t)

    @parameterized.named_parameters(
        ("Tensor", structure.TensorStructure(dtypes.float32, []), 32,
         structure.TensorStructure(dtypes.float32, [32])),
        ("TensorUnknown", structure.TensorStructure(dtypes.float32, []), None,
         structure.TensorStructure(dtypes.float32, [None])),
        ("SparseTensor", structure.SparseTensorStructure(
            dtypes.float32, [None]), 32,
         structure.SparseTensorStructure(dtypes.float32, [32, None])),
        ("SparseTensorUnknown",
         structure.SparseTensorStructure(dtypes.float32, [4]), None,
         structure.SparseTensorStructure(dtypes.float32, [None, 4])),
        ("Nest",
         structure.NestedStructure({
             "a":
             structure.TensorStructure(dtypes.float32, []),
             "b": (structure.SparseTensorStructure(dtypes.int32, [2, 2]),
                   structure.TensorStructure(dtypes.string, []))
         }), 128,
         structure.NestedStructure({
             "a":
             structure.TensorStructure(dtypes.float32, [128]),
             "b": (structure.SparseTensorStructure(dtypes.int32, [128, 2, 2]),
                   structure.TensorStructure(dtypes.string, [128]))
         })),
    )
    def testBatch(self, element_structure, batch_size,
                  expected_batched_structure):
        batched_structure = element_structure._batch(batch_size)
        self.assertTrue(
            batched_structure.is_compatible_with(expected_batched_structure))
        self.assertTrue(
            expected_batched_structure.is_compatible_with(batched_structure))

    @parameterized.named_parameters(
        ("Tensor", structure.TensorStructure(dtypes.float32, [32]),
         structure.TensorStructure(dtypes.float32, [])),
        ("TensorUnknown", structure.TensorStructure(dtypes.float32, [None]),
         structure.TensorStructure(dtypes.float32, [])),
        ("SparseTensor",
         structure.SparseTensorStructure(dtypes.float32, [32, None]),
         structure.SparseTensorStructure(dtypes.float32, [None])),
        ("SparseTensorUnknown",
         structure.SparseTensorStructure(dtypes.float32, [None, 4]),
         structure.SparseTensorStructure(dtypes.float32, [4])),
        ("Nest",
         structure.NestedStructure({
             "a":
             structure.TensorStructure(dtypes.float32, [128]),
             "b": (structure.SparseTensorStructure(dtypes.int32, [128, 2, 2]),
                   structure.TensorStructure(dtypes.string, [None]))
         }),
         structure.NestedStructure({
             "a":
             structure.TensorStructure(dtypes.float32, []),
             "b": (structure.SparseTensorStructure(dtypes.int32, [2, 2]),
                   structure.TensorStructure(dtypes.string, []))
         })),
    )
    def testUnbatch(self, element_structure, expected_unbatched_structure):
        unbatched_structure = element_structure._unbatch()
        self.assertTrue(
            unbatched_structure.is_compatible_with(
                expected_unbatched_structure))
        self.assertTrue(
            expected_unbatched_structure.is_compatible_with(
                unbatched_structure))

    # pylint: disable=g-long-lambda
    @parameterized.named_parameters(
        ("Tensor", lambda: constant_op.constant([[1.0, 2.0], [3.0, 4.0]]),
         lambda: constant_op.constant([1.0, 2.0])),
        ("SparseTensor", lambda: sparse_tensor.SparseTensor(
            indices=[[0, 0], [1, 1]], values=[13, 27], dense_shape=[2, 2]),
         lambda: sparse_tensor.SparseTensor(
             indices=[[0]], values=[13], dense_shape=[2])),
        ("Nest", lambda:
         (constant_op.constant([[1.0, 2.0], [3.0, 4.0]]),
          sparse_tensor.SparseTensor(
              indices=[[0, 0], [1, 1]], values=[13, 27], dense_shape=[2, 2])),
         lambda: (constant_op.constant([1.0, 2.0]),
                  sparse_tensor.SparseTensor(
                      indices=[[0]], values=[13], dense_shape=[2]))),
    )
    def testToBatchedTensorList(self, value_fn, element_0_fn):
        batched_value = value_fn()
        s = structure.Structure.from_value(batched_value)
        batched_tensor_list = s._to_batched_tensor_list(batched_value)

        # The batch dimension is 2 for all of the test cases.
        # NOTE(mrry): `tf.shape()` does not currently work for the DT_VARIANT
        # tensors in which we store sparse tensors.
        for t in batched_tensor_list:
            if t.dtype != dtypes.variant:
                self.assertEqual(2, self.evaluate(array_ops.shape(t)[0]))

        # Test that the 0th element from the unbatched tensor is equal to the
        # expected value.
        expected_element_0 = self.evaluate(element_0_fn())
        unbatched_s = s._unbatch()
        actual_element_0 = unbatched_s._from_tensor_list(
            [t[0] for t in batched_tensor_list])

        for expected, actual in zip(nest.flatten(expected_element_0),
                                    nest.flatten(actual_element_0)):
            if sparse_tensor.is_sparse(expected):
                self.assertSparseValuesEqual(expected, actual)
            else:
                self.assertAllEqual(expected, actual)
Example #58
0
def _maybe_tensor_shape_from_tensor(shape):
    if isinstance(shape, tf.Tensor):
        return tensor_shape.as_shape(tf.get_static_value(shape))
    else:
        return shape
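A usage sketch, assuming the helper above has been defined and a TF version that provides tf.get_static_value: a constant shape tensor is converted to a TensorShape, anything else is returned unchanged.

import tensorflow as tf
from tensorflow.python.framework import tensor_shape

print(_maybe_tensor_shape_from_tensor(tf.constant([2, 3])))            # (2, 3)
print(_maybe_tensor_shape_from_tensor(tensor_shape.TensorShape([4])))  # (4,)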
Example #59
0
 def __init__(self, call_fn, init_fn, shape_and_dtypes):
   self._init_fn = init_fn
   self._call_fn = call_fn
   self.shape_and_dtypes = shape_and_dtypes
   self.flattened_shapes = [tensor_shape.as_shape(sd.shape) for sd in
                            nest.flatten(self.shape_and_dtypes)]
Example #60
0
def create_partitioned_variables(shape,
                                 slicing,
                                 initializer,
                                 dtype=dtypes.float32,
                                 trainable=True,
                                 collections=None,
                                 name=None,
                                 reuse=None):
    """Create a list of partitioned variables according to the given `slicing`.

  Currently only one dimension of the full variable can be sliced, and the
  full variable can be reconstructed by the concatenation of the returned
  list along that dimension.

  Args:
    shape: List of integers.  The shape of the full variable.
    slicing: List of integers.  How to partition the variable.
      Must be of the same length as `shape`.  Each value
      indicates how many slices to create in the corresponding
      dimension.  Presently only one of the values can be more than 1;
      that is, the variable can only be sliced along one dimension.

      For convenience, the requested number of partitions does not have to
      divide the corresponding dimension evenly.  If it does not, the
      shapes of the partitions are incremented by 1 starting from partition
      0 until all slack is absorbed.  The adjustment rules may change in the
      future, but as you can save/restore these variables with different
      slicing specifications, this should not be a problem.
    initializer: A `Tensor` of shape `shape` or a variable initializer
      function.  If a function, it will be called once for each slice,
      passing the shape and data type of the slice as parameters.  The
      function must return a tensor with the same shape as the slice.
    dtype: Type of the variables. Ignored if `initializer` is a `Tensor`.
    trainable: If True also add all the variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES`.
    collections: List of graph collections keys to add the variables to.
      Defaults to `[GraphKeys.VARIABLES]`.
    name: Optional name for the full variable.  Defaults to
      `"PartitionedVariable"` and gets uniquified automatically.
    reuse: Boolean or `None`; if `True` and name is set, it will reuse
      previously created variables. If `False`, it will create new variables.
      If `None`, it will inherit the parent scope's reuse setting.

  Returns:
    A list of Variables corresponding to the slicing.

  Raises:
    ValueError: If any of the arguments is malformed.
  """
    if len(shape) != len(slicing):
        raise ValueError("The 'shape' and 'slicing' of a partitioned Variable "
                         "must have the length: shape: %s, slicing: %s" %
                         (shape, slicing))
    if len(shape) < 1:
        raise ValueError("A partitioned Variable must have rank at least 1: "
                         "shape: %s" % shape)
    full_shape = tensor_shape.as_shape(shape)
    full_shape.assert_is_fully_defined()
    full_shape = full_shape.as_list()

    slice_dim, slice_shape = _compute_slice_dim_and_shape(full_shape, slicing)

    vs = []
    num_slices = slicing[slice_dim]
    num_slices_with_excess = full_shape[slice_dim] % num_slices

    with variable_scope.variable_op_scope([],
                                          name,
                                          "PartitionedVariable",
                                          reuse=reuse) as scope:
        full_name = scope.name
        slice_offset = [0] * len(full_shape)
        for i in xrange(num_slices):
            var_shape = slice_shape[:]
            var_offset = slice_offset[:]
            if i < num_slices_with_excess:
                var_shape[slice_dim] += 1
            slice_offset[slice_dim] += var_shape[slice_dim]

            if callable(initializer):
                init_val = initializer(var_shape, dtype=dtype)
                init_val = ops.convert_to_tensor(init_val, dtype=dtype)
            elif isinstance(initializer, ops.Tensor):
                init_val = array_ops.slice(initializer, var_offset, var_shape)
                # Use the dtype of the given tensor.
                dtype = init_val.dtype.base_dtype
            else:
                init_val = ops.convert_to_tensor(initializer, dtype=dtype)
                init_val = array_ops.slice(init_val, var_offset, var_shape)

            var = variable_scope.get_variable(name="part_%d" % i,
                                              dtype=dtype,
                                              initializer=init_val,
                                              trainable=trainable,
                                              collections=collections)

            # pylint: disable=protected-access
            var._set_save_slice_info(
                variables.Variable.SaveSliceInfo(full_name, full_shape,
                                                 var_offset, var_shape))
            # pylint: enable=protected-access
            vs.append(var)
        assert slice_offset[slice_dim] == full_shape[slice_dim]
    return vs
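A hedged usage sketch, assuming a TF 1.x graph-mode build where this function is available (historically exposed as tf.create_partitioned_variables). A [20, 10] variable sliced three ways along dimension 0 does not divide evenly, so the first two partitions absorb the slack.

import tensorflow as tf

parts = tf.create_partitioned_variables(
    shape=[20, 10],
    slicing=[3, 1],
    initializer=tf.zeros_initializer())
print([p.get_shape().as_list() for p in parts])
# [[7, 10], [7, 10], [6, 10]]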