def testAssignNoShapeNoValidateShape(self):
   with self.test_session():
     value = self._NewShapelessTensor()
     var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
     self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
     self.assertEqual(tensor_shape.unknown_shape(),
                      tf.assign(var, value, validate_shape=False).get_shape())
Example 2
def _ReductionShape(op):
  """Common shape function for reduction ops."""
  input_shape = op.inputs[0].get_shape()
  reduction_indices = tensor_util.constant_value(op.inputs[1])
  keep_dims = op.get_attr("keep_dims")
  if reduction_indices is None or input_shape.ndims is None:
    if keep_dims:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    else:
      return [tensor_shape.unknown_shape()]

  # Turn reduction_indices from scalar to vector if necessary
  reduction_indices = np.ravel(reduction_indices)

  for reduction_index in reduction_indices:
    if reduction_index < 0 or reduction_index >= input_shape.ndims:
      raise ValueError("Invalid reduction dimension %d for input with %d "
                       "dimensions" % (reduction_index, input_shape.ndims))

  returned_dims = []
  if keep_dims:
    for i, dim in enumerate(input_shape.dims):
      if i in reduction_indices:
        returned_dims.append(1)
      else:
        returned_dims.append(dim)
  else:
    for i, dim in enumerate(input_shape.dims):
      if i not in reduction_indices:
        returned_dims.append(dim)
  return [tensor_shape.TensorShape(returned_dims)]
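
A minimal sketch (not part of the snippet above, assuming only the tensor_shape module) of the keep_dims arithmetic for an input of shape (3, 4, 5) reduced over axis 1:

from tensorflow.python.framework import tensor_shape

input_shape = tensor_shape.TensorShape([3, 4, 5])
reduction_indices = [1]

# keep_dims=True replaces each reduced axis with 1.
kept = [1 if i in reduction_indices else d
        for i, d in enumerate(input_shape.dims)]
# keep_dims=False drops the reduced axes entirely.
dropped = [d for i, d in enumerate(input_shape.dims)
           if i not in reduction_indices]

print(tensor_shape.TensorShape(kept))     # (3, 1, 5)
print(tensor_shape.TensorShape(dropped))  # (3, 5)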
Example 3
 def testAssignNoShape(self):
   with self.cached_session():
     value = self._NewShapelessTensor()
     var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
     self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
     self.assertEqual(tensor_shape.unknown_shape(),
                      state_ops.assign(var, value).get_shape())
Example 4
  def testPartialShapes(self):
    np.random.seed(1618)

    # Input shape is unknown.
    reduction_axes = [1, 2]
    c_unknown = tf.placeholder(tf.float32)
    s_unknown = tf.reduce_sum(c_unknown, reduction_axes)
    self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())

    np_input = np.random.randn(3, 3, 3)
    self._compareAll(np_input, reduction_axes, {c_unknown: np_input})

    # Input shape only has known rank.
    c_known_rank = tf.placeholder(tf.float32)
    c_known_rank.set_shape(tensor_shape.unknown_shape(ndims=3))
    s_known_rank = tf.reduce_sum(c_known_rank, reduction_axes, keep_dims=True)
    self.assertEqual(3, s_known_rank.get_shape().ndims)

    np_input = np.random.randn(3, 3, 3)
    self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})

    # Reduction indices are unknown.
    unknown_indices = tf.placeholder(tf.int32)
    c_unknown_indices = tf.constant([[10.0], [20.0]])
    s_unknown_indices = tf.reduce_sum(c_unknown_indices, unknown_indices,
                                     keep_dims=False)
    self.assertEqual(tensor_shape.unknown_shape(),
                     s_unknown_indices.get_shape())
    s_unknown_indices_keep = tf.reduce_sum(c_unknown_indices, unknown_indices,
                                          keep_dims=True)
    self.assertEqual(2, s_unknown_indices_keep.get_shape().ndims)
Example 5
  def testEquality(self):
    s1 = tensor_shape.TensorShape([tensor_shape.Dimension(
        3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
    s2 = tensor_shape.TensorShape([tensor_shape.Dimension(
        3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
    s3 = tensor_shape.TensorShape([tensor_shape.Dimension(3),
                                   tensor_shape.Dimension(4), None])

    self.assertTrue(s1 == s2)
    self.assertFalse(s1 != s2)
    self.assertFalse(s1 == "a string")
    self.assertTrue(s1 != "a string")
    self.assertNotEqual(s1, "347", "Should not equal an ambiguous string.")
    self.assertEqual(s1, ["3", "4", "7"])

    # Test with an unknown shape in s3
    self.assertTrue(s1 != s3)
    self.assertFalse(s3 == "a string")
    self.assertTrue(s3 != "a string")

    # eq and neq are not symmetric for unknown shapes.
    unk0 = tensor_shape.unknown_shape()
    self.assertFalse(unk0 == s1)
    self.assertFalse(s1 == unk0)
    with self.assertRaises(ValueError):
      unk0 != s1  # pylint: disable=pointless-statement
    with self.assertRaises(ValueError):
      s1 != unk0  # pylint: disable=pointless-statement
    unk1 = tensor_shape.unknown_shape()
    self.assertTrue(unk0 == unk1)
    self.assertTrue(unk1 == unk0)
    with self.assertRaises(ValueError):
      unk0 != unk1  # pylint: disable=pointless-statement
    with self.assertRaises(ValueError):
      unk1 != unk0  # pylint: disable=pointless-statement
Example 6
 def testAsList(self):
   with self.assertRaisesRegexp(ValueError,
                                "not defined on an unknown TensorShape"):
     tensor_shape.unknown_shape().as_list()
   self.assertAllEqual([None, None], tensor_shape.unknown_shape(2).as_list())
   self.assertAllEqual([2, None, 4], tensor_shape.TensorShape(
       (2, None, 4)).as_list())
Example 7
def _SliceShape(op):
  """Shape function for array_ops.slice."""
  input_shape = op.inputs[0].get_shape()
  begin_shape = op.inputs[1].get_shape().with_rank_at_most(1)
  sizes_shape = op.inputs[2].get_shape().with_rank_at_most(1)
  rank_vector_shape = begin_shape.merge_with(sizes_shape)
  ndims = rank_vector_shape.num_elements()
  if ndims is not None:
    input_shape.assert_has_rank(ndims)
  begin_value = tensor_util.ConstantValue(op.inputs[1])
  sizes_value = tensor_util.ConstantValue(op.inputs[2])
  if sizes_value is not None:
    returned_dims = []
    for i, slice_size in enumerate(sizes_value.ravel()):
      if slice_size != -1:
        returned_dims.append(slice_size)
      elif begin_value is not None:
        returned_dims.append(input_shape[i] - begin_value[i])
      else:
        returned_dims.append(None)
    return [tensor_shape.TensorShape(returned_dims)]
  else:
    if input_shape.ndims is not None:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    elif ndims is not None:
      return [tensor_shape.unknown_shape(ndims=ndims)]
    else:
      return [tensor_shape.unknown_shape()]
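
A small hedged sketch of the Dimension arithmetic used above when a slice size of -1 means "to the end"; the values here are illustrative:

from tensorflow.python.framework import tensor_shape

input_dim = tensor_shape.Dimension(10)   # stands in for input_shape[i]
begin = 3                                # stands in for begin_value[i]
print(input_dim - begin)                     # 7: size inferred from begin
print(tensor_shape.Dimension(None) - begin)  # ?: unknown stays unknown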
Example 8
 def testAsProto(self):
   self.assertTrue(tensor_shape.unknown_shape().as_proto().unknown_rank)
   self.assertFalse(
       tensor_shape.unknown_shape(rank=3).as_proto().unknown_rank)
   self.assertFalse(
       tensor_shape.TensorShape([1, 2, 3]).as_proto().unknown_rank)
   self.assertFalse(
       tensor_shape.TensorShape([1, None, 3]).as_proto().unknown_rank)
Example 9
def _sparse_shape(op):
  """Shape function for `SparseTensor` result."""
  num_rows = (op.inputs[0].get_shape()[0] if
              op.type in ("DenseToSparseOperation", "DenseToDenseOperation")
              else None)
  return [
      tensor_shape.TensorShape([num_rows, 2]),
      tensor_shape.unknown_shape(1),
      tensor_shape.unknown_shape(1),
  ]
Example 10
  def testStr(self):
    self.assertEqual("<unknown>", str(tensor_shape.unknown_shape()))
    self.assertEqual("(?,)", str(tensor_shape.unknown_shape(ndims=1)))
    self.assertEqual("(?, ?)", str(tensor_shape.unknown_shape(ndims=2)))
    self.assertEqual("(?, ?, ?)", str(tensor_shape.unknown_shape(ndims=3)))

    self.assertEqual("()", str(tensor_shape.scalar()))
    self.assertEqual("(7,)", str(tensor_shape.vector(7)))
    self.assertEqual("(3, 8)", str(tensor_shape.matrix(3, 8)))
    self.assertEqual("(4, 5, 2)", str(tensor_shape.TensorShape([4, 5, 2])))

    self.assertEqual("(32, ?, 1, 9)",
                     str(tensor_shape.TensorShape([32, None, 1, 9])))
Example 11
 def test_build_raw_serving_input_receiver_fn_without_shape(self):
   """Test case for issue #21178."""
   f = {"feature_1": array_ops.placeholder(dtypes.float32),
        "feature_2": array_ops.placeholder(dtypes.int32)}
   serving_input_receiver_fn = export.build_raw_serving_input_receiver_fn(f)
   v = serving_input_receiver_fn()
   self.assertTrue(isinstance(v, export.ServingInputReceiver))
   self.assertEqual(
       tensor_shape.unknown_shape(),
       v.receiver_tensors["feature_1"].shape)
   self.assertEqual(
       tensor_shape.unknown_shape(),
       v.receiver_tensors["feature_2"].shape)
Example 12
  def test_to_feature_columns_and_input_fn(self):
    df = setup_test_df_3layer()
    feature_columns, input_fn = (
        estimator_utils.to_feature_columns_and_input_fn(
            df,
            base_input_keys_with_defaults={"a": 1,
                                           "b": 2,
                                           "c": 3,
                                           "d": 4},
            label_keys=["g"],
            feature_keys=["a", "b", "f"]))

    expected_feature_column_a = feature_column.DataFrameColumn(
        "a",
        learn.PredefinedSeries(
            "a",
            parsing_ops.FixedLenFeature(tensor_shape.unknown_shape(),
                                        dtypes.int32, 1)))
    expected_feature_column_b = feature_column.DataFrameColumn(
        "b",
        learn.PredefinedSeries("b", parsing_ops.VarLenFeature(dtypes.int32)))
    expected_feature_column_f = feature_column.DataFrameColumn(
        "f",
        learn.TransformedSeries([
            learn.PredefinedSeries("c",
                                   parsing_ops.FixedLenFeature(
                                       tensor_shape.unknown_shape(),
                                       dtypes.int32, 3)),
            learn.PredefinedSeries("d", parsing_ops.VarLenFeature(dtypes.int32))
        ], mocks.Mock2x2Transform("iue", "eui", "snt"), "out2"))

    expected_feature_columns = [
        expected_feature_column_a, expected_feature_column_b,
        expected_feature_column_f
    ]
    self.assertEqual(sorted(expected_feature_columns), sorted(feature_columns))

    base_features, labels = input_fn()
    expected_base_features = {
        "a": mocks.MockTensor("Tensor a", dtypes.int32),
        "b": mocks.MockSparseTensor("SparseTensor b", dtypes.int32),
        "c": mocks.MockTensor("Tensor c", dtypes.int32),
        "d": mocks.MockSparseTensor("SparseTensor d", dtypes.int32)
    }
    self.assertEqual(expected_base_features, base_features)

    expected_labels = mocks.MockTensor("Out iue", dtypes.int32)
    self.assertEqual(expected_labels, labels)

    self.assertEqual(3, len(feature_columns))
Example 13
def _SqueezeShape(op):
  """Determine shape for squeeze op's output tensor.

  Args:
    op: Operation for which to determine shape.
  Returns:
    Shape of op's output tensor.
  Raises:
    ValueError: if squeeze_dims includes a dimension outside of [-rank, rank),
        where rank is the number of dimensions in the input tensor. Or, if
        squeeze_dims includes a dimension for which input shape has a value
        not equal to 1.
  """
  input_shape = op.inputs[0].get_shape()
  if input_shape.dims is None:
    return [tensor_shape.unknown_shape()]

  squeeze_dims = op.get_attr("squeeze_dims") or []
  wrapped_squeeze_dims = []
  input_ndims = input_shape.ndims
  for i, squeeze_dim in enumerate(squeeze_dims):
    if squeeze_dim < -input_ndims or squeeze_dim >= input_ndims:
      raise ValueError(
          "squeeze_dims[%d]=%d not in [%d, %d)." % (
              i, squeeze_dim, -input_ndims, input_ndims))
    if squeeze_dim < 0:
      squeeze_dim += input_ndims
    wrapped_squeeze_dims.append(squeeze_dim)

  result_shape = []
  for i, dim in enumerate([d.value for d in input_shape.dims]):
    is_explicit_match = i in wrapped_squeeze_dims
    if dim is None:
      if is_explicit_match:
        # Assume that the squeezed dimension will be 1 at runtime.
        continue
      if not wrapped_squeeze_dims:
        # If squeezing all 1 dimensions and we see a None, give up.
        return [tensor_shape.unknown_shape()]
    elif dim == 1:
      if is_explicit_match or not wrapped_squeeze_dims:
        continue
    elif is_explicit_match:
      raise ValueError(
          "Can not squeeze dim[%d], expected a dimension of 1, got %d." % (
              i, dim))
    result_shape.append(dim)
  return [tensor_shape.TensorShape(result_shape)]
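
An illustrative sketch, with assumed dimension values, of the squeeze logic above for a concrete case: dropping axis 0 of a (1, ?, 5) input keeps the remaining axes:

dims = [1, None, 5]   # the .value of each input dimension
squeeze_dims = [0]
result = [d for i, d in enumerate(dims) if i not in squeeze_dims]
print(result)  # [None, 5]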
Example 14
def _dense_to_dense_shape(op):
  """Shapes for `SparseTensor` result given 2 dense inputs.

  Args:
    op: Operation with 2 dense `Tensor` inputs.

  Returns:
    Tuple of three shapes corresponding to the indices, values, and shape
    `Tensor` components of the result `SparseTensor`.

  Raises:
    ValueError: if either input `Tensor` has rank < 2, or ranks do not match, or
    first n-1 dims of input shapes are not compatible.
  """
  # The following should stay in sync with `ComputeDenseToDense` shape
  # assertions in kernels/set_kernels.cc.
  input0_shape = op.inputs[0].get_shape()
  input0_rank = input0_shape.ndims
  if (input0_rank is not None) and (input0_rank < 2):
    raise ValueError("Input 0, expected rank >= 2, got shape %s." %
                     input0_shape)
  # Dimension n contains the set values to be compared, so ranks and the first
  # n-1 dimensions of inputs and output must match.
  input1_shape = op.inputs[1].get_shape()
  input1_rank = input1_shape.ndims
  if (input0_rank is not None) and (input1_rank is not None) and (
      input0_rank != input1_rank):
    raise ValueError(
        "Ranks do not match: input 0 with shape %s, input 1 with shape %s." %
        (input0_shape, input1_shape))
  output_rank = input1_rank if input0_rank is None else input0_rank
  output_dim0 = input1_shape[0] if input0_shape[0] is None else input0_shape[0]
  input0_dims = input0_shape.dims
  if input0_dims is None:
    group0_shape = tensor_shape.unknown_shape()
  else:
    group0_shape = tensor_shape.TensorShape(input0_dims[:-1])
  input1_dims = input1_shape.dims
  if input1_dims is None:
    group1_shape = tensor_shape.unknown_shape()
  else:
    group1_shape = tensor_shape.TensorShape(input1_dims[:-1])
  group0_shape.assert_is_compatible_with(group1_shape)

  indices_shape = tensor_shape.TensorShape((output_dim0, output_rank))
  values_shape = tensor_shape.unknown_shape(1)
  shape_shape = tensor_shape.TensorShape((output_rank,))
  return (indices_shape, values_shape, shape_shape)
Example 15
def _ExpandDimsShape(op):
  """Determine shape for expand op's output tensor.

  Args:
    op: Operation for which to determine shape.
        op.inputs[0] is the input tensor.
        op.inputs[1] is the dimension in which to expand.
  Returns:
    Shape of op's output tensor.
  Raises:
    ValueError: If dim is outside of [-rank - 1, rank], where rank is the number
        of dimensions in the input tensor.
  """
  input_shape = op.inputs[0].get_shape()
  if input_shape.dims is None:
    return [tensor_shape.unknown_shape()]
  dim = tensor_util.ConstantValue(op.inputs[1])
  input_ndims = input_shape.ndims
  if dim < -input_ndims - 1 or dim > input_ndims:
    raise ValueError(
        "dim %d not in [%d, %d]." % (dim, -input_ndims - 1, input_ndims))
  if dim < 0:
    dim += (input_ndims + 1)
  result_shape = list(input_shape.dims)
  result_shape.insert(dim, 1)
  return [tensor_shape.TensorShape(result_shape)]
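
A short sketch of the insertion above, assuming only the tensor_shape module: expanding dim 1 of a (3, 4) input yields (3, 1, 4):

from tensorflow.python.framework import tensor_shape

shape = tensor_shape.TensorShape([3, 4])
dims = list(shape.dims)
dims.insert(1, 1)   # mirrors result_shape.insert(dim, 1) above
print(tensor_shape.TensorShape(dims))  # (3, 1, 4)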
Example 16
def _DepthwiseConv2dNativeBackpropInputShape(op):
  """Shape function for the DepthwiseConv2dNativeBackpropInput op."""
  input_shape = tensor_util.constant_value(op.inputs[0])
  if input_shape is not None:
    return [tensor_shape.TensorShape(input_shape.tolist())]
  else:
    return [tensor_shape.unknown_shape(ndims=4)]
Example 17
def _TileShape(op):
  """Shape function for the Tile op.

  This op has two inputs:

  * input: A rank-N tensor.
  * multiples: A length-N vector, in which the i^th element contains
    the factor by which `input` will be tiled in the i^th dimension.

  It has one output, which has the same rank as `input`, with additional
  elements according to the values in `multiples`.

  Args:
    op: A Tile Operation.

  Returns:
    A single-element list containing the shape of the output.
  """
  multiples_shape = op.inputs[1].get_shape().with_rank_at_most(1)
  input_shape = op.inputs[0].get_shape().with_rank(
      multiples_shape.num_elements())
  multiples = tensor_util.ConstantValue(op.inputs[1])
  if multiples is None:
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
  else:
    output_dims = []
    multiples = multiples.ravel()
    for i, dim in enumerate(input_shape.dims):
      output_dims.append(dim * multiples[i])
    return [tensor_shape.TensorShape(output_dims)]
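
A sketch of the per-axis arithmetic above: known dims scale by their multiple, unknown dims stay unknown:

from tensorflow.python.framework import tensor_shape

print(tensor_shape.Dimension(3) * 2)     # 6
print(tensor_shape.Dimension(None) * 2)  # ?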
Example 18
 def testShape(self):
   op = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(),
                      [], [dtypes.float32])
   t = op.outputs[0]
   self.assertEqual(tensor_shape.unknown_shape(), t.get_shape())
   t.set_shape([1, 2, 3])
   self.assertEqual([1, 2, 3], t.get_shape())
Example 19
def _AvgPool3DGradShape(op):
  """Shape function for the AvgPool3DGrad op."""
  orig_input_shape = tensor_util.constant_value(op.inputs[0])
  if orig_input_shape is not None:
    return [tensor_shape.TensorShape(orig_input_shape.tolist())]
  else:
    return [tensor_shape.unknown_shape(ndims=5)]
Example 20
def _RandomShape(op):
  shape_val = tensor_util.constant_value(op.inputs[0])
  if shape_val is not None:
    return [tensor_shape.TensorShape(shape_val)]
  else:
    shape_shape = op.inputs[0].get_shape().with_rank(1)
    return [tensor_shape.unknown_shape(ndims=shape_shape[0].value)]
Example 21
def _TransposeShape(op):
  """Shape function for the Transpose op.

  This op takes two inputs:

  * input: a rank-N tensor of arbitrary shape.
  * shuffle: a length-N vector.

  Its output is the rank-N tensor computed by permuting the dimensions
  of input according to shuffle.

  Args:
    op: A Transpose op.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the shapes of input and shuffle are incompatible.
    IndexError: If shuffle contains an index that is >= the rank of input.
  """
  input_shape = op.inputs[0].get_shape()
  transpose_shape = op.inputs[1].get_shape().merge_with(tensor_shape.vector(
      input_shape.ndims))
  transpose_vec = tensor_util.ConstantValue(op.inputs[1])
  if transpose_vec is None:
    return [tensor_shape.unknown_shape(ndims=transpose_shape[0].value)]
  else:
    return [tensor_shape.TensorShape([input_shape[i]
                                      for i in transpose_vec.tolist()])]
Example 22
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
  """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.
  """
  shape = tensor.get_shape().with_rank(1)
  if tensor.get_shape() == [0]:
    return tensor_shape.scalar()
  elif tensor.op.type == "Shape":
    return tensor.op.inputs[0].get_shape()
  elif tensor.op.type == "Pack":
    ret = tensor_shape.scalar()  # Empty list.
    for pack_input in tensor.op.inputs:
      # `pack_input` must be a scalar. Attempt to evaluate it, and append it
      # to `ret`.
      pack_input_val = constant_value(pack_input)
      if pack_input_val is None or pack_input_val < 0:
        new_dim = tensor_shape.Dimension(None)
      else:
        new_dim = tensor_shape.Dimension(pack_input_val)
      ret = ret.concatenate([new_dim])
    return ret
  elif tensor.op.type == "Concat":
    # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[1:]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  elif tensor.op.type == "ConcatV2":
    # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[:-1]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  else:
    ret = tensor_shape.unknown_shape(shape[0].value)
    value = constant_value(tensor)
    if value is not None:
      ret = ret.merge_with(tensor_shape.TensorShape(
          [d if d != -1 else None for d in value]))
    return ret
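
A minimal sketch of the fallback branch above, assuming a constant value of [2, -1, 4] for a length-3 shape vector:

from tensorflow.python.framework import tensor_shape

value = [2, -1, 4]
ret = tensor_shape.unknown_shape(3)
ret = ret.merge_with(tensor_shape.TensorShape(
    [d if d != -1 else None for d in value]))
print(ret)  # (2, ?, 4)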
Example 23
def _ConcatShape(op):
  concat_dim = tensor_util.constant_value(op.inputs[0])
  if concat_dim is None:
    # Return an unknown shape with the same rank as the inputs, or an
    # unknown rank if no input's rank is known.
    rank = None
    for value in op.inputs[1:]:
      if rank is not None:
        value.get_shape().assert_has_rank(rank)
      else:
        rank = value.get_shape().ndims
    if rank == 0:
      raise ValueError("Can't concatenate scalars (use tf.pack instead)")
    return [tensor_shape.unknown_shape(ndims=rank)]

  else:
    # Merge all the non-concat dims, and sum the concat dim to make an
    # output shape.
    concat_dim = int(concat_dim)
    output_shape = op.inputs[1].get_shape()
    for value in op.inputs[2:]:
      value_shape = value.get_shape()
      if value_shape.ndims is not None and concat_dim >= value_shape.ndims:
        raise ValueError("concat_dim is out of range (values rank = %d)" %
                         value_shape.ndims)
      before = output_shape[:concat_dim].merge_with(value_shape[:concat_dim])
      at = output_shape[concat_dim] + value_shape[concat_dim]
      after = output_shape[
          concat_dim + 1:].merge_with(value_shape[concat_dim + 1:])
      output_shape = before.concatenate(at).concatenate(after)
    return [output_shape]
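
A sketch of the before/at/after arithmetic above for two assumed inputs of shape (2, 3) and (2, 5) concatenated along dim 1:

from tensorflow.python.framework import tensor_shape

a = tensor_shape.TensorShape([2, 3])
b = tensor_shape.TensorShape([2, 5])
concat_dim = 1
before = a[:concat_dim].merge_with(b[:concat_dim])
at = a[concat_dim] + b[concat_dim]          # 3 + 5 = 8
after = a[concat_dim + 1:].merge_with(b[concat_dim + 1:])
print(before.concatenate(at).concatenate(after))  # (2, 8)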
Example 24
def _reverse_seq(input_seq, lengths):
    """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
    lengths:   A tensor of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.

  Returns:
    time-reversed sequence
  """
    if lengths is None:
        return list(reversed(input_seq))

    input_shape = tensor_shape.unknown_shape(
        ndims=input_seq[0].get_shape().ndims)
    for input_ in input_seq:
        # merge_with returns a new shape; reassign to accumulate the merge.
        input_shape = input_shape.merge_with(input_.get_shape())
        input_.set_shape(input_shape)

    # Join into (time, batch_size, depth)
    s_joined = array_ops.pack(input_seq)

    # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
    # (lengths is always non-None here; the None case returned early above).
    lengths = math_ops.to_int64(lengths)

    # Reverse along dimension 0
    s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
    # Split again into list
    result = array_ops.unpack(s_reversed)
    for r in result:
        r.set_shape(input_shape)
    return result
Example 25
 def testSkipEagerBuildElementShape(self):
   fn = list_ops._build_element_shape
   # Unknown shape -> -1.
   self.assertEqual(fn(None), -1)
   self.assertEqual(fn(tensor_shape.unknown_shape()), -1)
   # Scalar shape -> [] with type int32.
   self.assertEqual(fn([]).dtype, dtypes.int32)
   self.assertEqual(fn(tensor_shape.scalar()).dtype, dtypes.int32)
   self.assertAllEqual(self.evaluate(fn([])), np.array([], np.int32))
   self.assertAllEqual(
       self.evaluate(fn(tensor_shape.scalar())), np.array([], np.int32))
   # Tensor -> Tensor
   shape = constant_op.constant(1)
   self.assertIs(fn(shape), shape)
   # Shape with unknown dims -> shape list with -1's.
   shape = [None, 5]
   self.assertAllEqual(fn(shape), [-1, 5])
   self.assertAllEqual(fn(tensor_shape.TensorShape(shape)), [-1, 5])
   # Shape with unknown dims and tensor dims -> shape list with -1's and tensor
   # dims.
   t = array_ops.placeholder(dtypes.int32)
   shape = [None, 5, t]
   result = fn(shape)
   self.assertAllEqual(result[:2], [-1, 5])
   self.assertIs(result[2], t)
Example 26
def _RandomShape(op):
  shape_val = tensor_util.ConstantValue(op.inputs[0])
  if shape_val is not None:
    return [tensor_shape.TensorShape(shape_val.tolist())]
  else:
    shape_shape = op.inputs[0].get_shape().with_rank_at_most(1)
    return [tensor_shape.unknown_shape(ndims=shape_shape.num_elements())]
Example 27
def _TensorArrayReadShape(op):
    # handle, index, flow_in
    op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
    op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
    op.inputs[2].get_shape().merge_with(tensor_shape.scalar())
    # value
    return [tensor_shape.unknown_shape()]
Example 28
 def testAssignNoValueShapeNoValidateShape(self):
   value = self._NewShapelessTensor()
   shape = [1, 2]
   var = state_ops.variable_op(shape, dtypes.float32)
   self.assertEqual(shape, var.get_shape())
   assigned = state_ops.assign(var, value, validate_shape=False)
   self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
Example 29
  def _testStackWhileSwap(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      n = constant_op.constant(0)
      h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")

      def c(x):
        return math_ops.less(x, 10)

      def b(x):
        with ops.control_dependencies([x]):
          a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
          v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
        with ops.control_dependencies([v]):
          return math_ops.add(x, 1)

      r = control_flow_ops.while_loop(c, b, [n])

      v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)

      def c1(x, y):
        return math_ops.greater(x, 0)

      def b1(x, y):
        nx = math_ops.subtract(x, 1)
        ny = y + gen_data_flow_ops._stack_pop(h, dtypes.float32)
        return [nx, ny]

      rx, ry = control_flow_ops.while_loop(
          c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
      self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
Example 30
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
                shared_name=""):
  """Create a variable Operation.

  See also variables.Variable.

  Args:
    shape: The shape of the tensor managed by this variable.
    dtype: The underlying type of the tensor values.
    name: optional name to use for the variable op.
    set_shape: If True, set the shape property of the returned Tensor to
      the shape argument.
    container: An optional string. Defaults to "".
      If non-empty, this variable is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional string. Defaults to "".
      If non-empty, this variable is named in the given bucket
      with this shared_name. Otherwise, the node name is used instead.

  Returns:
    A variable tensor.
  """
  if not set_shape:
    shape = tensor_shape.unknown_shape()
  ret = gen_state_ops._variable(shape=shape, dtype=dtype, name=name,
                                container=container, shared_name=shared_name)
  # TODO(mrry): Move this to where it is used, so we can get rid of this op
  #   wrapper?
  if set_shape:
    ret.set_shape(shape)
  return ret
Example 31
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
    """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  For example:

  ```python
  # tensor `a` is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
    if tensor_dtype is None:
        if not inputs or not isinstance(inputs, (list, tuple)):
            raise ValueError(
                "inputs must be a list of at least one Tensor with the "
                "same dtype and shape")
        inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
        if not all(isinstance(x, ops.Tensor) for x in inputs):
            raise ValueError(
                "inputs must be a list of at least one Tensor with the "
                "same dtype and shape")
        if not all(x.dtype == inputs[0].dtype for x in inputs):
            raise ValueError(
                "inputs must be a list of at least one Tensor with the "
                "same dtype and shape")
        tensor_dtype = inputs[0].dtype
    if shape is not None:
        shape = tensor_shape.as_shape(shape)
    else:
        shape = tensor_shape.unknown_shape()
        for input_tensor in inputs:
            if isinstance(input_tensor, ops.Tensor):
                shape = shape.merge_with(input_tensor.get_shape())
    if not shape.is_fully_defined():
        # TODO(pbar): Make a version of assign_add that accepts an uninitialized
        # lvalue, and takes its shape from that? This would allow accumulate_n to
        # work in all situations that add_n currently works.
        raise ValueError(
            "Cannot infer the shape of the accumulator for "
            "accumulate_n. Pass the shape argument, or set the shape "
            "of at least one of the inputs.")
    with ops.op_scope(inputs, name, "AccumulateN") as name:
        var = gen_state_ops._temporary_variable(shape=shape,
                                                dtype=tensor_dtype)
        var_name = var.op.name
        var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
        update_ops = []
        for input_tensor in inputs:
            op = state_ops.assign_add(var, input_tensor, use_locking=True)
            update_ops.append(op)
        with ops.control_dependencies(update_ops):
            return gen_state_ops._destroy_temporary_variable(var,
                                                             var_name=var_name,
                                                             name=name)
Example 32
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
    """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.
  """
    if context.in_eager_mode():
        return tensor_shape.as_shape(
            [dim if dim != -1 else None for dim in tensor.numpy()])

    shape = tensor.get_shape().with_rank(1)
    if tensor.get_shape() == [0]:
        return tensor_shape.scalar()
    elif tensor.op.type == "Shape":
        return tensor.op.inputs[0].get_shape()
    elif tensor.op.type == "Pack":
        ret = tensor_shape.scalar()  # Empty list.
        # Since we expect rank 1 inputs, Pack's axis must be zero, otherwise it
        # would not be rank 1.
        assert tensor.op.get_attr("axis") == 0
        for pack_input in tensor.op.inputs:
            # `pack_input` must be a scalar. Attempt to evaluate it, and append it
            # to `ret`.
            pack_input_val = constant_value(pack_input)
            if pack_input_val is None or pack_input_val < 0:
                new_dim = tensor_shape.Dimension(None)
            else:
                new_dim = tensor_shape.Dimension(pack_input_val)
            ret = ret.concatenate([new_dim])
        return ret
    elif tensor.op.type == "Concat":
        # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
        # the only legal value when concatenating vectors, and it will
        # have been checked by a previous shape function.
        ret = tensor_shape.scalar()  # Empty list.
        for concat_input in tensor.op.inputs[1:]:
            # `concat_input` must be a vector. Attempt to evaluate it as a shape,
            # and concatenate it with `ret`.
            ret = ret.concatenate(constant_value_as_shape(concat_input))
        return ret
    elif tensor.op.type == "ConcatV2":
        # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
        # the only legal value when concatenating vectors, and it will
        # have been checked by a previous shape function.
        ret = tensor_shape.scalar()  # Empty list.
        for concat_input in tensor.op.inputs[:-1]:
            # `concat_input` must be a vector. Attempt to evaluate it as a shape,
            # and concatenate it with `ret`.
            ret = ret.concatenate(constant_value_as_shape(concat_input))
        return ret
    elif tensor.op.type == "StridedSlice":
        try:
            begin = constant_value(tensor.op.inputs[1])
            end = constant_value(tensor.op.inputs[2])
            strides = constant_value(tensor.op.inputs[3])
            if begin is not None and end is not None and strides is not None:
                begin = begin[0]
                end = end[0]
                strides = strides[0]
                begin_mask = tensor.op.get_attr("begin_mask")
                if begin_mask == 1:
                    begin = None
                end_mask = tensor.op.get_attr("end_mask")
                if end_mask == 1:
                    end = None

                ellipsis_mask = tensor.op.get_attr("ellipsis_mask")
                new_axis_mask = tensor.op.get_attr("new_axis_mask")
                shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask")
                valid_attributes = (not ellipsis_mask and not new_axis_mask
                                    and not shrink_axis_mask
                                    and (not begin_mask or (begin_mask == 1))
                                    and (not end_mask or (end_mask == 1)))
                if valid_attributes:  # additional inputs not supported
                    prev = constant_value_as_shape(tensor.op.inputs[0])
                    prev = prev[begin:end:strides]
                    ret = tensor_shape.TensorShape(prev)
                    return ret

        except ValueError:  # Could come from get_attr or slicing prev.
            pass
        except TypeError:  # Could come from slicing prev.
            pass

    ret = tensor_shape.unknown_shape(shape[0].value)
    value = constant_value(tensor)
    if value is not None:
        ret = ret.merge_with(
            tensor_shape.TensorShape([d if d >= 0 else None for d in value]))
    return ret
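
A small sketch of the StridedSlice branch above: slicing a partially known shape keeps the known entries in the selected range:

from tensorflow.python.framework import tensor_shape

prev = tensor_shape.TensorShape([2, None, 4, 5])
print(prev[1:3])  # (?, 4)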
Example 33
def _TensorArrayConcatShape(op):
    # handle, flow_in
    op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
    op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
    # value, lengths
    return [tensor_shape.unknown_shape(), tensor_shape.vector(None)]
Example 34
 def _NewShapelessTensor(self):
     tensor = tf.placeholder(tf.float32)
     self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
     return tensor
Example 35
 def testAssignUpdateNoShape(self):
   var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
   added = tf.assign_add(var, self._NewShapelessTensor())
   self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
   subbed = tf.assign_sub(var, self._NewShapelessTensor())
   self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
Example 36
def _AccumulatorShape(inputs):
  shape = tensor_shape.unknown_shape()
  for i in inputs:
    if isinstance(i, ops.Tensor):
      shape = shape.merge_with(i.get_shape())
  return shape
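
A sketch of the accumulation above, with assumed partial shapes: each merge tightens the axes that are known:

from tensorflow.python.framework import tensor_shape

shape = tensor_shape.unknown_shape()
shape = shape.merge_with(tensor_shape.TensorShape([2, None]))
shape = shape.merge_with(tensor_shape.TensorShape([None, 3]))
print(shape)  # (2, 3)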
Example 37
def _LookupTableExportShape(op):
    """Shape function for data_flow_ops._lookup_table_export_values."""
    op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
    keys_shape = tensor_shape.vector(None)
    values_shape = tensor_shape.unknown_shape()
    return [keys_shape, values_shape]
Example 38
def _MatchingFilesShape(op):
    """Shape function for the MatchingFiles op."""
    unused_pattern_shape = op.inputs[0].get_shape().merge_with(
        tensor_shape.scalar())
    return [tensor_shape.unknown_shape(ndims=1)]
Example 39
def _ReaderReadUpToShape(_):
    """Shape function for the ReaderBase.ReadUpTo op."""
    return [
        tensor_shape.unknown_shape(ndims=1),
        tensor_shape.unknown_shape(ndims=1)
    ]
Example 40
def unknown_shape(op):
    """Shape function for use with ops whose output shapes are unknown."""
    return [tensor_shape.unknown_shape() for _ in op.outputs]
Example 41
 def f():
     c = lambda n: n < 10
     b = lambda n: n * x
     return control_flow_ops.while_loop(
         c, b, [n], [tensor_shape.unknown_shape()])
Example 42
 def testset_shape(self):
     p = state_ops.variable_op([1, 2], dtypes.float32)
     self.assertEqual([1, 2], p.get_shape())
     p = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
     self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
Example 43
 def testAssignNoVarShapeNoValidateShape(self):
     value = np.array([[42.0, 43.0]])
     var = state_ops.variable_op(value.shape, tf.float32, set_shape=False)
     self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
     assigned = tf.assign(var, value, validate_shape=False)
     self.assertShapeEqual(value, assigned)
Example 44
def _TensorArrayPackShape(op):
    # handle, flow_in
    op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
    op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
    # value
    return [tensor_shape.unknown_shape()]
Example 45
 def testAsDenseShapes(self):
   test_cases = (
       {
           "types": (),
           "classes": (),
           "expected": ()
       },
       {
           "types": tensor_shape.scalar(),
           "classes": ops.Tensor,
           "expected": tensor_shape.scalar()
       },
       {
           "types": tensor_shape.scalar(),
           "classes": sparse_tensor.SparseTensor,
           "expected": tensor_shape.unknown_shape()
       },
       {
           "types": (tensor_shape.scalar()),
           "classes": (ops.Tensor),
           "expected": (tensor_shape.scalar())
       },
       {
           "types": (tensor_shape.scalar()),
           "classes": (sparse_tensor.SparseTensor),
           "expected": (tensor_shape.unknown_shape())
       },
       {
           "types": (tensor_shape.scalar(), ()),
           "classes": (ops.Tensor, ()),
           "expected": (tensor_shape.scalar(), ())
       },
       {
           "types": ((), tensor_shape.scalar()),
           "classes": ((), ops.Tensor),
           "expected": ((), tensor_shape.scalar())
       },
       {
           "types": (tensor_shape.scalar(), ()),
           "classes": (sparse_tensor.SparseTensor, ()),
           "expected": (tensor_shape.unknown_shape(), ())
       },
       {
           "types": ((), tensor_shape.scalar()),
           "classes": ((), sparse_tensor.SparseTensor),
           "expected": ((), tensor_shape.unknown_shape())
       },
       {
           "types": (tensor_shape.scalar(), (), tensor_shape.scalar()),
           "classes": (ops.Tensor, (), ops.Tensor),
           "expected": (tensor_shape.scalar(), (), tensor_shape.scalar())
       },
       {
           "types": (tensor_shape.scalar(), (), tensor_shape.scalar()),
           "classes": (sparse_tensor.SparseTensor, (),
                       sparse_tensor.SparseTensor),
           "expected": (tensor_shape.unknown_shape(), (),
                        tensor_shape.unknown_shape())
       },
       {
           "types": ((), tensor_shape.scalar(), ()),
           "classes": ((), ops.Tensor, ()),
           "expected": ((), tensor_shape.scalar(), ())
       },
       {
           "types": ((), tensor_shape.scalar(), ()),
           "classes": ((), sparse_tensor.SparseTensor, ()),
           "expected": ((), tensor_shape.unknown_shape(), ())
       },
   )
   for test_case in test_cases:
     self.assertShapesEqual(
         sparse.as_dense_shapes(test_case["types"], test_case["classes"]),
         test_case["expected"])
Example 46
 def _NewShapelessTensor(self):
     tensor = array_ops.placeholder(dtypes.float32)
     self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
     return tensor
Example 47
 def _flat_shapes(self):
     # NOTE(mrry): The default flat shape of a boxed `SparseTensor` is `(3,)`,
     # but a `SparseTensorStructure` can also represent a batch of boxed
     # `SparseTensor` objects with shape `(?, 3)` (and batches of batches, etc.),
     # so the flat shape must be unknown.
     return [tensor_shape.unknown_shape(None)]
Example 48
def _LookupTableFindShape(op):
    """Shape function for data_flow_ops._lookup_table_find."""
    op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
    return [tensor_shape.unknown_shape()]
Example 49
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
    """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.
  """
    shape = tensor.get_shape().with_rank(1)
    if tensor.get_shape() == [0]:
        return tensor_shape.scalar()
    elif tensor.op.type == "Shape":
        return tensor.op.inputs[0].get_shape()
    elif tensor.op.type == "Pack":
        ret = tensor_shape.scalar()  # Empty list.
        for pack_input in tensor.op.inputs:
            # `pack_input` must be a scalar. Attempt to evaluate it, and append it
            # to `ret`.
            pack_input_val = constant_value(pack_input)
            if pack_input_val is None or pack_input_val < 0:
                new_dim = tensor_shape.Dimension(None)
            else:
                new_dim = tensor_shape.Dimension(pack_input_val)
            ret = ret.concatenate([new_dim])
        return ret
    elif tensor.op.type == "Concat":
        # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
        # the only legal value when concatenating vectors, and it will
        # have been checked by a previous shape function.
        ret = tensor_shape.scalar()  # Empty list.
        for concat_input in tensor.op.inputs[1:]:
            # `concat_input` must be a vector. Attempt to evaluate it as a shape,
            # and concatenate it with `ret`.
            ret = ret.concatenate(constant_value_as_shape(concat_input))
        return ret
    elif tensor.op.type == "ConcatV2":
        # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
        # the only legal value when concatenating vectors, and it will
        # have been checked by a previous shape function.
        ret = tensor_shape.scalar()  # Empty list.
        for concat_input in tensor.op.inputs[:-1]:
            # `concat_input` must be a vector. Attempt to evaluate it as a shape,
            # and concatenate it with `ret`.
            ret = ret.concatenate(constant_value_as_shape(concat_input))
        return ret
    else:
        ret = tensor_shape.unknown_shape(shape[0].value)
        value = constant_value(tensor)
        if value is not None:
            ret = ret.merge_with(
                tensor_shape.TensorShape(
                    [d if d != -1 else None for d in value]))
        return ret
Example 50
 def get_shape(self):
     return tensor_shape.unknown_shape()
Example 51
def _PlaceholderShape(op):
    given_shape = tensor_util.TensorShapeProtoToList(op.get_attr("shape"))
    if given_shape:
        return [tensor_shape.TensorShape(given_shape)]
    else:
        return [tensor_shape.unknown_shape()]
Example 52
def _AddNShape(op):
    merged_shape = tensor_shape.unknown_shape()
    for input_ in op.inputs:
        merged_shape = merged_shape.merge_with(input_.get_shape())
    return [merged_shape]
Example 53
def safe_embedding_lookup_sparse(
    embedding_weights,
    sparse_ids,
    sparse_weights=None,
    combiner="mean",
    default_id=None,
    name="safe_embedding_lookup_sparse",
    partition_strategy=None,  # not used
    max_norm=None,
    return_trainable=False,
):
    """Provides a dynamic version of `tf.nn.safe_embedding_lookup_sparse`.

    Lookup embedding results, accounting for empty features and invalid weights.

    All IDs are treated as valid, including non-positive IDs.
    Invalid weights (<= 0) are pruned from input weights, as are any IDs with
    a non-positive weight. For an entry with no features, the embedding vector
    for `default_id` is returned, or the 0-vector if `default_id` is not
    supplied.

    The ids and weights may be multi-dimensional. Embeddings are always aggregated
    along the last dimension.

    Args:
      embedding_weights: A single `dynamic_embedding.Variable` instance
        representing the complete embedding tensor.
      sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
        ids. `d_0` is typically batch size.
      sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
        float weights corresponding to `sparse_ids`, or `None` if all weights
        are assumed to be 1.0.
      combiner: A string specifying how to combine embedding results for each
        entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the
        default.
      default_id: The id to use for an entry with no features.
      name: A name for this operation (optional).
      partition_strategy: A string specifying the partitioning strategy. Currently
        `"div"` and `"mod"` are supported. Default is `"div"`.
      max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
        combining.

    Returns:
      combined_embeddings:
        A dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
      trainable_wrap:
        A TrainableWrapper object used to fill the Optimizers `var_list`
          Only provided if `return_trainable` is True.

    Raises:
      ValueError: if `embedding_weights` is empty.
  """
    if embedding_weights is None:
        raise ValueError("Missing embedding_weights %s." % embedding_weights)

    if embedding_weights.key_dtype != sparse_ids.dtype:
        raise TypeError(
            "embedding_weights.key_dtype should be same with sparse_ids.dtype: "
            "{} vs. {}".format(embedding_weights.key_dtype, sparse_ids.dtype))

    weights_dtype = sparse_weights.dtype if sparse_weights is not None else None
    if weights_dtype and embedding_weights.value_dtype != weights_dtype:
        raise TypeError(
            "embedding_weights.value_dtype should be same with sparse_weights.dtype"
            ": {} vs. {}".format(embedding_weights.value_dtype, weights_dtype))

    scope = variable_scope.get_variable_scope()
    full_name = scope.name + "/" + name if scope.name else name
    with ops.name_scope(full_name + "/"):
        # Reshape higher-rank sparse ids and weights to linear segment ids.
        original_shape = sparse_ids.dense_shape
        original_rank_dim = tensor_shape.dimension_value(
            sparse_ids.dense_shape.get_shape()[0])
        original_rank = (array_ops.size(original_shape)
                         if original_rank_dim is None else original_rank_dim)
        sparse_ids = sparse_ops.sparse_reshape(
            sparse_ids,
            [
                math_ops.reduce_prod(
                    array_ops.slice(original_shape, [0], [original_rank - 1])),
                array_ops.gather(original_shape, original_rank - 1),
            ],
        )
        if sparse_weights is not None:
            sparse_weights = sparse_tensor.SparseTensor(
                sparse_ids.indices, sparse_weights.values,
                sparse_ids.dense_shape)

        # Prune invalid weights.
        if combiner != "sum":
            sparse_ids, sparse_weights = _prune_invalid_weights(
                sparse_ids, sparse_weights)

        # Fill in dummy values for empty features, if necessary.
        sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(
            sparse_ids, default_id or 0)
        if sparse_weights is not None:
            sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(
                sparse_weights, 1.0)

        result, trainable_ = embedding_lookup_sparse(
            embedding_weights,
            sparse_ids,
            sparse_weights,
            combiner=combiner,
            partition_strategy=partition_strategy,
            name=name + "/embedding_lookup_sparse",
            max_norm=max_norm,
            return_trainable=True,
        )

        if default_id is None:
            # Broadcast is_row_empty to the same shape as embedding_lookup_result,
            # for use in Select.
            is_row_empty = array_ops.tile(
                array_ops.reshape(is_row_empty, [-1, 1]),
                array_ops.stack([1, array_ops.shape(result)[1]]),
            )

            result = array_ops.where(is_row_empty,
                                     array_ops.zeros_like(result),
                                     result,
                                     name="where")

        # Reshape back from linear ids back into higher-dimensional dense result.
        final_result = array_ops.reshape(
            result,
            array_ops.concat(
                [
                    array_ops.slice(
                        math_ops.cast(original_shape, dtypes.int32),
                        [0],
                        [original_rank - 1],
                    ),
                    array_ops.slice(array_ops.shape(result), [1], [-1]),
                ],
                0,
            ),
        )
        final_result.set_shape(
            tensor_shape.unknown_shape(
                (tensor_shape.Dimension(original_rank_dim) -
                 1).value).concatenate(result.get_shape()[1:]))
        return (final_result, trainable_) if return_trainable else final_result
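
A sketch, with assumed values, of the final set_shape above: the leading original_rank - 1 dims become unknown and the trailing embedding dims are kept:

from tensorflow.python.framework import tensor_shape

original_rank = 3                                # hypothetical rank of sparse_ids
embedding_dims = tensor_shape.TensorShape([8])   # hypothetical result tail
print(tensor_shape.unknown_shape(original_rank - 1)
      .concatenate(embedding_dims))  # (?, ?, 8)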
Example 54
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
    """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-0 or rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.

  Raises:
    ValueError: If the shape is rank-0 and is not statically known to be -1.
  """
    if isinstance(tensor, ops.EagerTensor):
        return tensor_shape.as_shape(
            [dim if dim != -1 else None for dim in tensor.numpy()])

    if tensor.get_shape().ndims == 0:
        value = constant_value(tensor)
        if value is None:
            raise ValueError(
                "Received a scalar with unknown value as shape; require a statically "
                "known scalar with value '-1' to describe an unknown shape.")
        if value != -1:
            raise ValueError(
                "Received a scalar value '%s' as shape; require a statically known "
                "scalar with value '-1' to describe an unknown shape." % value)
        return tensor_shape.unknown_shape()

    shape = tensor.get_shape().with_rank(1)
    if shape == [0]:
        return tensor_shape.TensorShape([])
    elif tensor.op.type == "Cast":
        pre_cast = constant_value_as_shape(tensor.op.inputs[0])
        if pre_cast.dims is None:
            # the input to cast has a totally undefined shape; just return that.
            return pre_cast
        cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
        if cast_dtype not in (dtypes.int32, dtypes.int64):
            return tensor_shape.unknown_shape(shape.dims[0].value)
        dest_dtype_shape_array = np.array([
            x if x is not None else -1 for x in pre_cast.as_list()
        ]).astype(cast_dtype.as_numpy_dtype)
        return tensor_shape.TensorShape(
            [x if x >= 0 else None for x in dest_dtype_shape_array])
    elif tensor.op.type == "Shape":
        return tensor.op.inputs[0].get_shape()
    elif tensor.op.type == "Pack":
        ret = tensor_shape.TensorShape([])  # Empty list.
        # Since we expect rank 1 inputs, Pack's axis must be zero, otherwise it
        # would not be rank 1.
        assert tensor.op.get_attr("axis") == 0
        for pack_input in tensor.op.inputs:
            # `pack_input` must be a scalar. Attempt to evaluate it, and append it
            # to `ret`.
            pack_input_val = constant_value(pack_input)
            if pack_input_val is None or pack_input_val < 0:
                new_dim = tensor_shape.Dimension(None)
            else:
                new_dim = tensor_shape.Dimension(pack_input_val)
            ret = ret.concatenate([new_dim])
        return ret
    elif tensor.op.type == "Concat":
        # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
        # the only legal value when concatenating vectors, and it will
        # have been checked by a previous shape function.
        ret = tensor_shape.TensorShape([])  # Empty list.
        for concat_input in tensor.op.inputs[1:]:
            # `concat_input` must be a vector. Attempt to evaluate it as a shape,
            # and concatenate it with `ret`.
            ret = ret.concatenate(constant_value_as_shape(concat_input))
        return ret
    elif tensor.op.type == "ConcatV2":
        # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
        # the only legal value when concatenating vectors, and it will
        # have been checked by a previous shape function.
        ret = tensor_shape.TensorShape([])  # Empty list.
        for concat_input in tensor.op.inputs[:-1]:
            # `concat_input` must be a vector. Attempt to evaluate it as a shape,
            # and concatenate it with `ret`.
            ret = ret.concatenate(constant_value_as_shape(concat_input))
        return ret
    elif tensor.op.type == "StridedSlice":
        try:
            begin = constant_value(tensor.op.inputs[1])
            end = constant_value(tensor.op.inputs[2])
            strides = constant_value(tensor.op.inputs[3])
            if begin is not None and end is not None and strides is not None:
                begin = begin[0]
                end = end[0]
                strides = strides[0]
                begin_mask = tensor.op.get_attr("begin_mask")
                if begin_mask == 1:
                    begin = None
                end_mask = tensor.op.get_attr("end_mask")
                if end_mask == 1:
                    end = None

                ellipsis_mask = tensor.op.get_attr("ellipsis_mask")
                new_axis_mask = tensor.op.get_attr("new_axis_mask")
                shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask")
                valid_attributes = (not ellipsis_mask and not new_axis_mask
                                    and not shrink_axis_mask
                                    and (not begin_mask or (begin_mask == 1))
                                    and (not end_mask or (end_mask == 1)))
                if valid_attributes:  # additional inputs not supported
                    prev = constant_value_as_shape(tensor.op.inputs[0])
                    prev = prev[begin:end:strides]
                    ret = tensor_shape.TensorShape(prev)
                    return ret

        except ValueError:  # Could come from get_attr or slicing prev.
            pass
        except TypeError:  # Could come from slicing prev.
            pass
    elif (tensor.op.type == "Placeholder" and tensor.op.graph.building_function
          and hasattr(tensor.op.graph, "internal_captures")):
        # If we are inside a FuncGraph, try to look up the constant value of the
        # corresponding external capture. Note that we only look at captures and
        # not the fed inputs because those can be fed different values in different
        # instantiations of the function call or different iterations of a
        # tf.while_loop.
        for i, capture in enumerate(tensor.op.graph.internal_captures):
            if capture is tensor:
                external_capture = tensor.op.graph.external_captures[i]
                return constant_value_as_shape(external_capture)

    ret = tensor_shape.unknown_shape(shape.dims[0].value)
    value = constant_value(tensor)
    if value is not None:
        ret = ret.merge_with(
            tensor_shape.TensorShape([d if d >= 0 else None for d in value]))
    return ret
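
# A minimal usage sketch of constant_value_as_shape, assuming TF1-style graph
# mode and the internal framework modules used throughout these examples.
# Stacking scalars produces a rank-1 "Pack" op, so the branch above recovers a
# partial shape: constant scalars become known dimensions and placeholders
# become None.
from tensorflow.python.framework import constant_op, dtypes, tensor_util
from tensorflow.python.ops import array_ops

dim0 = constant_op.constant(2)                        # known when the graph is built
dim1 = array_ops.placeholder(dtypes.int32, shape=())  # unknown until fed
shape_tensor = array_ops.stack([dim0, dim1])          # a "Pack" op with axis=0
inferred = tensor_util.constant_value_as_shape(shape_tensor)
assert inferred.as_list() == [2, None]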
Example 55
    def __init__(self,
                 initial_value=None,
                 name=None,
                 trainable=True,
                 collections=None,
                 dtype=None,
                 shape=None):
        """Creates a variable.

    Args:
      initial_value: An `Output` or Python object convertible to an `Output`
        representing the initial value of this variable.
      name: The name of this variable. Automatically uniquified.
      trainable: Whether the global read of this variable will be used for
        training.
      collections: Additional collections to which the `read` operation for
        this variable is to be added. Defaults to [].
      dtype: The type of this variable. Can be omitted if it can be deduced
        from the initial_value. If different from the type of the initial
        value it will be cast to this type.
      shape: The shape of this variable. Only specify if there is no initial
        value but shape inference is desired.
    """
        if initial_value is not None:
            initial_value = ops.convert_to_tensor(initial_value)
        if dtype is None:
            assert initial_value is not None, (
                "Trying to create a resource variable with no dtype or "
                "initial value. At least one of these must be set.")
            dtype = initial_value.dtype
        elif initial_value is not None:
            initial_value = math_ops.cast(initial_value, dtype)
        if shape is None:
            if initial_value is not None:
                shape = initial_value.get_shape().as_proto()
            else:
                shape = tensor_shape.unknown_shape()
        else:
            shape = tensor_shape.as_shape(shape)

        self._dtype = dtype
        with ops.name_scope(name, "Variable", [initial_value]) as name:
            self._handle = gen_resource_variable_ops.var_handle_op(
                shared_name=name, name=name, dtype=dtype, shape=shape)

            with ops.name_scope("IsInitialized"):
                self._is_initialized_op = (
                    gen_resource_variable_ops.var_is_initialized_op(
                        self._handle))
            if initial_value is not None:
                with ops.name_scope("Create"):
                    self._initialize_op = gen_resource_variable_ops.create_variable_op(
                        self._handle, initial_value)
                resources.register_resource(self._handle, self._initialize_op,
                                            self._is_initialized_op)

            with ops.name_scope("Read"):
                self._value = gen_resource_variable_ops.read_variable_op(
                    self._handle, dtype=self._dtype)
            _register_variable_read(self._value,
                                    trainable=trainable,
                                    collections=collections)
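
# A minimal sketch isolating the dtype/shape resolution performed in the
# constructor above; `_resolve_dtype_and_shape` is a hypothetical helper, not
# part of the original class, and the cast-to-dtype step is omitted.
from tensorflow.python.framework import ops, tensor_shape

def _resolve_dtype_and_shape(initial_value=None, dtype=None, shape=None):
    # Mirror __init__: deduce dtype from the initial value, and fall back to
    # an unknown shape when neither an initial value nor a shape is given.
    if initial_value is not None:
        initial_value = ops.convert_to_tensor(initial_value)
    if dtype is None:
        assert initial_value is not None, "need a dtype or an initial value"
        dtype = initial_value.dtype
    if shape is None:
        shape = (initial_value.get_shape() if initial_value is not None
                 else tensor_shape.unknown_shape())
    else:
        shape = tensor_shape.as_shape(shape)
    return dtype, shape

resolved_dtype, resolved_shape = _resolve_dtype_and_shape(
    initial_value=[[1.0, 2.0]])
assert resolved_shape.as_list() == [1, 2]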
Example 56
    def get_observation_model(self, times):
        parent_model = super(UnknownShapeModel,
                             self).get_observation_model(times)
        return array_ops.placeholder_with_default(
            input=parent_model, shape=tensor_shape.unknown_shape())
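
# A small sketch of the pattern above, assuming TF1 graph mode:
# placeholder_with_default keeps a default value, while the unknown shape
# drops all static shape information from the output.
from tensorflow.python.framework import constant_op, tensor_shape
from tensorflow.python.ops import array_ops

default = constant_op.constant([[1.0, 2.0]])
observation = array_ops.placeholder_with_default(
    input=default, shape=tensor_shape.unknown_shape())
assert observation.get_shape().ndims is None  # rank no longer statically known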
Example 57
def safe_embedding_lookup_sparse(embedding_weights,
                                 sparse_ids,
                                 sparse_weights=None,
                                 combiner='mean',
                                 default_id=None,
                                 name=None,
                                 partition_strategy='div',
                                 max_norm=None):
    """Lookup embedding results, accounting for invalid IDs and empty features.

  The partitioned embedding in `embedding_weights` must all be the same shape
  except for the first dimension. The first dimension is allowed to vary as the
  vocabulary size is not necessarily a multiple of `P`.  `embedding_weights`
  may be a `PartitionedVariable` as returned by `tf.get_variable()` with a
  partitioner.

  Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
  with non-positive weight. For an entry with no features, the embedding vector
  for `default_id` is returned, or the 0-vector if `default_id` is not supplied.

  The ids and weights may be multi-dimensional. Embeddings are always aggregated
  along the last dimension.

  Args:
    embedding_weights:  A list of `P` float `Tensor`s or values representing
        partitioned embedding `Tensor`s.  Alternatively, a `PartitionedVariable`
        created by partitioning along dimension 0.  The total unpartitioned
        shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
        vocab size and `e_1, ..., e_m` are the embedding dimensions.
    sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
        ids. `d_0` is typically batch size.
    sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
        float weights corresponding to `sparse_ids`, or `None` if all weights
        are assumed to be 1.0.
    combiner: A string specifying how to combine embedding results for each
        entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
        the default.
    default_id: The id to use for an entry with no features.
    name: A name for this operation (optional).
    partition_strategy: A string specifying the partitioning strategy.
        Currently `"div"` and `"mod"` are supported. Default is `"div"`.
    max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
        combining.

  Returns:
    Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.

  Raises:
    ValueError: if `embedding_weights` is empty.
  """
    if embedding_weights is None:
        raise ValueError('Missing embedding_weights %s.' % embedding_weights)
    if isinstance(embedding_weights, variables.PartitionedVariable):
        embedding_weights = list(
            embedding_weights)  # get underlying Variables.
    if not isinstance(embedding_weights, list):
        embedding_weights = [embedding_weights]
    if len(embedding_weights) < 1:
        raise ValueError('Missing embedding_weights %s.' % embedding_weights)

    dtype = sparse_weights.dtype if sparse_weights is not None else None
    embedding_weights = [
        ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
    ]

    with ops.name_scope(name, 'embedding_lookup', embedding_weights +
                        [sparse_ids, sparse_weights]) as scope:
        # Reshape higher-rank sparse ids and weights to linear segment ids.
        original_shape = sparse_ids.dense_shape
        original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
        original_rank = (array_ops.size(original_shape)
                         if original_rank_dim.value is None else
                         original_rank_dim.value)
        sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
            math_ops.reduce_prod(
                array_ops.slice(original_shape, [0], [original_rank - 1])),
            array_ops.gather(original_shape, original_rank - 1)
        ])
        if sparse_weights is not None:
            sparse_weights = sparse_tensor.SparseTensor(
                sparse_ids.indices, sparse_weights.values,
                sparse_ids.dense_shape)

        # Prune invalid ids and weights.
        sparse_ids, sparse_weights = _prune_invalid_ids(
            sparse_ids, sparse_weights)
        if combiner != 'sum':
            sparse_ids, sparse_weights = _prune_invalid_weights(
                sparse_ids, sparse_weights)

        # Fill in dummy values for empty features, if necessary.
        sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(
            sparse_ids, default_id or 0)
        if sparse_weights is not None:
            sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(
                sparse_weights, 1.0)

        result = embedding_lookup_sparse(
            embedding_weights,
            sparse_ids,
            sparse_weights,
            combiner=combiner,
            partition_strategy=partition_strategy,
            name=None if default_id is None else scope,
            max_norm=max_norm)

        if default_id is None:
            # Broadcast is_row_empty to the same shape as embedding_lookup_result,
            # for use in Select.
            is_row_empty = array_ops.tile(
                array_ops.reshape(is_row_empty, [-1, 1]),
                array_ops.stack([1, array_ops.shape(result)[1]]))

            result = array_ops.where(is_row_empty,
                                     array_ops.zeros_like(result),
                                     result,
                                     name=scope)

        # Reshape from linear ids back into the higher-dimensional dense result.
        final_result = array_ops.reshape(
            result,
            array_ops.concat([
                array_ops.slice(math_ops.cast(original_shape, dtypes.int32),
                                [0], [original_rank - 1]),
                array_ops.slice(array_ops.shape(result), [1], [-1])
            ], 0))
        final_result.set_shape(
            tensor_shape.unknown_shape(
                (original_rank_dim - 1).value).concatenate(
                    result.get_shape()[1:]))
        return final_result
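
# A usage sketch for the function above (exposed in TF1 as
# tf.contrib.layers.safe_embedding_lookup_sparse; graph mode assumed, with a
# single unpartitioned weight matrix). Row 1 of the sparse ids holds only the
# invalid id -1, so it is pruned and, with default_id=None, combines to the
# zero vector.
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import sparse_tensor

weights = tf.constant(np.random.randn(10, 20).astype(np.float32))
ids = sparse_tensor.SparseTensor(
    indices=[[0, 0], [0, 1], [1, 0], [2, 3]],
    values=[1, 3, -1, 1],
    dense_shape=[3, 4])
embedded = safe_embedding_lookup_sparse([weights], ids, combiner='mean')
# embedded is a float32 Tensor of shape [3, 20]; embedded[1, :] is all zeros.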
Example 58
    def testSlice(self):
        tf_val = array_ops.placeholder(dtypes.int32, shape=(4, ))[0:2]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([None, None], c_val.as_list())

        # begin:end
        tf_val = constant_op.constant([10, 20, 30])[1:3]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([20, 30], c_val.as_list())

        # begin:end:stride
        tf_val = array_ops.strided_slice(constant_op.constant([10, 20, 30]),
                                         [1], [3],
                                         strides=[2])
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([20], c_val.as_list())

        # [1, 2, 16, 37, None, 48]
        tf_val_orig = array_ops.concat(
            [[1, 2, 16, 37],
             array_ops.placeholder(dtypes.int32, shape=(1, )), [48]], 0)

        # begin: no end
        tf_val = tf_val_orig[2:]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([16, 37, None, 48], c_val.as_list())

        # begin::negative stride
        tf_val = tf_val_orig[2::-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([16, 2, 1], c_val.as_list())

        # :end:negative stride
        tf_val = tf_val_orig[:1:-2]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([48, 37], c_val.as_list())

        # begin:end:negative stride
        tf_val = tf_val_orig[3:1:-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, 16], c_val.as_list())

        # begin:negative end:stride
        tf_val = tf_val_orig[1:-3:1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([2, 16], c_val.as_list())

        # negative begin::stride
        tf_val = tf_val_orig[-3::1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, None, 48], c_val.as_list())

        # negative begin::negative stride
        tf_val = tf_val_orig[-3::-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, 16, 2, 1], c_val.as_list())

        # negative begin:negative end:negative stride
        tf_val = tf_val_orig[-3:-5:-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, 16], c_val.as_list())

        # Shape inference is not supported for additional arguments such as
        # Ellipsis.
        tf_val = constant_op.constant([10, 20, 30])[...]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([None, None, None], c_val.as_list())

        # Shape inference is not supported for tensor-valued slice bounds.
        tf_val = constant_op.constant(
            [10, 20, 30])[array_ops.placeholder(dtypes.int32, shape=()):]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual(tensor_shape.unknown_shape(), c_val)

        # Shape inference is not supported for inputs of rank greater than one.
        with self.assertRaises(ValueError):
            tf_val = constant_op.constant([[10], [20], [30]])[:, 0:]
            c_val = tensor_util.constant_value_as_shape(tf_val)
Example 59
def safe_embedding_lookup_sparse(embedding_weights,
                                 sparse_ids,
                                 sparse_weights=None,
                                 combiner="mean",
                                 default_id=None,
                                 name=None,
                                 partition_strategy="div",
                                 max_norm=None):
  """Lookup embedding results, accounting for invalid IDs and empty features.

  The partitioned embedding in `embedding_weights` must all be the same shape
  except for the first dimension. The first dimension is allowed to vary as the
  vocabulary size is not necessarily a multiple of `P`.  `embedding_weights`
  may be a `PartitionedVariable` as returned by `tf.compat.v1.get_variable()`
  with a partitioner.

  Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
  with non-positive weight. For an entry with no features, the embedding vector
  for `default_id` is returned, or the 0-vector if `default_id` is not supplied.

  The ids and weights may be multi-dimensional. Embeddings are always aggregated
  along the last dimension.

  Args:
    embedding_weights: A single tensor representing the complete embedding
      tensor, or a list tensors all of same shape except for the first
      dimension, representing sharded embedding tensors. Alternatively, a
      `PartitionedVariable`, created by partitioning along dimension 0. Each
      element must be appropriately sized for the given `partition_strategy`.
    sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
      ids. `d_0` is typically batch size.
    sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
      float weights corresponding to `sparse_ids`, or `None` if all weights are
      assumed to be 1.0.
    combiner: A string specifying how to combine embedding results for each
      entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the
      default.
    default_id: The id to use for an entry with no features.
    name: A name for this operation (optional).
    partition_strategy: A string specifying the partitioning strategy. Currently
      `"div"` and `"mod"` are supported. Default is `"div"`.
    max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
      combining.

  Returns:
    A dense tensor representing the combined embeddings for the
    sparse ids. For each row in the dense tensor represented by `sparse_ids`,
    the op
    looks up the embeddings for all ids in that row, multiplies them by the
    corresponding weight, and combines these embeddings as specified.

    In other words, if

      `shape(combined embedding_weights) = [p0, p1, ..., pm]`

    and

      `shape(sparse_ids) = shape(sparse_weights) = [d0, d1, ..., dn]`

    then

      `shape(output) = [d0, d1, ... dn-1, p1, ..., pm]`.

    For instance, if `params` (the combined `embedding_weights`) is a 10x20
    matrix, and `sparse_ids` / `sparse_weights` are

      ```python
      [0, 0]: id 1, weight 2.0
      [0, 1]: id 3, weight 0.5
      [1, 0]: id -1, weight 1.0
      [2, 3]: id 1, weight 3.0
      ```

    and `default_id` is 0,

    then with `combiner`="mean" the output will be a 3x20 matrix where

      ```python
      output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
      output[1, :] = (params[0, :] * 1.0) / 1.0
      output[2, :] = (params[1, :] * 3.0) / 3.0
      ```

  Raises:
    ValueError: if `embedding_weights` is empty.
  """
  if embedding_weights is None:
    raise ValueError("Missing embedding_weights %s." % embedding_weights)
  if isinstance(embedding_weights, variables.PartitionedVariable):
    embedding_weights = list(embedding_weights)  # get underlying Variables.
  if not isinstance(embedding_weights, list):
    embedding_weights = [embedding_weights]
  if len(embedding_weights) < 1:
    raise ValueError("Missing embedding_weights %s." % embedding_weights)

  dtype = sparse_weights.dtype if sparse_weights is not None else None
  embedding_weights = [
      w if (isinstance(w, resource_variable_ops.ResourceVariable)
            and dtype in (None, w.dtype))
      else ops.convert_to_tensor(w, dtype=dtype)
      for w in embedding_weights
  ]

  with ops.name_scope(name, "embedding_lookup", embedding_weights +
                      [sparse_ids, sparse_weights]) as scope:
    # Reshape higher-rank sparse ids and weights to linear segment ids.
    original_shape = sparse_ids.dense_shape
    original_rank_dim = tensor_shape.dimension_value(
        sparse_ids.dense_shape.get_shape()[0])
    original_rank = (
        array_ops.size(original_shape)
        if original_rank_dim is None else original_rank_dim)
    sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
        math_ops.reduce_prod(
            array_ops.slice(original_shape, [0], [original_rank - 1])),
        array_ops.gather(original_shape, original_rank - 1)
    ])
    if sparse_weights is not None:
      sparse_weights = sparse_tensor.SparseTensor(sparse_ids.indices,
                                                  sparse_weights.values,
                                                  sparse_ids.dense_shape)

    # Prune invalid ids and weights.
    sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
    if combiner != "sum":
      sparse_ids, sparse_weights = _prune_invalid_weights(
          sparse_ids, sparse_weights)

    # Fill in dummy values for empty features, if necessary.
    sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(
        sparse_ids, default_id or 0)
    if sparse_weights is not None:
      sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)

    result = embedding_lookup_sparse(
        embedding_weights,
        sparse_ids,
        sparse_weights,
        combiner=combiner,
        partition_strategy=partition_strategy,
        name=None if default_id is None else scope,
        max_norm=max_norm)

    if default_id is None:
      # Broadcast is_row_empty to the same shape as embedding_lookup_result,
      # for use in Select.
      is_row_empty = array_ops.tile(
          array_ops.reshape(is_row_empty, [-1, 1]),
          array_ops.stack([1, array_ops.shape(result)[1]]))

      result = array_ops.where(
          is_row_empty, array_ops.zeros_like(result), result, name=scope)

    # Reshape from linear ids back into the higher-dimensional dense result.
    final_result = array_ops.reshape(
        result,
        array_ops.concat([
            array_ops.slice(
                math_ops.cast(original_shape, dtypes.int32), [0],
                [original_rank - 1]),
            array_ops.slice(array_ops.shape(result), [1], [-1])
        ], 0))
    final_result.set_shape(
        tensor_shape.unknown_shape(
            (tensor_shape.Dimension(original_rank_dim) - 1).value).concatenate(
                result.get_shape()[1:]))
    return final_result
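
# A small sketch of the final set_shape bookkeeping above: for rank-3 sparse
# ids and a combined result of static shape [None, 20], the restored dense
# result is statically known to be [None, None, 20].
from tensorflow.python.framework import tensor_shape

original_rank = 3
partial = tensor_shape.unknown_shape(original_rank - 1).concatenate(
    tensor_shape.TensorShape([20]))
assert partial.as_list() == [None, None, 20]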
Example 60
    def testBroadcast_unknown_dims(self):
        unknown = tensor_shape.unknown_shape()
        shape_0 = tensor_shape.scalar()
        shape_1 = tensor_shape.vector(1)
        # pylint: disable=invalid-name
        shape_U = tensor_shape.vector(None)
        shape_1xU = tensor_shape.matrix(1, None)
        shape_Ux1 = tensor_shape.matrix(None, 1)
        shape_4xU = tensor_shape.matrix(4, None)
        shape_Ux4 = tensor_shape.matrix(None, 4)
        # pylint: enable=invalid-name

        # Tensors with same shape should have the same broadcast result.
        for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
            self._assert_broadcast_with_unknown_dims(expected=shape,
                                                     shape1=shape,
                                                     shape2=shape)

        # [] and [1] act like identity.
        for identity in (shape_0, shape_1):
            for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
                self._assert_broadcast_with_unknown_dims(expected=shape,
                                                         shape1=identity,
                                                         shape2=shape)

        # Unknown in, unknown out.
        for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
            self._assert_broadcast_with_unknown_dims(expected=unknown,
                                                     shape1=shape,
                                                     shape2=unknown)

        self._assert_broadcast_with_unknown_dims(expected=shape_1xU,
                                                 shape1=shape_U,
                                                 shape2=shape_1xU)
        shape_UxU = tensor_shape.matrix(None, None)  # pylint: disable=invalid-name
        self._assert_broadcast_with_unknown_dims(expected=shape_UxU,
                                                 shape1=shape_U,
                                                 shape2=shape_Ux1)
        self._assert_broadcast_with_unknown_dims(expected=shape_4xU,
                                                 shape1=shape_U,
                                                 shape2=shape_4xU)
        self._assert_broadcast_with_unknown_dims(expected=shape_Ux4,
                                                 shape1=shape_U,
                                                 shape2=shape_Ux4)
        self._assert_broadcast_with_unknown_dims(expected=shape_UxU,
                                                 shape1=shape_1xU,
                                                 shape2=shape_Ux1)
        self._assert_broadcast_with_unknown_dims(expected=shape_4xU,
                                                 shape1=shape_1xU,
                                                 shape2=shape_4xU)
        self._assert_broadcast_with_unknown_dims(expected=shape_Ux4,
                                                 shape1=shape_1xU,
                                                 shape2=shape_Ux4)
        self._assert_broadcast_with_unknown_dims(expected=shape_4xU,
                                                 shape1=shape_Ux1,
                                                 shape2=shape_4xU)
        self._assert_broadcast_with_unknown_dims(expected=shape_Ux4,
                                                 shape1=shape_Ux1,
                                                 shape2=shape_Ux4)
        shape_4x4 = tensor_shape.matrix(4, 4)
        self._assert_broadcast_with_unknown_dims(expected=shape_4x4,
                                                 shape1=shape_4xU,
                                                 shape2=shape_Ux4)
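
# A brief sketch of the same unknown-dimension broadcasting rules through the
# public API (tf.broadcast_static_shape): an unknown dimension broadcast
# against 1 stays unknown, while 1 broadcast against 4 becomes 4.
import tensorflow as tf

broadcast = tf.broadcast_static_shape(tf.TensorShape([None, 1]),
                                      tf.TensorShape([1, 4]))
assert broadcast.as_list() == [None, 4]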