Example 1
def _SliceShape(op):
  """Shape function for array_ops.slice."""
  input_shape = op.inputs[0].get_shape()
  begin_shape = op.inputs[1].get_shape().with_rank_at_most(1)
  sizes_shape = op.inputs[2].get_shape().with_rank_at_most(1)
  rank_vector_shape = begin_shape.merge_with(sizes_shape)
  ndims = rank_vector_shape.num_elements()
  if ndims is not None:
    input_shape.assert_has_rank(ndims)
  begin_value = tensor_util.ConstantValue(op.inputs[1])
  sizes_value = tensor_util.ConstantValue(op.inputs[2])
  if sizes_value is not None:
    returned_dims = []
    for i, slice_size in enumerate(sizes_value.ravel()):
      if slice_size != -1:
        returned_dims.append(slice_size)
      elif begin_value is not None:
        returned_dims.append(input_shape[i] - begin_value[i])
      else:
        returned_dims.append(None)
    return [tensor_shape.TensorShape(returned_dims)]
  else:
    if input_shape.ndims is not None:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    elif ndims is not None:
      return [tensor_shape.unknown_shape(ndims=ndims)]
    else:
      return [tensor_shape.unknown_shape()]
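As a plain-Python illustration (not part of the TensorFlow source), the returned_dims logic above resolves a size of -1 to "everything after begin" when both constants are known:

# Hypothetical values, mirroring the returned_dims loop in _SliceShape.
input_dims = [10, 20, 30]
begin = [2, 0, 5]
sizes = [-1, 20, 10]   # -1 means "take all remaining elements in that dimension"
returned_dims = [s if s != -1 else d - b
                 for d, b, s in zip(input_dims, begin, sizes)]
assert returned_dims == [8, 20, 10]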
Example 2
  def testConstant(self):
    np_val = np.random.rand(3, 4, 7).astype(np.float32)
    tf_val = constant_op.constant(np_val)
    self.assertAllClose(np_val, tensor_util.ConstantValue(tf_val))

    np_val = np.random.rand(3, 0, 7).astype(np.float32)
    tf_val = constant_op.constant(np_val)
    self.assertAllClose(np_val, tensor_util.ConstantValue(tf_val))
Example 3
def _RangeShape(op):
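  """Shape function for the Range op."""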
  start_value = tensor_util.ConstantValue(op.inputs[0])
  limit_value = tensor_util.ConstantValue(op.inputs[1])
  delta_value = tensor_util.ConstantValue(op.inputs[2])
  if start_value is None or limit_value is None or delta_value is None:
    return [tensor_shape.vector(None)]
  else:
    return [tensor_shape.vector((limit_value - start_value + delta_value - 1) //
                                delta_value)]
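The expression above is a ceiling division: it counts how many steps of delta fit in [start, limit). A quick sanity check in plain Python (illustrative values):

start, limit, delta = 3, 18, 5                 # range yields 3, 8, 13
length = (limit - start + delta - 1) // delta  # ceil((limit - start) / delta)
assert length == 3 == len(range(start, limit, delta))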
Example 4
  def testCast(self):
    np_val = np.random.rand(3, 4, 7).astype(np.float32)
    tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
    c_val = tensor_util.ConstantValue(tf_val)
    self.assertAllClose(np_val.astype(np.float64), c_val)

    np_val = np.random.rand(3, 0, 7).astype(np.float32)
    tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
    c_val = tensor_util.ConstantValue(tf_val)
    self.assertAllClose(np_val.astype(np.float64), c_val)
Example 5
def _EditDistanceShape(op):
  """Shape function for the EditDistance op."""
  hypothesis_shape = tensor_util.ConstantValue(op.inputs[2])
  truth_shape = tensor_util.ConstantValue(op.inputs[5])
  if hypothesis_shape is not None and truth_shape is not None:
    if len(hypothesis_shape) != len(truth_shape):
      raise ValueError(
          "Inconsistent ranks in hypothesis and truth.  Saw shapes: %s and %s" %
          (str(hypothesis_shape), str(truth_shape)))
    return [tensor_shape.TensorShape(
        [max(h, t) for h, t in zip(hypothesis_shape[:-1], truth_shape[:-1])])]

  return [tensor_shape.unknown_shape()]
Example 6
def _ReductionShape(op):
    """Common shape function for reduction ops."""
    input_shape = op.inputs[0].get_shape()
    reduction_indices = tensor_util.ConstantValue(op.inputs[1])
    keep_dims = op.get_attr("keep_dims")
    if reduction_indices is None or input_shape.ndims is None:
        if keep_dims:
            return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
        else:
            return [tensor_shape.unknown_shape()]

    # Turn reduction_indices from scalar to vector if necessary
    reduction_indices = np.ravel(reduction_indices)

    for reduction_index in reduction_indices:
        if reduction_index < 0 or reduction_index >= input_shape.ndims:
            raise ValueError(
                "Invalid reduction dimension %d for input with %d "
                "dimensions" % (reduction_index, input_shape.ndims))

    returned_dims = []
    if keep_dims:
        for i, dim in enumerate(input_shape.dims):
            if i in reduction_indices:
                returned_dims.append(1)
            else:
                returned_dims.append(dim)
    else:
        for i, dim in enumerate(input_shape.dims):
            if i not in reduction_indices:
                returned_dims.append(dim)
    return [tensor_shape.TensorShape(returned_dims)]
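In other words, with keep_dims each reduced dimension becomes 1, and without it the dimension is dropped entirely. A minimal sketch of the same bookkeeping, with made-up shapes:

input_dims = [2, 3, 5]
reduction_indices = [0, 2]
kept = [1 if i in reduction_indices else d for i, d in enumerate(input_dims)]
dropped = [d for i, d in enumerate(input_dims) if i not in reduction_indices]
assert kept == [1, 3, 1] and dropped == [3]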
Example 7
def _TransposeShape(op):
  """Shape function for the Transpose op.

  This op takes two inputs:

  * input: a rank-N tensor of arbitrary shape.
  * shuffle: a length-N vector.

  Its output is the rank-N tensor computed by permuting the dimensions
  of input according to shuffle.

  Args:
    op: A Transpose op.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the shapes of input and shuffle are incompatible.
    IndexError: If shuffle contains an index that is >= the rank of input.
  """
  input_shape = op.inputs[0].get_shape()
  transpose_shape = op.inputs[1].get_shape().merge_with(tensor_shape.vector(
      input_shape.ndims))
  transpose_vec = tensor_util.ConstantValue(op.inputs[1])
  if transpose_vec is None:
    return [tensor_shape.unknown_shape(ndims=transpose_shape[0].value)]
  else:
    return [tensor_shape.TensorShape([input_shape[i]
                                      for i in transpose_vec.tolist()])]
Example 8
def _TileShape(op):
  """Shape function for the Tile op.

  This op has two inputs:

  * input: A rank-N tensor.
  * multiples: A length-N vector, in which the i^th element contains
    the factor by which `input` will be tiled in the i^th dimension.

  It has one output, which has the same rank as input, and additional
  elements according to the values in multiples.

  Args:
    op: A Tile Operation.

  Returns:
    A single-element list containing the shape of the output.
  """
  multiples_shape = op.inputs[1].get_shape().with_rank_at_most(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape.num_elements())
  multiples = tensor_util.ConstantValue(op.inputs[1])
  if multiples is None:
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
  else:
    output_dims = []
    multiples = multiples.ravel()
    for i, dim in enumerate(input_shape.dims):
      output_dims.append(dim * multiples[i])
    return [tensor_shape.TensorShape(output_dims)]
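Each output dimension is just the input dimension multiplied by the corresponding multiple, e.g. (illustrative values):

input_dims = [2, 3]
multiples = [4, 1]
assert [d * m for d, m in zip(input_dims, multiples)] == [8, 3]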
Example 9
def _ConcatShape(op):
  concat_dim = tensor_util.ConstantValue(op.inputs[0])
  if concat_dim is None:
    # Return an unknown shape with the same rank as the inputs, or an
    # unknown rank if no input's rank is known.
    rank = None
    for value in op.inputs[1:]:
      if rank is not None:
        value.get_shape().assert_has_rank(rank)
      else:
        rank = value.get_shape().ndims
    if rank == 0:
      raise ValueError("Can't concatenate scalars (use tf.pack instead)")
    return [tensor_shape.unknown_shape(ndims=rank)]

  else:
    # Merge all the non-concat dims, and sum the concat dim to make an
    # output shape.
    concat_dim = int(concat_dim)
    output_shape = op.inputs[1].get_shape()
    for value in op.inputs[2:]:
      value_shape = value.get_shape()
      if value_shape.ndims is not None and concat_dim >= value_shape.ndims:
        raise ValueError("concat_dim is out of range (values rank = %d)" %
                         value_shape.ndims)
      before = output_shape[:concat_dim].merge_with(value_shape[:concat_dim])
      at = output_shape[concat_dim] + value_shape[concat_dim]
      after = output_shape[
          concat_dim + 1:].merge_with(value_shape[concat_dim + 1:])
      output_shape = before.concatenate(at).concatenate(after)
    return [output_shape]
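With a constant concat_dim, the loop keeps the non-concat dimensions (which must agree across inputs) and sums the concat dimension. A plain-Python sketch of that accumulation, with hypothetical shapes:

concat_dim = 1
shapes = [[5, 2, 7], [5, 3, 7], [5, 4, 7]]
output = list(shapes[0])
for s in shapes[1:]:
    # Non-concat dimensions must match (the merge_with calls above enforce this).
    assert all(a == b for i, (a, b) in enumerate(zip(output, s)) if i != concat_dim)
    output[concat_dim] += s[concat_dim]
assert output == [5, 9, 7]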
Example 10
def _ExpandDimsShape(op):
  """Determine shape for expand op's output tensor.

  Args:
    op: Operation for which to determine shape.
        op.inputs[0] is the input tensor.
        op.inputs[1] is the dimension in which to expand.
  Returns:
    Shape of op's output tensor.
  Raises:
    ValueError: If dim is outside of [-rank - 1, rank], where rank is the number
        of dimensions in the input tensor.
  """
  input_shape = op.inputs[0].get_shape()
  if input_shape.dims is None:
    return [tensor_shape.unknown_shape()]
  dim = tensor_util.ConstantValue(op.inputs[1])
  input_ndims = input_shape.ndims
  if dim < -input_ndims - 1 or dim > input_ndims:
    raise ValueError(
        "dim %d not in [%d, %d]." % (dim, -input_ndims, input_ndims))
  if dim < 0:
    dim += (input_ndims + 1)
  result_shape = list(input_shape.dims)
  result_shape.insert(dim, 1)
  return [tensor_shape.TensorShape(result_shape)]
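Negative dims are normalized by adding rank + 1, so dim = -1 inserts the new size-1 axis at the end. Illustrative check:

input_dims = [3, 4]
dim = -1                       # insert the new axis at the end
if dim < 0:
    dim += len(input_dims) + 1
result = list(input_dims)
result.insert(dim, 1)
assert result == [3, 4, 1]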
Example 11
def _RandomShape(op):
  shape_val = tensor_util.ConstantValue(op.inputs[0])
  if shape_val is not None:
    return [tensor_shape.TensorShape(shape_val.tolist())]
  else:
    shape_shape = op.inputs[0].get_shape().with_rank_at_most(1)
    return [tensor_shape.unknown_shape(ndims=shape_shape.num_elements())]
Example 12
    def dequeue_many(self, n, name=None):
        """Dequeues and concatenates `n` elements from this queue.

        This operation concatenates queue-element component tensors along
        the 0th dimension to make a single component tensor.  All of the
        components in the dequeued tuple will have size `n` in the 0th dimension.

        If the queue contains fewer than `n` elements when this operation
        executes, it will block until `n` elements have been dequeued.

        Args:
          n: A scalar `Tensor` containing the number of elements to dequeue.
          name: A name for the operation (optional).

        Returns:
          The tuple of concatenated tensors that was dequeued.
        """
        if name is None:
            name = "%s_DequeueMany" % self._name

        ret = gen_data_flow_ops._queue_dequeue_many(self._queue_ref,
                                                    n,
                                                    self._dtypes,
                                                    name=name)

        # NOTE(mrry): Not using a shape function because we need access to
        # the Queue object.
        op = ret[0].op
        batch_dim = tensor_shape.Dimension(
            tensor_util.ConstantValue(op.inputs[1]))
        for output, shape in zip(op.values(), self._shapes):
            output.set_shape(
                tensor_shape.TensorShape([batch_dim]).concatenate(shape))

        return ret if len(ret) != 1 else ret[0]
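Since n is usually a Python constant, ConstantValue recovers it here and each output's static shape becomes [n] followed by the component shape; for example (hypothetical values):

n = 32                          # constant batch size passed to dequeue_many
component_shape = [28, 28, 1]   # one entry of self._shapes
assert [n] + component_shape == [32, 28, 28, 1]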
Example 13
  def assert_summary_scope(self, regexp):
    for summary in tf.get_collection(tf.GraphKeys.SUMMARIES):
      tag = tensor_util.ConstantValue(summary.op.inputs[0])
      assert tag is not None, 'All summaries have constant tags'
      tag = str(tag)
      assert isinstance(tag[0], six.string_types), tag[0]
      assert re.match(regexp,
                      tag), "tag doesn't match %s: %s" % (regexp, tag)
Example 14
def _SparseToDenseShape(op):
  input_shape = tensor_util.ConstantValue(op.inputs[1])
  if input_shape is not None:
    if np.ndim(input_shape) > 1:
      raise ValueError("Input shape should be a vector")
    return [tensor_shape.TensorShape(input_shape.tolist())]
  else:
    input_shape_shape = op.inputs[1].get_shape().with_rank_at_most(1)
    return [tensor_shape.unknown_shape(ndims=input_shape_shape.num_elements())]
Example 15
  def testConcat(self):
    np_val = np.random.rand(3, 4, 7).astype(np.float32)
    tf_val = array_ops.concat(
        0, [np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]])
    c_val = tensor_util.ConstantValue(tf_val)
    self.assertAllClose(np_val, c_val)

    tf_val = array_ops.concat(
        array_ops.placeholder(dtypes.int32),
        [np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]])
    c_val = tensor_util.ConstantValue(tf_val)
    self.assertIs(None, c_val)

    tf_val = array_ops.concat(
        1,
        [np_val[0, :, :], array_ops.placeholder(dtypes.float32),
         np_val[2, :, :]])
    c_val = tensor_util.ConstantValue(tf_val)
    self.assertIs(None, c_val)
Example 16
def _UnsortedSegmentSumShape(op):
  """Shape function for UnsortedSegmentSum."""
  data_shape = op.inputs[0].get_shape()
  segment_ids_shape = op.inputs[1].get_shape()
  mid = segment_ids_shape.ndims
  if mid is None:
    return [tensor_shape.unknown_shape()]
  else:
    num_segments = tensor_util.ConstantValue(op.inputs[2])
    return [tensor_shape.TensorShape([num_segments]).concatenate(
        data_shape[mid:])]
Example 17
def _Conv2DBackpropInputShape(op):
  """Shape function for the Conv2DBackpropInput op."""
  input_shape = tensor_util.ConstantValue(op.inputs[0])
  if input_shape is not None:
    return [tensor_shape.TensorShape(input_shape.tolist())]
  else:
    # NOTE(mrry): We could in principle work out the shape from the
    # gradients and the attrs, but if we do not know input_shape
    # statically, then we are unlikely to know the shape of the
    # gradients either.
    return [tensor_shape.unknown_shape(ndims=4)]
Example 18
def _ResizeShape(op):
  """Shape function for the resize_bilinear and resize_nearest_neighbor ops."""
  input_shape = op.inputs[0].get_shape().with_rank(4)
  size = tensor_util.ConstantValue(op.inputs[1])
  if size is not None:
    height = size[0]
    width = size[1]
  else:
    height = None
    width = None
  return [tensor_shape.TensorShape(
      [input_shape[0], height, width, input_shape[3]])]
Example 19
def _TileGradShape(op):
  """Shape function for the TileGrad op."""
  multiples_shape = op.inputs[1].get_shape().with_rank_at_most(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape.num_elements())
  multiples = tensor_util.ConstantValue(op.inputs[1])
  if multiples is None:
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
  else:
    output_dims = []
    for i, dim in enumerate(input_shape.dims):
      output_dims.append(dim // multiples[i])
    return [tensor_shape.TensorShape(output_dims)]
Example 20
def _SparseSegmentMeanGradShape(op):
  """Shape function for the SparseSegmentMeanGrad op."""
  input_shape = op.inputs[0].get_shape()
  indices_shape = op.inputs[1].get_shape().with_rank(1)
  unused_segment_ids_shape = op.inputs[2].get_shape().merge_with(indices_shape)
  unused_output_dim0_shape = op.inputs[3].get_shape().merge_with(
      tensor_shape.scalar())
  output_dim0 = tensor_util.ConstantValue(op.inputs[3])
  if output_dim0 is not None:
    dim0 = output_dim0[0]
  else:
    dim0 = None
  return [tensor_shape.TensorShape([dim0]).concatenate(input_shape[1:])]
Example 21
def _TopKShape(op):
    """Shape function for TopK and TopKV2 ops."""
    input_shape = op.inputs[0].get_shape().with_rank_at_least(1)
    if len(op.inputs) >= 2:
        k = tensor_util.ConstantValue(op.inputs[1])
    else:
        k = op.get_attr("k")
    last = input_shape[-1].value
    if last is not None and last < k:
        raise ValueError("input.shape %s must have last dimension >= k = %d" %
                         (input_shape, k))
    output_shape = input_shape[:-1].concatenate([k])
    return [output_shape, output_shape]
Example 22
def _ReshapeShape(op):
    """Shape function for Reshape op."""
    input_shape = op.inputs[0].get_shape()
    if input_shape.ndims is not None:
        num_elements = tensor_shape.Dimension(1)
        for dim in input_shape.dims:
            num_elements *= dim
    else:
        num_elements = tensor_shape.Dimension(None)
    new_shape_shape = op.inputs[1].get_shape().with_rank_at_most(1)
    new_shape = tensor_util.ConstantValue(op.inputs[1])
    if new_shape is None:
        # Attempt to infer the rank of the output from the length of
        # new_shape.
        return [
            tensor_shape.unknown_shape(ndims=new_shape_shape.num_elements())
        ]
    new_shape = np.reshape(new_shape, -1).tolist()
    if -1 not in new_shape:
        # The new shape is fully defined.
        if (num_elements.value is not None
                and num_elements.value != np.prod(new_shape)):
            raise ValueError(
                "Cannot reshape a tensor with %d elements to shape %s (%d elements)"
                % (num_elements.value, new_shape, np.prod(new_shape)))
        return [tensor_shape.TensorShape(new_shape)]
    elif num_elements.value is not None:
        # We know the number of elements, so we can calculate the missing
        # dimension in the new_shape.
        known_elements = 1
        unknown_index = None
        for i, dim in enumerate(new_shape):
            if dim == -1:
                unknown_index = i
            else:
                known_elements *= dim
        if known_elements == 0:
            raise ValueError("cannot infer the missing input size for "
                             "an empty tensor unless all specified "
                             "input sizes are non-zero")
        if num_elements % known_elements != 0:
            raise ValueError(
                "input has %s elements, which isn't divisible by %d" %
                (num_elements, known_elements))
        new_shape[unknown_index] = num_elements // known_elements
        return [tensor_shape.TensorShape(new_shape)]
    else:
        # We don't know the input shape, but we know n-1 of the dimensions
        # in the new shape.
        new_shape[new_shape.index(-1)] = None
        return [tensor_shape.TensorShape(new_shape)]
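When the constant new shape contains a single -1, the missing dimension is the total element count divided by the product of the known entries. Illustrative arithmetic:

num_elements = 3 * 4 * 7        # elements in the input tensor
new_shape = [6, -1]
known_elements = 1
for d in new_shape:
    if d != -1:
        known_elements *= d
missing = num_elements // known_elements
assert num_elements % known_elements == 0 and missing == 14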
Example 23
def _random_cropShape(op):
    """Shape function for the random_crop op."""
    input_shape = op.inputs[0].get_shape().with_rank(3)
    unused_size_shape = op.inputs[1].get_shape().merge_with(
        tensor_shape.vector(2))
    size = tensor_util.ConstantValue(op.inputs[1])
    if size is not None:
        height = size[0]
        width = size[1]
    else:
        height = None
        width = None
    channels = input_shape[2]
    return [tensor_shape.TensorShape([height, width, channels])]
Example 24
def _ExtractGlimpseShape(op):
    """Shape function for ExtractGlimpse op."""
    input_shape = op.inputs[0].get_shape().with_rank(4)
    unused_size_shape = op.inputs[1].get_shape().merge_with(
        tensor_shape.vector(2))
    unused_offsets_shape = op.inputs[2].get_shape().merge_with(
        input_shape[:1].concatenate([2]))
    size_value = tensor_util.ConstantValue(op.inputs[1])
    if size_value is not None:
        height = size_value[0]
        width = size_value[1]
    else:
        height = None
        width = None
    return [
        tensor_shape.TensorShape(
            [input_shape[0], height, width, input_shape[3]])
    ]
Example 25
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
  """Converts an IndexedSlices object `value` to a Tensor.

  NOTE(mrry): This function is potentially expensive.

  Args:
    value: An ops.IndexedSlices object.
    dtype: The dtype of the Tensor to be returned.
    name: Optional name to use for the returned Tensor.
    as_ref: True if a ref is requested.

  Returns:
    A dense Tensor representing the values in the given IndexedSlices.

  Raises:
    ValueError: If `dtype` is incompatible with the dtype of `value`, or if
      `value` does not have a known `dense_shape`.
  """
  _ = as_ref
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
        (dtype.name, value.dtype.name))
  if value.dense_shape is None:
    raise ValueError(
        "Tensor conversion requested for IndexedSlices without dense_shape: %s"
        % str(value))
  # TODO(mrry): Consider adding static shape information to
  # IndexedSlices, to avoid using numpy here.
  dense_shape_value = tensor_util.ConstantValue(value.dense_shape)
  if dense_shape_value is not None:
    num_elements = np.prod(dense_shape_value)
    if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
      warnings.warn(
          "Converting sparse IndexedSlices to a dense Tensor with %d elements. "
          "This may consume a large amount of memory." % num_elements)
  else:
    warnings.warn(
        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
        "This may consume a large amount of memory.")
  return math_ops.unsorted_segment_sum(value.values,
                                       value.indices,
                                       value.dense_shape[0],
                                       name=name)
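Conceptually, unsorted_segment_sum scatters each slice row into the dense row named by its index, summing rows that share an index. A NumPy sketch of that densification (illustrative only, not the TensorFlow implementation):

import numpy as np

values = np.array([[1., 1.], [2., 2.], [3., 3.]])   # IndexedSlices.values
indices = np.array([0, 2, 0])                        # IndexedSlices.indices
dense_shape = (4, 2)                                 # IndexedSlices.dense_shape

dense = np.zeros(dense_shape, dtype=values.dtype)
for row, idx in zip(values, indices):
    dense[idx] += row                                # duplicate indices are summed
assert (dense[0] == [4., 4.]).all() and (dense[2] == [2., 2.]).all()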
Example 26
def _ConcatShape(op):
    concat_dim = tensor_util.ConstantValue(op.inputs[0])
    if concat_dim is None:
        # Return an unknown shape with the same rank as the inputs, or an
        # unknown rank if no input's rank is known.
        rank = None
        for value in op.inputs[1:]:
            if rank is not None:
                value.get_shape().assert_has_rank(rank)
            else:
                rank = value.get_shape().ndims
        # TODO(irving): Remove once !kAllowLegacyScalars.
        if rank is not None:
            rank = max(rank, 1)
        return [tensor_shape.unknown_shape(ndims=rank)]

    else:
        # Merge all the non-concat dims, and sum the concat dim to make an
        # output shape.
        concat_dim = int(concat_dim)
        output_shape = op.inputs[1].get_shape()
        # TODO(irving): Remove once !kAllowLegacyScalars.
        if output_shape.ndims == 0:
            output_shape = tensor_shape.TensorShape([1])
        for value in op.inputs[2:]:
            value_shape = value.get_shape()
            if value_shape.ndims is not None and concat_dim >= value_shape.ndims:
                if value_shape.ndims == 0 and concat_dim == 0:
                    # Let concat handle scalars
                    # TODO(irving): Remove once !kAllowLegacyScalars.
                    value_shape = tensor_shape.TensorShape([1])
                else:
                    raise ValueError(
                        "concat_dim is out of range (values rank = %d)" %
                        value_shape.ndims)
            before = output_shape[:concat_dim].merge_with(
                value_shape[:concat_dim])
            at = output_shape[concat_dim] + value_shape[concat_dim]
            after = output_shape[concat_dim + 1:].merge_with(
                value_shape[concat_dim + 1:])
            output_shape = before.concatenate(at).concatenate(after)
        return [output_shape]
Example 27
def _SplitShape(op):
  """Shape function for the Split op."""
  split_dim = tensor_util.ConstantValue(op.inputs[0])
  num_split = len(op.outputs)
  input_shape = op.inputs[1].get_shape()
  if split_dim is None:
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims)] * num_split
  else:
    split_dim = int(split_dim)
    input_shape = input_shape.with_rank_at_least(split_dim + 1)
    if not (input_shape[split_dim] % num_split).is_compatible_with(0):
      raise ValueError(
          "Number of ways to split should evenly divide the split "
          "dimension but got split_dim %d (size = %d) and num_split %d" %
          (split_dim, input_shape[split_dim].value, num_split))
    prefix = input_shape[:split_dim]
    size_in_split_dim = input_shape[split_dim] // num_split
    suffix = input_shape[split_dim + 1:]
    output_shape = prefix.concatenate(size_in_split_dim).concatenate(suffix)
    return [output_shape] * num_split
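The split dimension must divide evenly by num_split, and each output keeps 1/num_split of it; e.g. with illustrative numbers:

split_dim, num_split = 1, 4
input_dims = [6, 12]
assert input_dims[split_dim] % num_split == 0
output_dims = list(input_dims)
output_dims[split_dim] //= num_split
assert output_dims == [6, 3]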
Example 28
def _PadShape(op):
    """Shape function for the Pad op.

    This op has two inputs:

    * input: A rank-N tensor.
    * paddings: An N-by-2 matrix, in which the i^th row contains the
      number of padding elements to add before and after `input` in the
      i^th dimension.

    It has one output, which has the same rank as input, and additional
    elements according to the values in paddings.

    Args:
      op: A Pad Operation.

    Returns:
      A single-element list containing the shape of the output.

    Raises:
      ValueError: If the input shapes are incompatible.
    """
    paddings_shape = op.inputs[1].get_shape().with_rank(2)
    input_shape = op.inputs[0].get_shape()
    if input_shape.ndims == 0 and paddings_shape[0].value == 1:
        # TODO(irving): Remove once !kAllowLegacyScalars.
        input_shape = tensor_shape.TensorShape([1])
    else:
        input_shape = input_shape.with_rank(paddings_shape[0].value)
    paddings_shape = paddings_shape.merge_with(
        tensor_shape.matrix(input_shape.ndims, 2))
    paddings = tensor_util.ConstantValue(op.inputs[1])
    if paddings is None:
        return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    else:
        output_dims = []
        for i, dim in enumerate(input_shape.dims):
            if paddings[i, 0] < 0 or paddings[i, 1] < 0:
                raise ValueError("paddings must be non-negative")
            output_dims.append(dim + paddings[i, 0] + paddings[i, 1])
        return [tensor_shape.TensorShape(output_dims)]
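Each output dimension grows by the before/after amounts in the corresponding row of paddings; illustrative arithmetic:

input_dims = [2, 3]
paddings = [[1, 1], [0, 2]]     # [pad_before, pad_after] per dimension
output_dims = [d + before + after
               for d, (before, after) in zip(input_dims, paddings)]
assert output_dims == [4, 5]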
Example 29
def _FillShape(op):
  """Shape function for the Fill op.

  This op takes a vector of dimensions and a scalar, and produces a
  tensor with the given dimensions.

  Args:
    op: A Fill Operation.

  Returns:
    A single-element list containing the shape of the output.
  """
  dimensions_shape = op.inputs[0].get_shape().with_rank_at_most(1)
  op.inputs[1].get_shape().assert_is_compatible_with(tensor_shape.scalar())
  fill_dims = tensor_util.ConstantValue(op.inputs[0])
  if fill_dims is None:
    # Attempt to infer the rank of the output from the length of
    # dimensions.
    return [tensor_shape.unknown_shape(ndims=dimensions_shape.num_elements())]
  else:
    return [tensor_shape.TensorShape(fill_dims.tolist())]
Example 30
def _ArgOpShape(op):
    """Common shape function for arg-reduction ops."""
    dimension_shape = op.inputs[1].get_shape()
    dimension_shape.assert_is_compatible_with(tensor_shape.scalar())
    input_shape = op.inputs[0].get_shape()
    if input_shape.ndims is None:
        return [tensor_shape.unknown_shape()]
    elif input_shape.ndims <= 1:
        return [tensor_shape.scalar()]

    dimension = tensor_util.ConstantValue(op.inputs[1])
    if dimension is None:
        return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
    elif 0 <= dimension < input_shape.ndims:
        returned_shape = []
        for i, dim in enumerate(input_shape.dims):
            if i != dimension:
                returned_shape.append(dim)
        return [tensor_shape.TensorShape(returned_shape)]
    else:
        raise ValueError(
            "dimension (%d) must be in the range [0, %d), where %d is the number "
            "of dimensions in the input" %
            (dimension, input_shape.ndims, input_shape.ndims))