Example #1
0
class LabeledTensor(object):
  """A tensor with annotated axes.

  It has the following invariants:
    1) The dimensionality of the tensor is equal to the number of elements
    in axes.
    2) The number of coordinate values in the ith dimension is equal to the
    size of the tensor in the ith dimension.

  Attributes:
    tensor: tf.Tensor containing the data.
    axes: lt.Axes containing axis names and coordinate labels.
  """

  @tc.accepts(object, ops.Tensor,
              tc.Union(Axes, tc.Collection(tc.Union(string_types, AxisLike))))
  def __init__(self, tensor, axes):
    """Construct a LabeledTensor.

    Args:
      tensor: The underlying tensor containing the data.
      axes: An Axes object, or a collection of strings, Axis objects or tuples
        of (name, value) pairs indicating the axes.

    Raises:
      ValueError: If the provided axes do not satisfy the class invariants.
    """
    self._tensor = tensor
    shape = tensor.get_shape()

    if isinstance(axes, Axes):
      # Already a validated Axes object; invariants are still re-checked below.
      unvalidated_axes = axes
    else:
      mutable_axes = []

      for position, axis_like in enumerate(axes):
        if isinstance(axis_like, string_types):
          # The coordinates for this axis are unlabeled.
          # Infer the size of the axis from the tensor's static shape.
          value = shape[position]
          axis_like = (axis_like, value)

        mutable_axes.append(axis_like)

      # Construct the Axes object, which will additionally validate the
      # contents of the object.
      unvalidated_axes = Axes(mutable_axes)

    # Check our invariants.

    # First, the rank of the tensor must be equal to the number of axes.
    if len(shape) != len(unvalidated_axes):
      raise ValueError('Tensor rank was not equal to the number of axes: %r, %r'
                       % (shape, unvalidated_axes))

    # Second, the size of each tensor dimension must match the size of the
    # corresponding indices.
    for (d, axis) in zip(shape, unvalidated_axes.values()):
      if d != axis.size:
        raise ValueError(
            'Provided axis size %d does not match tensor dimension size %d' %
            (axis.size, d))

    self._axes = unvalidated_axes

  def __repr__(self):
    # Example output:
    # <LabeledTensor 'foo' shape=(2, 3, 4) dtype=float32
    #  axes=[('x', Dimension(2)),
    #        ('y', ('a', 'b', 'c'),
    #        ('z', Dimension(4))]>
    axes = ["('%s', %r)" % (v.name, v.value) for v in self.axes.values()]
    # Indent continuation lines so the axis entries align under ' axes=['.
    axes_repr = (',\n' + ' ' * len(' axes=[')).join(axes)
    return ("<%s '%s' shape=%s dtype=%s\n axes=[%s]>" %
            (type(self).__name__, self.tensor.name, self.tensor.get_shape(),
             self.tensor.dtype.name, axes_repr))

  @property
  def tensor(self):
    """The underlying tf.Tensor containing the data."""
    return self._tensor

  def _as_graph_element(self):
    """Support tf.Graph.as_graph_element on LabeledTensor objects.

    This allows operations such as tf.name_scope to take labeled tensors.

    Returns:
      self.tensor
    """
    return self.tensor

  @property
  def axes(self):
    """The lt.Axes object mapping axis names to coordinate labels."""
    return self._axes

  # properties/methods directly borrowed from tf.Tensor:

  @property
  def dtype(self):
    """The tf.DType of elements in this tensor."""
    return self._tensor.dtype

  @property
  def name(self):
    """The string name of the underlying tensor in the graph."""
    return self._tensor.name

  def get_shape(self):
    """Returns the TensorShape that represents the shape of this tensor.

    See tf.Tensor.get_shape().

    Returns:
      A TensorShape representing the shape of this tensor.
    """
    return self._tensor.get_shape()

  # TODO(shoyer): consider how/if to implement .eval(). Maybe it should return
  # an xarray.DataArray?

  def __getitem__(self, key):
    # This should work exactly like tf.Tensor.__getitem__, except it preserves
    # labels. Each element of `key` indexes the correspondingly-positioned
    # axis, so a full-rank indexer is required.
    if not isinstance(key, tuple):
      key = (key,)
    if len(key) != len(self.axes):
      raise ValueError('indexer %r must have the same length as the Tensor '
                       'rank (%r)' % (key, len(self.axes)))
    selection = {a: k for a, k in zip(self.axes.keys(), key)}
    return slice_function(self, selection)

  # special methods for overloading arithmetic operations. These delegate to
  # the label-aware module-level ops (add, sub, mul, ...) defined elsewhere
  # in this module, so axis metadata is preserved through arithmetic:

  def __abs__(self):
    return abs_function(self)

  def __neg__(self):
    return neg(self)

  def __pos__(self):
    # Unary plus is a no-op; return self unchanged.
    return self

  def __add__(self, other):
    return add(self, other)

  def __radd__(self, other):
    return add(other, self)

  def __sub__(self, other):
    return sub(self, other)

  def __rsub__(self, other):
    return sub(other, self)

  def __mul__(self, other):
    return mul(self, other)

  def __rmul__(self, other):
    return mul(other, self)

  def __truediv__(self, other):
    return div(self, other)

  # Python 2 division operator aliases.
  __div__ = __truediv__

  def __rtruediv__(self, other):
    return div(other, self)

  __rdiv__ = __rtruediv__

  def __mod__(self, other):
    return mod(self, other)

  def __rmod__(self, other):
    return mod(other, self)

  def __pow__(self, other):
    return pow_function(self, other)

  def __rpow__(self, other):
    return pow_function(other, self)

  # logical operations:

  def __invert__(self):
    return logical_not(self)

  def __and__(self, other):
    return logical_and(self, other)

  def __or__(self, other):
    return logical_or(self, other)

  def __xor__(self, other):
    return logical_xor(self, other)

  # boolean operations:

  def __lt__(self, other):
    return less(self, other)

  def __le__(self, other):
    return less_equal(self, other)

  def __gt__(self, other):
    return greater(self, other)

  def __ge__(self, other):
    return greater_equal(self, other)

  def __eq__(self, other):
    # for consistency with tf.Tensor, deliberately return False (rather than
    # NotImplemented) for non-LabeledTensor operands.
    if not isinstance(other, LabeledTensor):
      return False

    return self.tensor == other.tensor and self.axes == other.axes

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    # Consistent with __eq__: equality is defined by (tensor, axes).
    return hash((self.tensor, self.axes))
Example #2
0
    axes = value.axes.values()
    value = value.tensor
  else:
    axes = []

  # We call convert_to_tensor even for LabeledTensor input because it also
  # checks to make sure the dtype argument is compatible.
  tensor = ops.convert_to_tensor(value, dtype=dtype, name=name)
  if len(tensor.get_shape()) != len(axes):
    raise ValueError('cannot automatically convert unlabeled arrays or tensors '
                     'with rank>0 into LabeledTensors: %r' % value)
  return LabeledTensor(tensor, axes)


# NOTE(review): only the signature and docstring of concat_axes are visible in
# this excerpt; the implementation body appears to have been truncated.
@tc.returns(Axis)
@tc.accepts(tc.Collection(Axis))
def concat_axes(axes):
  """Concatenate a list of Axes.

  Args:
    axes: A collection of Axis objects.

  Returns:
    The concatenation of the axes.
    If all axes have labels, the result has the concatenation of the labels.
    Else, the result has no labels, and its size is the sum of the sizes
    of the axes.

  Raises:
    ValueError: If `axes` is not a collection of Axes or if it is empty.
  """
  """
    serialized = core.convert_to_labeled_tensor(serialized)
    unlabeled_features = _labeled_to_unlabeled_features(features)

    unlabeled_parsed = parsing_ops.parse_single_example(
        serialized.tensor, unlabeled_features, name, example_names)

    parsed = {}
    for name, parsed_feature in unlabeled_parsed.items():
        parsed[name] = core.LabeledTensor(parsed_feature, features[name].axes)

    return parsed


@tc.returns(core.LabeledTensor)
@tc.accepts(dtypes.DType, tc.Collection(tc.Union(string_types, core.AxisLike)),
            tc.Optional(string_types))
def placeholder(dtype, axes, name=None):
    """Create a placeholder for a labeled tensor.

  For example:

    lt.placeholder(tf.float32, ['batch', ('channel', ['r', 'g', 'b'])])

  See tf.compat.v1.placeholder for more details.

  Args:
    dtype: The type of elements in the tensor to be fed.
    axes: sequence of strings (denoting axes of unknown size) and/or objects
      convertable to lt.Axis to label the result.
    name: Optional op name.
Example #4
0
        # For now, handle array selection separately, because tf.gather_nd does
        # not support gradients yet. Later, using gather_nd will let us combine
        # these paths.
        if indexers:
            (axis_name, indexer), = indexers.items()
            axis = core.Axis(axis_name, selection[axis_name])
            return _gather_1d_on_axis(labeled_tensor,
                                      indexer,
                                      axis,
                                      name=scope)
        else:
            return core.slice_function(labeled_tensor, slices, name=scope)


@tc.returns(core.LabeledTensor)
@tc.accepts(tc.Collection(core.LabeledTensorLike), string_types,
            tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
    """Concatenate tensors along a dimension.

  See tf.concat.

  Args:
    labeled_tensors: A list of input LabeledTensors.
    axis_name: The name of the axis along which to concatenate.
    name: Optional op name.

  Returns:
    The concatenated tensor.
    The coordinate labels for the concatenation dimension are also concatenated,
    if they are available for every tensor.
Example #5
0
class ReshapeCoder(object):
    """Utility class for mapping to and from another shape.

    Folds a given set of axes into replacement axes via `encode`, and restores
    the original axes via `decode`. For example, say you have a function
    `crop_center` which expects a LabeledTensor with axes named ['batch',
    'row', 'column', 'depth'], and you have a LabeledTensor `masked_image_lt`
    with axes ['batch', 'row', 'column', 'channel', 'mask'].

    Instead of manually reshaping before and after:

    >>> reshape_lt = lt.reshape(masked_image_lt, ['channel', 'mask'], ['depth'])
    >>> crop_lt = crop_center(reshape_lt)
    >>> result_lt = lt.reshape(crop_lt, ['depth'],
    ...   [masked_image_lt.axes['channel'], masked_image_lt.axes['mask']])

    you can let ReshapeCoder remember the original axes for you:

    >>> rc = ReshapeCoder(['channel', 'mask'], ['depth'])
    >>> result_lt = rc.decode(crop_center(rc.encode(masked_image_lt)))

    Here, `decode` restores the original axes 'channel' and 'mask', so
    `crop_center` must not have modified the size of the 'depth' axis.
    """
    @tc.accepts(object, tc.Collection(str),
                tc.Collection(tc.Union(str, core.AxisLike)), tc.Optional(str))
    def __init__(self, existing_axis_names, new_axes, name=None):
        """Create a coder that folds `existing_axis_names` into `new_axes`.

        Args:
          existing_axis_names: Names of the axes to be replaced by `encode`.
          new_axes: Axis names (or objects convertible to Axis) that replace
            the existing axes in the encoded tensor.
          name: Optional op name scope.
        """
        self._name = name
        self._existing_axis_names = existing_axis_names
        self._new_axes = new_axes

        # Captured from the first call to encode(); decode() uses it to
        # restore the original axes.
        self._existing_axes = None

    @tc.returns(core.LabeledTensor)
    @tc.accepts(object, core.LabeledTensorLike)
    def encode(self, labeled_tensor):
        """Reshape the input to the target shape.

        If called several times, the axes named in existing_axis_names must be
        identical.

        Args:
          labeled_tensor: The input tensor.

        Returns:
          The input reshaped to the target shape.

        Raises:
          ValueError: If the axes in existing_axis_names don't match the axes
            of a tensor in a previous invocation of this method.
        """
        with tf_ops.name_scope(self._name, 'lt_reshape_encode',
                               [labeled_tensor]) as scope:
            labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

            # Build the reshape op first; validation against any previous
            # call happens afterwards, matching the original call order.
            encoded = ops.reshape(labeled_tensor,
                                  self._existing_axis_names,
                                  self._new_axes,
                                  name=scope)

            current_axes = [
                labeled_tensor.axes[n] for n in self._existing_axis_names
            ]
            if self._existing_axes is None:
                # First invocation: remember the axes for decode().
                self._existing_axes = current_axes
            elif self._existing_axes != current_axes:
                raise ValueError(
                    'input axes %r do not match axes from previous method call %r'
                    % (current_axes, self._existing_axes))
            else:
                self._existing_axes = current_axes

            return encoded

    @tc.returns(core.LabeledTensor)
    @tc.accepts(object, core.LabeledTensorLike)
    def decode(self, labeled_tensor):
        """Reshape the input to the original shape.

        This is the inverse of encode.
        Encode must have been called at least once prior to this method being
        called.

        Args:
          labeled_tensor: The input tensor.

        Returns:
          The input reshaped to the original shape.

        Raises:
          ValueError: If this method was called before encode was called.
        """
        if self._existing_axes is None:
            raise ValueError('decode called before encode')

        with tf_ops.name_scope(self._name, 'lt_reshape_decode',
                               [labeled_tensor]) as scope:
            labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

            # Recover the names of the axes that encode() introduced.
            names_to_replace = []
            for axis in self._new_axes:
                if isinstance(axis, string_types):
                    names_to_replace.append(axis)
                else:
                    names_to_replace.append(core.as_axis(axis).name)

            return ops.reshape(labeled_tensor,
                               names_to_replace,
                               self._existing_axes,
                               name=scope)