Example #1
    def test(self):
        xyz = ['x', 'y', 'z']
        abc = ['a', 'b', 'c']

        self.assertIsNone(core.get_axis_order())

        with core.axis_order_scope(xyz):
            self.assertEqual(core.get_axis_order(), xyz)

            with core.axis_order_scope():
                self.assertIsNone(core.get_axis_order())

                with core.axis_order_scope(abc):
                    self.assertEqual(core.get_axis_order(), abc)

                self.assertIsNone(core.get_axis_order())

            self.assertEqual(core.get_axis_order(), xyz)

        self.assertIsNone(core.get_axis_order())
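
The test above exercises `core.axis_order_scope` as a re-entrant context manager: passing a list of axis names sets the active order, calling it with no argument clears it, and leaving each `with` block restores whatever order was active before. A minimal sketch of the same behaviour outside a test harness, assuming the TensorFlow contrib module path `tensorflow.contrib.labeled_tensor.python.ops.core` and made-up axis names:

from tensorflow.contrib.labeled_tensor.python.ops import core

# No axis order is active until a scope is entered.
assert core.get_axis_order() is None

with core.axis_order_scope(['batch', 'channel']):
    # Ops such as the matmul in the next example order their
    # output axes to match this list.
    assert core.get_axis_order() == ['batch', 'channel']

    with core.axis_order_scope():
        # A scope entered with no argument temporarily clears the order.
        assert core.get_axis_order() is None

# Exiting the outer scope restores the previous state (here: no order).
assert core.get_axis_order() is None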
Example #2
def matmul(a, b, name=None):
    """Matrix multiply two tensors with rank 1 or 2.

  If both tensors have rank 2, a matrix-matrix product is performed.
  If one tensor has rank 1 and the other has rank 2, then a matrix-vector
  product is performed.
  If both tensors have rank 1, then a vector dot-product is performed.
  (This behavior matches that of `numpy.dot`.)

  Both tensors must share exactly one dimension in common, which is the
  dimension the operation is summed along. The inputs will be automatically
  transposed if necessary as part of the matmul op.

  We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number shared dimensions (via an `axis`
  argument), but neither of these features has been implemented yet.

  Args:
    a: First LabeledTensor.
    b: Second LabeledTensor.
    name: Optional op name.

  Returns:
    LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in or order of appearance on the
    inputs.

  Raises:
    NotImplementedError: If inputs have rank >2 or share multiple axes.
    ValueError: If the inputs have rank 0 or do not share any axes.
  """
    with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:

        a = core.convert_to_labeled_tensor(a)
        b = core.convert_to_labeled_tensor(b)

        if len(a.axes) > 2 or len(b.axes) > 2:
            # We could pass batched inputs to tf.matmul to make this work, but we
            # would also need to use tf.tile and/or tf.transpose. These are more
            # expensive than doing reshapes, so it's not clear if it's a good idea to
            # do this automatically.
            raise NotImplementedError(
                'matmul currently requires inputs with rank 2 or less, but '
                'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))

        if not a.axes or not b.axes:
            raise ValueError(
                'matmul currently requires inputs with at least rank 1, but '
                'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))

        shared_axes = set(a.axes) & set(b.axes)
        if len(shared_axes) > 1:
            raise NotImplementedError(
                'matmul does not yet support summing over multiple shared axes: %r. '
                'Use transpose and reshape to create a single shared axis to sum '
                'over.' % shared_axes)
        if not shared_axes:
            raise ValueError(
                'there must be exactly one axis in common between the '
                'inputs to matmul: %r, %r' % (a.axes.keys(), b.axes.keys()))
        shared_axis, = shared_axes

        if a.axes[shared_axis] != b.axes[shared_axis]:
            raise ValueError(
                'axis %r does not match on input arguments: %r vs %r' %
                (shared_axis, a.axes[shared_axis].value,
                 b.axes[shared_axis].value))

        result_axes = []
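        # Collect the output axes: every non-shared axis, in order of
        # appearance on the inputs (axes of a first, then axes of b).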
        for axes in [a.axes, b.axes]:
            for axis in axes.values():
                if axis.name != shared_axis:
                    result_axes.append(axis)

        axis_scope_order = core.get_axis_order()
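        # If an axis_order_scope is active and the result axes do not already
        # follow it, swap the operands (and the collected axes) so the output
        # order matches the scope.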
        if axis_scope_order is not None:
            result_axis_names = [axis.name for axis in result_axes]
            new_axis_names = [
                name for name in axis_scope_order if name in result_axis_names
            ]
            if new_axis_names != result_axis_names:
                # switch a and b
                b, a = a, b
                # result_axes is a list of length 1 or 2
                result_axes = result_axes[::-1]

        squeeze_dims = []
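        # squeeze_dims records singleton dimensions introduced below when a
        # rank-1 input is reshaped to a matrix so math_ops.matmul accepts it;
        # they are squeezed back out of the result at the end.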

        if len(a.axes) == 1:
            a_tensor = array_ops.reshape(a.tensor, (1, -1))
            squeeze_dims.append(0)
            transpose_a = False
        else:
            a_tensor = a.tensor
            transpose_a = list(a.axes.keys()).index(shared_axis) == 0

        if len(b.axes) == 1:
            b_tensor = array_ops.reshape(b.tensor, (-1, 1))
            squeeze_dims.append(1)
            transpose_b = False
        else:
            b_tensor = b.tensor
            transpose_b = list(b.axes.keys()).index(shared_axis) == 1

        result_op = math_ops.matmul(a_tensor,
                                    b_tensor,
                                    transpose_a=transpose_a,
                                    transpose_b=transpose_b)

        if squeeze_dims:
            result_op = array_ops.squeeze(result_op, squeeze_dims)
        result_op = array_ops.identity(result_op, name=scope)

        return core.LabeledTensor(result_op, result_axes)
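
A hedged usage sketch for the function above, assuming the contrib package `tensorflow.contrib.labeled_tensor` (commonly imported as `lt`) exposes `LabeledTensor` and `matmul`; the tensors and axis names are made up:

import tensorflow as tf
import tensorflow.contrib.labeled_tensor as lt

# A 2x3 matrix labeled (x, y) and a length-3 vector labeled (y,).
matrix = lt.LabeledTensor(
    tf.constant([[1., 2., 3.], [4., 5., 6.]]), [('x', 2), ('y', 3)])
vector = lt.LabeledTensor(tf.constant([1., 0., 1.]), [('y', 3)])

# The shared axis 'y' is summed over, leaving a rank-1 result labeled (x,).
# If an axis_order_scope were active, the output axes would follow that order.
result = lt.matmul(matrix, vector)
print(result.axes)  # expected: a single 'x' axis of size 2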