Example 1
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops


def _MatMulGrad(op, grad):
  """Gradient for MatMul."""
  t_a = op.get_attr("transpose_a")
  t_b = op.get_attr("transpose_b")
  # Conjugate the inputs so the gradients are also correct for complex types.
  a = math_ops.conj(op.inputs[0])
  b = math_ops.conj(op.inputs[1])
  # pylint: disable=protected-access
  if not t_a and not t_b:
    # C = A @ B: dA = dC @ B^T, dB = A^T @ dC.
    grad_a = gen_math_ops._mat_mul(grad, b, transpose_b=True)
    grad_b = gen_math_ops._mat_mul(a, grad, transpose_a=True)
  elif not t_a and t_b:
    # C = A @ B^T: dA = dC @ B, dB = dC^T @ A.
    grad_a = gen_math_ops._mat_mul(grad, b)
    grad_b = gen_math_ops._mat_mul(grad, a, transpose_a=True)
  elif t_a and not t_b:
    # C = A^T @ B: dA = B @ dC^T, dB = A @ dC.
    grad_a = gen_math_ops._mat_mul(b, grad, transpose_b=True)
    grad_b = gen_math_ops._mat_mul(a, grad)
  elif t_a and t_b:
    # C = A^T @ B^T: dA = B^T @ dC^T, dB = dC^T @ A^T.
    grad_a = gen_math_ops._mat_mul(b, grad, transpose_a=True, transpose_b=True)
    grad_b = gen_math_ops._mat_mul(grad, a, transpose_a=True, transpose_b=True)
  # pylint: enable=protected-access
  return grad_a, grad_b
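The four branches implement the standard matrix-calculus identities; in the plain case, for C = A @ B, dL/dA = dL/dC @ B^T and dL/dB = A^T @ dL/dC. A minimal sanity check of those identities against TensorFlow's autodiff, assuming a TF 2.x eager environment (shapes and tolerance here are arbitrary choices):

```python
import numpy as np
import tensorflow as tf

a = tf.constant(np.random.rand(2, 3).astype(np.float32))
b = tf.constant(np.random.rand(3, 4).astype(np.float32))

with tf.GradientTape() as tape:
  tape.watch(a)
  tape.watch(b)
  # loss = sum(A @ B), so the upstream gradient dL/dC is all ones.
  loss = tf.reduce_sum(tf.matmul(a, b))

grad_a, grad_b = tape.gradient(loss, [a, b])
ones = tf.ones([2, 4])
# dL/dA = dL/dC @ B^T and dL/dB = A^T @ dL/dC.
np.testing.assert_allclose(grad_a, tf.matmul(ones, b, transpose_b=True), rtol=1e-5)
np.testing.assert_allclose(grad_b, tf.matmul(a, ones, transpose_a=True), rtol=1e-5)
```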
Example 2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops


# (`sparse_matmul`, used below, is defined elsewhere in the same
# math_ops module.)
def matmul(a, b,
           transpose_a=False, transpose_b=False,
           a_is_sparse=False, b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must be two-dimensional matrices, with matching inner dimensions,
  possibly after transposition.

  Both matrices must be of the same type. The supported types are:
  `float`, `double`, `int32`, `complex64`.

  Either matrix can be transposed on the fly by setting the corresponding flag
  to `True`. This is `False` by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.

  For example:

  ```python
  # 2-D tensor `a`
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1 2 3]
                                                        [4 5 6]]
  # 2-D tensor `b`
  b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7 8]
                                                           [9 10]
                                                           [11 12]]
  c = tf.matmul(a, b) => [[58 64]
                          [139 154]]
  ```

  Args:
    a: `Tensor` of type `float`, `double`, `int32` or `complex64`.
    b: `Tensor` with same type as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a`.
  """
  with ops.op_scope([a, b], name, "MatMul") as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    if a.dtype == dtypes.float32 and (a_is_sparse or b_is_sparse):
      return sparse_matmul(a, b,
                           transpose_a=transpose_a,
                           transpose_b=transpose_b,
                           a_is_sparse=a_is_sparse,
                           b_is_sparse=b_is_sparse,
                           name=name)
    else:
      return gen_math_ops._mat_mul(a, b,
                                   transpose_a=transpose_a,
                                   transpose_b=transpose_b,
                                   name=name)
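As the docstring notes, either operand can be transposed on the fly instead of materializing a transposed copy first. A short usage sketch of those flags (hypothetical values, assuming the public tf.matmul API):

```python
import tensorflow as tf

a = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])     # shape [2, 3]
b = tf.constant([[7., 8., 9.],
                 [10., 11., 12.]])  # shape [2, 3]

# The inner dimensions only match once `a` is transposed: [3, 2] x [2, 3].
c = tf.matmul(a, b, transpose_a=True)  # shape [3, 3]
```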
Example 3
def benchmark_matmul(shape, n, use_gpu=False):
  """Benchmark for matrix multiplication using tf.matmul."""
  transpose_b = (shape[0] != shape[1])
  m = random_ops.random_uniform(shape)
  if use_gpu:
    m = m.as_gpu_tensor()
    # Warm up the GPU - the very first kernel invocation
    # seems to require a bunch of setup.
    math_ops.matmul(m, m, transpose_b=transpose_b)

  def label(s):
    return "MatMul {}: {:30s}".format(shape, s)

  if not use_gpu:
    a = m.as_cpu_tensor().numpy()
    b = a.T if transpose_b else a
    with timer(label("np.dot"), iters=n) as iters:
      for _ in iters:
        np.dot(a, b)

  with timer(label("tf.matmul"), iters=n) as iters:
    for _ in iters:
      math_ops.matmul(m, m, transpose_b=transpose_b)

  with timer(label("gen_math_ops.mat_mul"), iters=n) as iters:
    for _ in iters:
      gen_math_ops._mat_mul(m, m, transpose_b=transpose_b)

  # pylint: disable=protected-access
  input_handles = [m._handle, m._handle]
  ctx_handle = context.context()._handle
  # pylint: enable=protected-access
  attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
           m.dtype.as_datatype_enum)
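  # The raw C-API path: TFE_Py_Execute runs the MatMul kernel directly, and
  # the returned output handle is deleted right away so iterations don't leak.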
  with timer(label("TFE_Py_Execute"), iters=n) as iters:
    for _ in iters:
      pywrap_tensorflow.TFE_DeleteTensorHandle(
          pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "MatMul",
                                           input_handles, attrs, 1)[0])

  f = function.defun(math_ops.matmul)
  with timer(label("defun(tf.matmul)"), iters=n) as iters:
    for _ in iters:
      f(m, m, transpose_b=transpose_b)
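The `timer` helper is not part of this excerpt. A minimal sketch of a compatible context manager, assuming it only needs to yield an iterable of `iters` steps and report the mean latency (the exact report format is a guess):

```python
import contextlib
import time


@contextlib.contextmanager
def timer(label, iters):
  """Yields `range(iters)` and prints the mean time per iteration."""
  start = time.time()
  yield range(iters)
  elapsed = time.time() - start
  print("{}: {:.3f} us/iter".format(label, 1e6 * elapsed / iters))
```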
Example 4
def func():
  # Closes over `m` and `transpose_b` from an enclosing benchmark scope
  # (see Example 3); calls the generated MatMul op directly.
  gen_math_ops._mat_mul(m, m, transpose_b=transpose_b)
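A zero-argument closure like this is presumably what gets compiled in the defun benchmark. A hedged sketch of how it might be wired up, reusing the names `m`, `transpose_b`, `label`, `timer`, and `n` from Example 3:

```python
compiled = function.defun(func)
with timer(label("defun(gen_math_ops.mat_mul)"), iters=n) as iters:
  for _ in iters:
    compiled()
```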