Example No. 1
def execute(op_name, num_outputs, inputs, attrs=None, name=None):
  """Execute a TensorFlow operation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch.
                 (Explicitly provided instead of being inferred for performance
                 reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    name: Customized name for the operation.

  Returns:
    List of output Tensor objects. The list is empty if there are no outputs.

  Raises:
    An exception on error.
  """
  ctx = context.get_default_context()
  # TODO(apassos) move this to convert_to_tensor
  inputs = [ag_core.getval(x) for x in inputs]
  # pylint: disable=protected-access
  input_handles = [c._handle for c in inputs]
  device_name = ctx.device_name
  try:
    outh = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
                                            str(op_name), input_handles, attrs,
                                            num_outputs)
    # pylint: enable=protected-access
  except core._NotOkStatusException as e:  # pylint: disable=protected-access
    if name is not None:
      message = e.message + " name: " + name
    else:
      message = e.message
    raise core._status_to_exception(e.code, message)  # pylint: disable=protected-access
  # pylint: enable=protected-access

  tensors = [tensor._tensor_from_handle(x) for x in outh]  # pylint: disable=protected-access
  # TODO(alive, cais): Use the execution callback mechanism.
  if core.active_trace() is not None:
    trace_name = name if name else op_name
    for t in tensors:
      # pylint: disable=protected-access
      core.active_trace().record_tensor(trace_name,
                                        ops.tensor_id(t),
                                        t._device_name(),
                                        t.shape.num_elements())
      # pylint: enable=protected-access

  # TODO(cais): Optimize this, perhaps by replacing this execute function with
  # a different one when there are execution callback(s).
  for callback in ctx.post_execution_callbacks:
    callback(op_name, name, attrs, inputs, tensors)

  return tensors
Example No. 2
def execute(op_name, num_outputs, inputs, attrs, ctx, name=None):
    """Execute a TensorFlow operation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch.
                 (Explicitly provided instead of being inferred for performance
                 reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    ctx: The value of context.context().
    name: Customized name for the operation.

  Returns:
    List of output Tensor objects. The list is empty if there are no outputs.

  Raises:
    An exception on error.
  """
    device_name = ctx.device_name
    # pylint: disable=protected-access
    try:
        tensors = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
                                                   op_name, inputs, attrs,
                                                   num_outputs)
    except core._NotOkStatusException as e:
        if name is not None:
            message = e.message + " name: " + name
        else:
            message = e.message
        six.raise_from(core._status_to_exception(e.code, message), None)

    # TODO(alive, cais): Use the execution callback mechanism.
    if core.active_trace() is not None:
        for t in tensors:
            core.active_trace().record_tensor(op_name, ops.tensor_id(t),
                                              t.device, t.shape.num_elements())
    # pylint: enable=protected-access

    # TODO(cais): Optimize this, perhaps by replacing this execute function with
    # a different one when there are execution callback(s).
    for callback in ctx.post_execution_callbacks:
        callback(op_name, name, attrs, inputs, tensors)

    return tensors
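
A usage illustration (not part of the quoted source): a minimal sketch of calling the execute() shown above directly, using the alternating name/value attrs layout described in its docstring. The import paths and the MatMul attribute names are assumptions based on the benchmark in Example No. 5 and may differ across TensorFlow versions.

# Hypothetical direct call to execute(); the module paths below are
# assumptions about TensorFlow's internal eager runtime.
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.ops import random_ops

m = random_ops.random_uniform((2, 2))
# attrs alternates attribute names and values, as documented above.
attrs = ("transpose_a", False, "transpose_b", False, "T",
         m.dtype.as_datatype_enum)
outputs = execute.execute("MatMul", num_outputs=1, inputs=[m, m],
                          attrs=attrs, ctx=context.context())
product = outputs[0]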
Example No. 3
def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None):
    """Execute a TensorFlow operation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch.
                 (Explicitly provided instead of being inferred for performance
                 reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    ctx: The value of context.context().
    name: Customized name for the operation.

  Returns:
    List of output Tensor objects. The list is empty if there are no outputs.

  Raises:
    An exception on error.
  """
    device_name = ctx.device_name
    # pylint: disable=protected-access
    try:
        ctx.ensure_initialized()
        tensors = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
                                                   op_name, inputs, attrs,
                                                   num_outputs)
    except core._NotOkStatusException as e:
        if name is not None:
            message = e.message + " name: " + name
        else:
            message = e.message
        six.raise_from(core._status_to_exception(e.code, message), None)
    except TypeError as e:
        keras_symbolic_tensors = [
            x for x in inputs if ops._is_keras_symbolic_tensor(x)
        ]
        if keras_symbolic_tensors:
            raise core._SymbolicException(
                "Inputs to eager execution function cannot be Keras symbolic "
                "tensors, but found {}".format(keras_symbolic_tensors))
        raise e
    # pylint: enable=protected-access
    return tensors
Example No. 4
def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None):
  """Execute a TensorFlow operation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch.
                 (Explicitly provided instead of being inferred for performance
                 reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    ctx: The value of context.context().
    name: Customized name for the operation.

  Returns:
    List of output Tensor objects. The list is empty if there are no outputs.

  Raises:
    An exception on error.
  """
  device_name = ctx.device_name
  # pylint: disable=protected-access
  try:
    tensors = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
                                               op_name, inputs, attrs,
                                               num_outputs)
  except core._NotOkStatusException as e:
    if name is not None:
      message = e.message + " name: " + name
    else:
      message = e.message
    six.raise_from(core._status_to_exception(e.code, message), None)
  except TypeError as e:
    if any(ops._is_keras_symbolic_tensor(x) for x in inputs):
      if any(isinstance(x, ops.EagerTensor) for x in inputs):
        raise TypeError("You are attempting to mix computation of symbolic "
                        "Tensors (computation rooted at tf.keras.Input()) "
                        "and concrete values. This is not supported. "
                        "If you need this support, file an issue on the "
                        "TensorFlow GitHub repository.")
      raise core._SymbolicException
    raise e
  # pylint: enable=protected-access
  return tensors
Example No. 5
def benchmark_matmul(shape, n, use_gpu=False):
    """Benchmark for matrix multiplication using tf.matmul."""
    transpose_b = (shape[0] != shape[1])
    m = random_ops.random_uniform(shape)
    if use_gpu:
        m = m.as_gpu_tensor()
        # Warm up the GPU - the very first kernel invocation
        # seems to require a bunch of setup.
        math_ops.matmul(m, m, transpose_b=transpose_b)

    def label(s):
        return "MatMul {}: {:30s}".format(shape, s)

    if not use_gpu:
        a = m.as_cpu_tensor().numpy()
        b = a.T if transpose_b else a
        with timer(label("np.dot"), iters=n) as iters:
            for _ in iters:
                np.dot(a, b)

    with timer(label("tf.matmul"), iters=n) as iters:
        for _ in iters:
            math_ops.matmul(m, m, transpose_b=transpose_b)

    with timer(label("gen_math_ops.mat_mul"), iters=n) as iters:
        for _ in iters:
            gen_math_ops._mat_mul(m, m, transpose_b=transpose_b)

    # pylint: disable=protected-access
    input_handles = [m._handle, m._handle]
    ctx_handle = context.context()._handle
    # pylint: enable=protected-access
    attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
             m.dtype.as_datatype_enum)
    with timer(label("TFE_Py_Execute"), iters=n) as iters:
        for _ in iters:
            pywrap_tensorflow.TFE_DeleteTensorHandle(
                pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "MatMul",
                                                 input_handles, attrs, 1)[0])

    f = function.defun(math_ops.matmul)
    with timer(label("defun(tf.matmul)"), iters=n) as iters:
        for _ in iters:
            f(m, m, transpose_b=transpose_b)
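
The timer() helper used throughout this benchmark is not shown in the listing. A hypothetical stand-in consistent with how it is used above (a context manager that yields the iteration range and reports elapsed time on exit) could look like this; the name, signature, and output format are assumptions rather than the original utility.

# Hypothetical stand-in for the timer() helper used above; not the original.
import contextlib
import time

@contextlib.contextmanager
def timer(label, iters=30000):
    start = time.time()
    yield range(iters)  # The with-block iterates over these steps.
    elapsed = time.time() - start
    print("{} {:.6f} ms/iter".format(label, 1000.0 * elapsed / iters))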
Example No. 6
def inf_nan_callback(op_type,
                     inputs,
                     attrs,
                     outputs,
                     op_name,
                     check_inf=True,
                     check_nan=True,
                     action=_DEFAULT_CALLBACK_ACTION):
    """An execution callback that checks for `inf`s and `nan`s in output tensors.

  This callback can be used with `tfe.add_execute_callback` to check for invalid
  numeric values. E.g.,
  ```python
  tfe.add_execute_callback(tfe.inf_nan_callback)
  ```

  Args:
    op_type: Name of the TFE operation type (e.g., `MatMul`).
    inputs: The `list` of input tensors to the operation, currently unused by
      this callback.
    attrs: Attributes of the TFE operation, as a tuple of alternating attribute
      names and attribute values.
    outputs: The `list` of output tensors from the operation, checked by this
      callback for `inf` and `nan` values.
    op_name: Name of the TFE operation. This name is set by the client and can
      be `None` if it is unset.
    check_inf: (`bool`) Whether this callback should check for `inf` values in
      the output tensor values.
    check_nan: (`bool`) Whether this callback should check for `nan` values in
      the output tensor values.
    action: (`str`) Action to be taken by the callback when `inf` or `nan`
      values are detected. Possible values {"raise", "warn", "print"}
      `"raise"`: Raise a `InfOrNanError`.
      `"warn"`: Log a warning using `tf.logging.warn`.
      `"print"`: Print a message to `sys.stdout`.

  Raises:
    InfOrNanError: iff `inf` or `nan` values are seen in any of `outputs` and
      `action` is `"raise"`.
    ValueError: iff the value of `action` is invalid.
  """
    del attrs, inputs  # Not used.

    ctx = context.get_default_context()

    for index, output in enumerate(outputs):
        if not output.dtype.is_numpy_compatible:
            continue

        numpy_dtype = output.dtype.as_numpy_dtype
        if (np.issubdtype(numpy_dtype, np.floating)
                or np.issubdtype(numpy_dtype, np.complex)
                or np.issubdtype(numpy_dtype, np.integer)):
            try:
                check_numerics_op_attrs = ("message",
                                           "Eager-mode inf/nan check", "T",
                                           output.dtype.as_datatype_enum)
                # TODO(cais): Consider moving this into execute.py.
                # pylint: disable=protected-access
                pywrap_tensorflow.TFE_Py_Execute(ctx._handle, output.device,
                                                 "CheckNumerics", [output],
                                                 check_numerics_op_attrs, 1)
                # pylint: enable=protected-access
            except core._NotOkStatusException:  # pylint: disable=protected-access
                value = output.numpy()
                inf_detected = np.any(np.isinf(value)) and check_inf
                nan_detected = np.any(np.isnan(value)) and check_nan
                if not inf_detected and not nan_detected:
                    continue

                error = InfOrNanError(op_type, op_name, index, len(outputs),
                                      value)
                if action == "print":
                    print("Warning: %s" % str(error))
                elif action == "warn":
                    logging.warn(str(error))
                elif action == "raise":
                    raise error
                else:
                    raise ValueError(
                        "Invalid action for inf_nan_callback: %s. Valid actions are: "
                        "{print | warn | raise}" % action)
Example No. 7
def func():
    # Benchmark closure: ctx_handle, device, inputs and attrs are free
    # variables from the enclosing scope (see the setup sketch at the end).
    pywrap_tensorflow.TFE_Py_Execute(ctx_handle, device, "MatMul",
                                     inputs, attrs, 1)
Example No. 8
def f():
    # Benchmark closure: ctx_handle, inputs and attrs are free variables; a
    # None device lets the runtime choose where to run the Identity op.
    pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "Identity",
                                     inputs, attrs, 1)
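
Examples No. 7 and 8 are closures lifted from benchmark code and rely on variables defined in an enclosing scope. For context, here is a hedged sketch of the kind of setup they assume, modeled on the MatMul benchmark in Example No. 5 and on the hypothetical timer() above; every name, value, and import path here is an assumption about that surrounding code, not the original source.

# Hypothetical setup for the closures in Examples No. 7 and 8; the import
# paths are assumptions about TensorFlow's internal modules.
from tensorflow.python.eager import context
from tensorflow.python.ops import random_ops

n = 1000
m = random_ops.random_uniform((2, 2))
ctx_handle = context.context()._handle  # pylint: disable=protected-access
device = None  # Let the runtime pick a device, as in Example No. 8.
inputs = [m._handle, m._handle]  # pylint: disable=protected-access
attrs = ("transpose_a", False, "transpose_b", False, "T",
         m.dtype.as_datatype_enum)

with timer("TFE_Py_Execute MatMul", iters=n) as iters:  # timer() sketched above
    for _ in iters:
        func()  # The MatMul closure from Example No. 7.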