Example #1
def enable_eager_execution():
    """Enables, for the rest of the lifetime of this program, eager execution.

  If not called immediately on startup risks creating breakage and bugs. Calling
  this method more than once in the same process will lead to an exception.

  Example:
  ```python
  # Before eager execution is enabled, `Tensor`s are symbolic and do not hold
  # concrete values (they are to be executed in a `tf.Session`).
  assert not hasattr(tf.multiply(6, 7), "numpy")

  tfe.enable_eager_execution()

  # After eager execution is enabled, operations are executed as they are
  # defined and `Tensor`s hold concrete values, which can be accessed as
  # `numpy.ndarray`s through the `numpy()` method.
  assert tf.multiply(6, 7).numpy() == 42
  ```

  Raises:
    ValueError: If this method has already been invoked in the current process.
  """
    global _default_mode
    if _default_mode == EAGER_MODE:
        func_name = (
            "tfe." +
            tf_inspect.getframeinfo(tf_inspect.currentframe()).function)
        raise ValueError(
            "Do not call %s more than once in the same process. Note eager-mode "
            "methods such as tfe.run() also call %s." % (func_name, func_name))
    _default_mode = EAGER_MODE
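The error message in Example #1 derives the public function name from the current stack frame instead of hardcoding it. A minimal sketch of the same trick using only the standard `inspect` module, which `tf_inspect.currentframe()` mirrors (see the `testCurrentFrame` examples further down):

import inspect

def report_own_name():
  # getframeinfo(currentframe()).function is the name of the currently
  # executing function, so the message stays correct after a rename.
  func_name = inspect.getframeinfo(inspect.currentframe()).function
  return "called via %s()" % func_name

print(report_own_name())  # -> "called via report_own_name()"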
Example #2
    def createAndRunGraphWithWhileLoop(self):
        """Create and run a TensorFlow Graph with a while loop to generate dumps."""

        self.dump_root = self.get_temp_dir()
        self.curr_file_path = os.path.abspath(
            tf_inspect.getfile(tf_inspect.currentframe()))

        # Run a simple TF graph to generate some debug dumps that can be used in
        # source annotation.
        with session.Session() as sess:
            loop_body = lambda i: math_ops.add(i, 2)
            self.traceback_first_line = line_number_above()

            loop_cond = lambda i: math_ops.less(i, 16)

            i = constant_op.constant(10, name="i")
            loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])

            run_options = config_pb2.RunOptions(output_partition_graphs=True)
            debug_utils.watch_graph(run_options,
                                    sess.graph,
                                    debug_urls=["file://%s" % self.dump_root])
            run_metadata = config_pb2.RunMetadata()
            sess.run(loop, options=run_options, run_metadata=run_metadata)

            self.dump = debug_data.DebugDumpDir(
                self.dump_root, partition_graphs=run_metadata.partition_graphs)
            self.dump.set_python_graph(sess.graph)
Example #3
  def createAndRunGraphWithWhileLoop(self):
    """Create and run a TensorFlow Graph with a while loop to generate dumps."""

    self.dump_root = self.get_temp_dir()
    self.curr_file_path = os.path.abspath(
        tf_inspect.getfile(tf_inspect.currentframe()))

    # Run a simple TF graph to generate some debug dumps that can be used in
    # source annotation.
    with session.Session() as sess:
      loop_body = lambda i: math_ops.add(i, 2)
      self.traceback_first_line = line_number_above()

      loop_cond = lambda i: math_ops.less(i, 16)

      i = constant_op.constant(10, name="i")
      loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options, sess.graph, debug_urls=["file://%s" % self.dump_root])
      run_metadata = config_pb2.RunMetadata()
      sess.run(loop, options=run_options, run_metadata=run_metadata)

      self.dump = debug_data.DebugDumpDir(
          self.dump_root, partition_graphs=run_metadata.partition_graphs)
      self.dump.set_python_graph(sess.graph)
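The `line_number_above()` helper used in the two examples above is defined elsewhere in the test module and is not shown on this page. A plausible sketch, assuming it simply reports the source line directly above its call site, using the standard `inspect` module:

import inspect

def line_number_above():
  """Hypothetical helper: line number of the line directly above the call."""
  # inspect.stack()[1] is the caller's frame record; field 2 is its current
  # line number, so subtracting one gives the line just above the call site.
  return inspect.stack()[1][2] - 1

op_line = line_number_above()  # Records the line number of the statement above.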
Example #4
def enable_eager_execution():
  """Enables, for the rest of the lifetime of this program, eager execution.

  If not called immediately on startup, this risks creating breakage and bugs.
  Calling this method more than once in the same process will lead to an
  exception.

  Example:
  ```python
  # Before eager execution is enabled, `Tensor`s are symbolic and do not hold
  # concrete values (they are to be executed in a `tf.Session`).
  assert not hasattr(tf.multiply(6, 7), "numpy")

  tfe.enable_eager_execution()

  # After eager execution is enabled, operations are executed as they are
  # defined and `Tensor`s hold concrete values, which can be accessed as
  # `numpy.ndarray`s through the `numpy()` method.
  assert tf.multiply(6, 7).numpy() == 42
  ```

  Raises:
    ValueError: If this method has already been invoked in the current process.
  """
  global _default_mode
  if _default_mode == EAGER_MODE:
    func_name = (
        "tfe." + tf_inspect.getframeinfo(tf_inspect.currentframe()).function)
    raise ValueError(
        "Do not call %s more than once in the same process. Note eager-mode "
        "methods such as tfe.run() also call %s." % (func_name, func_name))
  _default_mode = EAGER_MODE
Example #5
def _call_location(outer=False):
    """Returns call location given level up from current call."""
    # Two up: <_call_location>, <_call_location's caller>
    f = tf_inspect.currentframe().f_back.f_back
    parent = f.f_back
    if outer and parent is not None:
        f = parent
    return '{}:{}'.format(f.f_code.co_filename, f.f_lineno)
Example #6
def _call_location():
  """Returns call location given level up from current call."""
  frame = tf_inspect.currentframe()
  if frame:
    # CPython internals are available, use them for performance.
    # Walk back two frames to get to the deprecated function's caller.
    first_frame = frame.f_back
    second_frame = first_frame.f_back
    frame = second_frame if second_frame else first_frame
    return '%s:%d' % (frame.f_code.co_filename, frame.f_lineno)
  else:
    # Slow fallback path
    stack = tf_inspect.stack(0)  # 0 avoids generating unused context
    entry = stack[2]
    return '%s:%d' % (entry[1], entry[2])
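As the `testCurrentFrame` examples further down assert, `tf_inspect.currentframe()` is the standard `inspect.currentframe()`. A self-contained sketch of the fast path above, with a stand-in wrapper in place of the generated deprecation wrapper:

import inspect

def _caller_location():
  # This function's frame, then the wrapper's frame, then the wrapper's
  # caller: two hops along f_back.
  frame = inspect.currentframe().f_back.f_back
  return '%s:%d' % (frame.f_code.co_filename, frame.f_lineno)

def deprecated_wrapper():
  # Stand-in for the deprecation wrapper whose caller should be reported.
  return _caller_location()

print(deprecated_wrapper())  # Prints the file and line of this call site.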
Example #7
def _call_location():
    """Returns call location given level up from current call."""
    frame = tf_inspect.currentframe()
    if frame:
        # CPython internals are available, use them for performance.
        # Walk back two frames to get to the deprecated function's caller.
        first_frame = frame.f_back
        second_frame = first_frame.f_back
        frame = second_frame if second_frame else first_frame
        return '%s:%d' % (frame.f_code.co_filename, frame.f_lineno)
    else:
        # Slow fallback path
        stack = tf_inspect.stack(0)  # 0 avoids generating unused context
        entry = stack[2]
        return '%s:%d' % (entry[1], entry[2])
Example #8
    def createAndRunGraphHelper(self):
        """Create and run a TensorFlow Graph to generate debug dumps.

    This is intentionally done in separate method, to make it easier to test
    the stack-top mode of source annotation.
    """

        self.dump_root = self.get_temp_dir()
        self.curr_file_path = os.path.abspath(
            tf_inspect.getfile(tf_inspect.currentframe()))

        # Run a simple TF graph to generate some debug dumps that can be used in
        # source annotation.
        with session.Session() as sess:
            self.u_init = constant_op.constant(np.array([[5.0, 3.0],
                                                         [-1.0, 0.0]]),
                                               shape=[2, 2],
                                               name="u_init")
            self.u_init_line_number = line_number_above()

            self.u = variables.Variable(self.u_init, name="u")
            self.u_line_number = line_number_above()

            self.v_init = constant_op.constant(np.array([[2.0], [-1.0]]),
                                               shape=[2, 1],
                                               name="v_init")
            self.v_init_line_number = line_number_above()

            self.v = variables.Variable(self.v_init, name="v")
            self.v_line_number = line_number_above()

            self.w = math_ops.matmul(self.u, self.v, name="w")
            self.w_line_number = line_number_above()

            self.evaluate(self.u.initializer)
            self.evaluate(self.v.initializer)

            run_options = config_pb2.RunOptions(output_partition_graphs=True)
            debug_utils.watch_graph(run_options,
                                    sess.graph,
                                    debug_urls=["file://%s" % self.dump_root])
            run_metadata = config_pb2.RunMetadata()
            sess.run(self.w, options=run_options, run_metadata=run_metadata)

            self.dump = debug_data.DebugDumpDir(
                self.dump_root, partition_graphs=run_metadata.partition_graphs)
            self.dump.set_python_graph(sess.graph)
Example #9
def enable_eager_execution():
    """Enables, for the rest of the lifetime of this program, eager execution.

  If not called immediately on startup risks creating breakage and bugs. Calling
  this method more than once in the same process will lead to an exception.

  Raises:
    ValueError: If this method has already been invoked in the current process.
  """
    global _default_mode
    if _default_mode == EAGER_MODE:
        func_name = (
            "tfe." +
            tf_inspect.getframeinfo(tf_inspect.currentframe()).function)
        raise ValueError(
            "Do not call %s more than once in the same process. Note eager-mode "
            "methods such as tfe.run() also call %s." % (func_name, func_name))
    _default_mode = EAGER_MODE
Example #10
  def createAndRunGraphHelper(self):
    """Create and run a TensorFlow Graph to generate debug dumps.

    This is intentionally done in a separate method, to make it easier to test
    the stack-top mode of source annotation.
    """

    self.dump_root = self.get_temp_dir()
    self.curr_file_path = os.path.abspath(
        tf_inspect.getfile(tf_inspect.currentframe()))

    # Run a simple TF graph to generate some debug dumps that can be used in
    # source annotation.
    with session.Session() as sess:
      self.u_init = constant_op.constant(
          np.array([[5.0, 3.0], [-1.0, 0.0]]), shape=[2, 2], name="u_init")
      self.u_init_line_number = line_number_above()

      self.u = variables.Variable(self.u_init, name="u")
      self.u_line_number = line_number_above()

      self.v_init = constant_op.constant(
          np.array([[2.0], [-1.0]]), shape=[2, 1], name="v_init")
      self.v_init_line_number = line_number_above()

      self.v = variables.Variable(self.v_init, name="v")
      self.v_line_number = line_number_above()

      self.w = math_ops.matmul(self.u, self.v, name="w")
      self.w_line_number = line_number_above()

      sess.run(self.u.initializer)
      sess.run(self.v.initializer)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options, sess.graph, debug_urls=["file://%s" % self.dump_root])
      run_metadata = config_pb2.RunMetadata()
      sess.run(self.w, options=run_options, run_metadata=run_metadata)

      self.dump = debug_data.DebugDumpDir(
          self.dump_root, partition_graphs=run_metadata.partition_graphs)
      self.dump.set_python_graph(sess.graph)
Example #11
def _call_location():
  """Extracts the caller filename and line number as a string.

  Returns:
    A string describing the caller source location.
  """
  frame = tf_inspect.currentframe()
  assert frame.f_back.f_code.co_name == '_tfmw_add_deprecation_warning', (
      'This function should be called directly from '
      '_tfmw_add_deprecation_warning, as the caller is identified '
      'heuristically by chopping off the top stack frames.')

  # We want to get stack frame 3 frames up from current frame,
  # i.e. above __getattr__, _tfmw_add_deprecation_warning,
  # and _call_location calls.
  for _ in range(3):
    parent = frame.f_back
    if parent is None:
      break
    frame = parent
  return '{}:{}'.format(frame.f_code.co_filename, frame.f_lineno)
Example #12
import os

import tensorflow as tf

from tensorflow.python import debug as tf_debug
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import generate_lib

if __name__ == '__main__':
    doc_generator = generate_lib.DocGenerator()
    doc_generator.add_output_dir_argument()
    doc_generator.add_src_dir_argument()

    # This doc generator works on the TensorFlow codebase. Since this script lives
    # at tensorflow/tools/docs, and all code is defined somewhere inside
    # tensorflow/, we can compute the base directory (two levels up), which is
    # valid unless we're trying to apply this to a different code base, or are
    # moving the script around.
    script_dir = os.path.dirname(tf_inspect.getfile(tf_inspect.currentframe()))
    default_base_dir = os.path.join(script_dir, '..', '..')
    doc_generator.add_base_dir_argument(default_base_dir)

    flags = doc_generator.parse_known_args()

    # tf_debug is not imported with tf, it's a separate module altogether
    doc_generator.set_py_modules([('tf', tf), ('tfdbg', tf_debug)])

    doc_generator.set_do_not_descend_map({
        'tf': ['cli', 'lib', 'wrappers'],
        'tf.contrib': [
            'compiler',
            'factorization',
            'grid_rnn',
            'labeled_tensor',
Example #13
    def __enter__(self):
        self.frame = tf_inspect.currentframe()
        bindings = self.frame.f_back.f_globals
        self.old = {k: bindings.get(k, None) for k in self.names.keys()}
        bindings.update(self.names)
Example #14
    def testCurrentFrame(self):
        self.assertEqual(inspect.currentframe(), tf_inspect.currentframe())
Example #15
  def testCurrentFrame(self):
    self.assertEqual(inspect.currentframe(), tf_inspect.currentframe())
Example #16
import os
import sys

import tensorflow as tf

from tensorflow.python import debug as tf_debug
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import generate_lib

if __name__ == '__main__':
  doc_generator = generate_lib.DocGenerator()
  doc_generator.add_output_dir_argument()
  doc_generator.add_src_dir_argument()

  # This doc generator works on the TensorFlow codebase. Since this script lives
  # at tensorflow/tools/docs, and all code is defined somewhere inside
  # tensorflow/, we can compute the base directory (two levels up), which is
  # valid unless we're trying to apply this to a different code base, or are
  # moving the script around.
  script_dir = os.path.dirname(tf_inspect.getfile(tf_inspect.currentframe()))
  default_base_dir = os.path.join(script_dir, '..', '..')
  doc_generator.add_base_dir_argument(default_base_dir)

  flags = doc_generator.parse_known_args()

  # Suppress documentation of some symbols that users should never use.
  del tf.layers.Layer.inbound_nodes
  del tf.layers.Layer.outbound_nodes

  # tf_debug is not imported with tf, it's a separate module altogether
  doc_generator.set_py_modules([('tf', tf), ('tfdbg', tf_debug)])

  sys.exit(doc_generator.build(flags))
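The `script_dir` computation in Examples #12 and #16 locates the running script through its own stack frame rather than through `__file__`. A small sketch of both spellings with the standard `inspect` module, assuming an ordinary (non-frozen, non-interactive) script:

import inspect
import os

# Directory of the current script, derived from the current stack frame ...
script_dir = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))

# ... which, for an ordinary module, matches the __file__-based spelling.
script_dir_alt = os.path.dirname(os.path.abspath(__file__))
assert script_dir == script_dir_alt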
Example #17
def getmethodclass(m):
  """Resolves a function's owner, e.g. a method's class.

  Note that this returns the object that the function was retrieved from, not
  necessarily the class where it was defined.

  This function relies on Python stack frame support in the interpreter and
  has the same limitations as inspect.currentframe.

  Limitations: this function will only work correctly if the owning class is
  visible in the caller's global or local variables.

  Args:
    m: A user defined function

  Returns:
    The class that this function was retrieved from, or None if the function
    is not an object or class method, or if the class that owns the object or
    method is not visible from the caller's namespace.

  Raises:
    ValueError: if the class could not be resolved for any unexpected reason.
  """

  # Callable objects: return their own class.
  if (not hasattr(m, '__name__') and hasattr(m, '__class__') and
      hasattr(m, '__call__')):
    if isinstance(m.__class__, six.class_types):
      return m.__class__

  # Instance method and class methods: should be bound to a non-null "self".
  # If self is a class, then it's a class method.
  if hasattr(m, '__self__'):
    if m.__self__:
      if tf_inspect.isclass(m.__self__):
        return m.__self__
      return type(m.__self__)

  # Class, static and unbound methods: search all defined classes in any
  # namespace. This is an inefficient but more robust method.
  owners = []
  caller_frame = tf_inspect.currentframe().f_back
  try:
    # TODO(mdan): This doesn't consider cell variables.
    # TODO(mdan): This won't work if the owner is hidden inside a container.
    # Cell variables may be pulled using co_freevars and the closure.
    for v in itertools.chain(caller_frame.f_locals.values(),
                             caller_frame.f_globals.values()):
      if hasattr(v, m.__name__):
        candidate = getattr(v, m.__name__)
        # Py2 methods may be bound or unbound, extract im_func to get the
        # underlying function.
        if hasattr(candidate, 'im_func'):
          candidate = candidate.im_func
        if hasattr(m, 'im_func'):
          m = m.im_func
        if candidate is m:
          owners.append(v)
  finally:
    del caller_frame

  if owners:
    if len(owners) == 1:
      return owners[0]

    # If multiple owners are found, and are not subclasses, raise an error.
    owner_types = tuple(o if tf_inspect.isclass(o) else type(o) for o in owners)
    for o in owner_types:
      if tf_inspect.isclass(o) and issubclass(o, tuple(owner_types)):
        return o
    raise ValueError('Found too many owners of %s: %s' % (m, owners))

  return None
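A hypothetical usage sketch for `getmethodclass`, assuming the function above is available in scope; the class and function names below are illustrative only:

class Greeter(object):

  def hello(self):
    return 'hi'

def standalone():
  return 'hi'

g = Greeter()
assert getmethodclass(g.hello) is Greeter        # Bound method: type(__self__).
assert getmethodclass(Greeter.hello) is Greeter  # Resolved via caller's namespace.
assert getmethodclass(standalone) is None        # Plain function: no owner found.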
Example #18
  def __enter__(self):
    self.frame = tf_inspect.currentframe()
    bindings = self.frame.f_back.f_globals
    self.old = {k: bindings.get(k, None) for k in self.names.keys()}
    bindings.update(self.names)
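Examples #13 and #18 show only the `__enter__` half of a context manager that injects names into the caller's module globals. A self-contained sketch of how such a manager might pair this with a restoring `__exit__`, using the standard `inspect` module; the class name and sentinel are illustrative assumptions, not the original implementation:

import inspect

_MISSING = object()  # Sentinel: the name was not bound before entering.

class temporary_globals(object):
  """Hypothetical context manager that temporarily binds names in the
  caller's module globals and restores the previous state on exit."""

  def __init__(self, **names):
    self.names = names

  def __enter__(self):
    # f_back is the frame executing the `with` statement; in CPython its
    # f_globals is the caller's real module namespace (a plain dict).
    self._bindings = inspect.currentframe().f_back.f_globals
    self._old = {k: self._bindings.get(k, _MISSING) for k in self.names}
    self._bindings.update(self.names)
    return self

  def __exit__(self, exc_type, exc_value, tb):
    for k, old in self._old.items():
      if old is _MISSING:
        self._bindings.pop(k, None)  # The name did not exist before: remove it.
      else:
        self._bindings[k] = old      # Restore the previous binding.
    return False  # Do not suppress exceptions.

with temporary_globals(ANSWER=42):
  print(ANSWER)                 # ANSWER is visible inside the block.
print('ANSWER' in globals())    # False again after the block.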