def advise(graph, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
  """Auto profile and advise.

    Builds profiles and automatically check anomalies of various
    aspects. For more details:
    https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  Args:
    graph: required tf.Graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        to support run time information profiling, such as time and memory.
    options: see ALL_ADVICE example above. Default checks everything.
  Returns:
    Returns AdviceProto proto
  """
  if options == _DEFAULT_ADVISE_OPTIONS:
    options = ALL_ADVICE.copy()

  # pylint: disable=protected-access
  op_log = tfprof_logger.merge_default_with_oplog(
      graph, None, run_meta, add_trace=True)
  # pylint: enable=protected-access

  run_meta_str = run_meta.SerializeToString() if run_meta else b''

  opts = _build_advisor_options(options)
  ret = tfprof_output_pb2.AdviceProto()
  ret.ParseFromString(
      print_mdl.PrintModelAnalysis(
          graph.as_graph_def(add_shapes=True).SerializeToString(),
          run_meta_str,
          op_log.SerializeToString(),
          'advise'.encode('utf-8'),
          opts.SerializeToString()))
  return ret
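
# --- Usage sketch (illustrative, not part of the original source). Shows how
# advise() might be called; assumes the TF 1.x API surface (tf.Session,
# tf.RunMetadata, tf.profiler.advise).
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  w = tf.Variable(tf.random_normal([64, 16]))
  y = tf.matmul(tf.random_normal([32, 64]), w)

with tf.Session(graph=g) as sess:
  sess.run(tf.global_variables_initializer())
  run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  run_meta = tf.RunMetadata()
  sess.run(y, options=run_options, run_metadata=run_meta)
  # Run all advice checks over the graph plus the collected run statistics.
  advice_proto = tf.profiler.advise(g, run_meta=run_meta)
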
  def add_step(self, step, run_meta):
    """Add statistics of a step.

    Args:
      step: int, an id used to group one or more different `run_meta` together.
          When profiling with the profile_xxx APIs, users can use the `step`
          id in `options` to profile these `run_meta` together.
      run_meta: RunMetadata proto that contains statistics of a session run.
    """
    # pylint: disable=protected-access
    op_log = tfprof_logger.merge_default_with_oplog(
        self._graph, run_meta=run_meta)
    # pylint: enable=protected-access
    # TODO(xpan): P1: Better to find the current graph.
    self._coverage = print_mdl.AddStep(step, _graph_string(self._graph),
                                       run_meta.SerializeToString(),
                                       op_log.SerializeToString())
  def add_step(self, step, run_meta):
    """Add statistics of a step.

    Args:
      step: int, a step id used to identify the `RunMetadata`. Must be
          different across different AddStep() calls.
      run_meta: RunMetadata proto that contains statistics of a session run.
    """
    # pylint: disable=protected-access
    op_log = tfprof_logger.merge_default_with_oplog(
        self._graph, run_meta=run_meta)
    # pylint: enable=protected-access
    # TODO(xpan): P1: Better to find the current graph.
    self._coverage = print_mdl.AddStep(
        step,
        self._graph.as_graph_def(add_shapes=True).SerializeToString(),
        run_meta.SerializeToString(), op_log.SerializeToString())
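
# --- Usage sketch (illustrative, not part of the original source). Shows
# feeding per-step RunMetadata into a Profiler via add_step(); assumes the
# TF 1.x API surface and the Profiler class whose constructors appear below.
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  w = tf.Variable(tf.random_normal([64, 16]))
  y = tf.matmul(tf.random_normal([32, 64]), w)

with tf.Session(graph=g) as sess:
  sess.run(tf.global_variables_initializer())
  profiler = tf.profiler.Profiler(g)
  for step in range(3):
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_meta = tf.RunMetadata()
    sess.run(y, options=run_options, run_metadata=run_meta)
    # Register this step's statistics with the profiler.
    profiler.add_step(step, run_meta)
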
  def __init__(self, graph, op_log=None):
    """Constructor.

    Args:
      graph: tf.Graph.
      op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
          extra op types.
    """
    self._coverage = 0.0
    self._graph = graph
    # pylint: disable=protected-access
    op_log = tfprof_logger.merge_default_with_oplog(
        self._graph, op_log=op_log)
    # pylint: enable=protected-access

    print_mdl.NewProfiler(
        self._graph.as_graph_def(add_shapes=True).SerializeToString(),
        op_log.SerializeToString())
  def __init__(self, graph=None, op_log=None):
    """Constructor.

    Args:
      graph: tf.Graph. If None and eager execution is not enabled, use default
        graph.
      op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
        extra op types.
    """
    if not graph and not context.executing_eagerly():
      graph = ops.get_default_graph()
    self._coverage = 0.0
    self._graph = graph
    # pylint: disable=protected-access
    op_log = tfprof_logger.merge_default_with_oplog(self._graph, op_log=op_log)
    # pylint: enable=protected-access
    print_mdl.NewProfiler(
        _graph_string(self._graph), op_log.SerializeToString())
  def __init__(self, graph=None, op_log=None):
    """Constructor.

    Args:
      graph: tf.Graph. If None and eager execution is not enabled, use
          default graph.
      op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
          extra op types.
    """
    if not graph and context.in_graph_mode():
      graph = ops.get_default_graph()
    self._coverage = 0.0
    self._graph = graph
    # pylint: disable=protected-access
    op_log = tfprof_logger.merge_default_with_oplog(
        self._graph, op_log=op_log)
    # pylint: enable=protected-access
    print_mdl.NewProfiler(
        _graph_string(self._graph), op_log.SerializeToString())
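
# --- Usage sketch (illustrative, not part of the original source). After
# construction and one or more add_step() calls, the profiler can be queried;
# profile_operations() and ProfileOptionBuilder.time_and_memory() are assumed
# from the TF 1.x tf.profiler namespace.
import tensorflow as tf

graph = tf.get_default_graph()
profiler = tf.profiler.Profiler(graph)
# ... run sessions and call profiler.add_step(step, run_meta) as above ...
opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
profiler.profile_operations(options=opts)
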
def profile(graph=None,
            run_meta=None,
            op_log=None,
            cmd='scope',
            options=_DEFAULT_PROFILE_OPTIONS):
  """Profile model.

  Tutorials and examples can be found in:
  https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  Args:
    graph: tf.Graph. If None and eager execution is not enabled, use
        default graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        support run-time information profiling, such as time and memory.
    op_log: tensorflow.tfprof.OpLogProto proto. User can assign "types" to
        graph nodes with op_log. "types" allow user to flexibly group and
        account profiles using options['accounted_type_regexes'].
    cmd: string. Either 'op', 'scope', 'graph' or 'code'.
        'op' view organizes profile using operation type. (e.g. MatMul)
        'scope' view organizes profile using graph node name scope.
        'graph' view organizes profile using graph node inputs/outputs.
        'code' view organizes profile using Python call stack.
    options: A dict of options. See core/profiler/g3doc/options.md.
  Returns:
    If cmd is 'scope' or 'graph', returns GraphNodeProto proto.
    If cmd is 'op' or 'code', returns MultiGraphNodeProto proto.
    Side effect: stdout/file/timeline.json depending on options['output'].
  """
  if not graph and not context.executing_eagerly():
    graph = ops.get_default_graph()

  if options == _DEFAULT_PROFILE_OPTIONS:
    options = (option_builder.ProfileOptionBuilder
               .trainable_variables_parameter())
  # pylint: disable=protected-access
  op_log = tfprof_logger.merge_default_with_oplog(
      graph, op_log, run_meta, add_trace=cmd == 'code')
  # pylint: enable=protected-access

  opts = _build_options(options)

  run_meta_str = run_meta.SerializeToString() if run_meta else b''

  graph_str = _graph_string(graph)

  if cmd == 'code' or cmd == 'op':
    tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
    ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
                                       op_log.SerializeToString(),
                                       cmd.encode('utf-8'),
                                       opts.SerializeToString())
    try:
      tfprof_node.ParseFromString(ret)
    except message.DecodeError as e:
      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)

  elif cmd == 'graph' or cmd == 'scope':
    tfprof_node = tfprof_output_pb2.GraphNodeProto()
    ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
                                       op_log.SerializeToString(),
                                       cmd.encode('utf-8'),
                                       opts.SerializeToString())
    try:
      tfprof_node.ParseFromString(ret)
    except message.DecodeError as e:
      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
  else:
    raise errors.InvalidArgumentError(None, None, 'unknown cmd: %s\n' % cmd)

  return tfprof_node
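
# --- Usage sketch (illustrative, not part of the original source). Profiles
# time and memory by name scope; assumes the TF 1.x tf.profiler namespace.
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  w = tf.Variable(tf.random_normal([64, 16]), name='weights')
  y = tf.matmul(tf.random_normal([32, 64]), w, name='matmul')

with tf.Session(graph=g) as sess:
  sess.run(tf.global_variables_initializer())
  run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  run_meta = tf.RunMetadata()
  sess.run(y, options=run_options, run_metadata=run_meta)
  # 'scope' view: aggregate statistics along the graph's name scopes.
  tf.profiler.profile(
      g,
      run_meta=run_meta,
      cmd='scope',
      options=tf.profiler.ProfileOptionBuilder.time_and_memory())
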
def _send_call_tracebacks(destinations,
                          origin_stack,
                          is_eager_execution=False,
                          call_key=None,
                          graph=None,
                          send_source=True):
  """Send the tracebacks of a TensorFlow execution call.

  To gRPC debug server(s). This applies to graph execution (`tf.Session.run()`)
  calls and eager execution calls.

  If `send_source`, also sends the underlying source files outside the
  TensorFlow library.

  Args:
    destinations: gRPC destination addresses, a `str` or a `list` of `str`s,
      e.g., "localhost:4242". If a `list`, gRPC requests containing the same
      `CallTraceback` proto payload will be sent to all the destinations.
    origin_stack: The traceback stack for the origin of the execution call. For
      graph execution, this is the traceback of the `tf.Session.run()`
      invocation. For eager execution, this is the traceback of the Python
      line that executes the eager operation.
    is_eager_execution: (`bool`) whether an eager execution call (i.e., not a
      `tf.Session.run` or derived methods) is being sent.
    call_key: The key of the execution call, as a string. For graph execution,
      this is a string describing the feeds, fetches (and targets) names of the
      `tf.Session.run` call. For eager execution, this is ignored.
    graph: A Python `tf.Graph` object (i.e., *not* a `tf.GraphDef`), which
      contains op tracebacks, if applicable.
    send_source: Whether the source files involved in the op tracebacks but
      outside the TensorFlow library are to be sent.
  """
  if not isinstance(destinations, list):
    destinations = [destinations]
  # Strip grpc:// prefix, if any is present.
  destinations = [
      dest[len(common.GRPC_URL_PREFIX):]
      if dest.startswith(common.GRPC_URL_PREFIX) else dest
      for dest in destinations
  ]

  call_type = (debug_service_pb2.CallTraceback.EAGER_EXECUTION
               if is_eager_execution
               else debug_service_pb2.CallTraceback.GRAPH_EXECUTION)
  graph_traceback = tfprof_logger.merge_default_with_oplog(
      graph, add_trainable_var=False) if graph else None
  call_traceback = debug_service_pb2.CallTraceback(
      call_type=call_type,
      call_key=call_key,
      graph_traceback=graph_traceback,
      graph_version=graph.version if graph else None)

  _format_origin_stack(origin_stack, call_traceback)

  if send_source:
    source_file_paths = set()
    source_file_paths.update(
        _source_file_paths_outside_tensorflow_py_library(
            (log_entry.code_def
             for log_entry in call_traceback.graph_traceback.log_entries),
            call_traceback.graph_traceback.id_to_string))
    source_file_paths.update(
        _source_file_paths_outside_tensorflow_py_library(
            [call_traceback.origin_stack],
            call_traceback.origin_id_to_string))

    debugged_source_files = []
    for file_path in source_file_paths:
      source_files = debug_pb2.DebuggedSourceFiles()
      _load_debugged_source_file(file_path, source_files.source_files.add())
      debugged_source_files.append(source_files)

  for destination in destinations:
    channel = grpc.insecure_channel(destination)
    stub = debug_service_pb2_grpc.EventListenerStub(channel)
    stub.SendTracebacks(call_traceback)
    if send_source:
      for path, source_files in zip(source_file_paths, debugged_source_files):
        if source_files.ByteSize() < grpc_message_length_bytes():
          stub.SendSourceFiles(source_files)
        else:
          tf_logging.warn(
              "The content of the source file at %s is not sent to "
              "gRPC debug server %s, because the message size exceeds "
              "gRPC message length limit (%d bytes)." %
              (path, destination, grpc_message_length_bytes()))
def _send_call_tracebacks(destinations,
                          origin_stack,
                          is_eager_execution=False,
                          call_key=None,
                          graph=None,
                          send_source=True):
  """Send the tracebacks of a TensorFlow execution call.

  To gRPC debug server(s). This applies to graph execution (`tf.Session.run()`)
  calls and eager execution calls.

  If `send_source`, also sends the underlying source files outside the
  TensorFlow library.

  Args:
    destinations: gRPC destination addresses, a `str` or a `list` of `str`s,
      e.g., "localhost:4242". If a `list`, gRPC requests containing the same
      `CallTraceback` proto payload will be sent to all the destinations.
    origin_stack: The traceback stack for the origin of the execution call. For
      graph execution, this is the traceback of the `tf.Session.run()`
      invocation. For eager execution, this is the traceback of the Python
      line that executes the eager operation.
    is_eager_execution: (`bool`) whether an eager execution call (i.e., not a
      `tf.Session.run` or derived methods) is being sent.
    call_key: The key of the execution call, as a string. For graph execution,
      this is a string describing the feeds, fetches (and targets) names of the
      `tf.Session.run` call. For eager execution, this is ignored.
    graph: A Python `tf.Graph` object (i.e., *not* a `tf.GraphDef`), which
      contains op tracebacks, if applicable.
    send_source: Whether the source files involved in the op tracebacks but
      outside the TensorFlow library are to be sent.
  """
  if not isinstance(destinations, list):
    destinations = [destinations]

  call_type = (debug_service_pb2.CallTraceback.EAGER_EXECUTION
               if is_eager_execution
               else debug_service_pb2.CallTraceback.GRAPH_EXECUTION)
  graph_traceback = tfprof_logger.merge_default_with_oplog(
      graph, add_trainable_var=False) if graph else None
  call_traceback = debug_service_pb2.CallTraceback(
      call_type=call_type, call_key=call_key, graph_traceback=graph_traceback,
      graph_version=graph.version if graph else None)

  _format_origin_stack(origin_stack, call_traceback)

  if send_source:
    source_file_paths = set()
    source_file_paths.update(_source_file_paths_outside_tensorflow_py_library(
        (log_entry.code_def for log_entry
         in call_traceback.graph_traceback.log_entries),
        call_traceback.graph_traceback.id_to_string))
    source_file_paths.update(_source_file_paths_outside_tensorflow_py_library(
        [call_traceback.origin_stack], call_traceback.origin_id_to_string))

    debugged_source_files = debug_pb2.DebuggedSourceFiles()
    for file_path in source_file_paths:
      _load_debugged_source_file(
          file_path, debugged_source_files.source_files.add())

  for destination in destinations:
    channel = grpc.insecure_channel(destination)
    stub = debug_service_pb2_grpc.EventListenerStub(channel)
    stub.SendTracebacks(call_traceback)
    if send_source:
      stub.SendSourceFiles(debugged_source_files)
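
# --- Sketch (illustrative, not part of the original source). This private
# helper is normally reached through module-level wrappers; the name and
# signature below are assumptions based on the surrounding tfdbg
# source_remote module, not definitions taken from the snippets above.
def send_graph_tracebacks(destinations,
                          run_key,
                          origin_stack,
                          graph,
                          send_source=True):
  """Send tracebacks of a graph execution call to gRPC debug server(s)."""
  _send_call_tracebacks(
      destinations, origin_stack, is_eager_execution=False,
      call_key=run_key, graph=graph, send_source=send_source)
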
def profile(graph,
            run_meta=None,
            op_log=None,
            cmd='scope',
            options=_DEFAULT_PROFILE_OPTIONS):
  """Profile model.

  Tutorials and examples can be found in:
  https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  Args:
    graph: required tf.Graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        support run-time information profiling, such as time and memory.
    op_log: tensorflow.tfprof.OpLogProto proto. User can assign "types" to
        graph nodes with op_log. "types" allow user to flexibly group and
        account profiles using options['accounted_type_regexes'].
    cmd: string. Either 'op', 'scope', 'graph' or 'code'.
        'op' view organizes profile using operation type. (e.g. MatMul)
        'scope' view organizes profile using graph node name scope.
        'graph' view organizes profile using graph node inputs/outputs.
        'code' view organizes profile using Python call stack.
    options: A dict of options. See core/profiler/g3doc/options.md.
  Returns:
    If cmd is 'scope' or 'graph', returns GraphNodeProto proto.
    If cmd is 'op' or 'code', returns MultiGraphNodeProto proto.
    Side effect: stdout/file/timeline.json depending on options['output']
  """
  if options == _DEFAULT_PROFILE_OPTIONS:
    options = (option_builder.ProfileOptionBuilder
               .trainable_variables_parameter())

  # pylint: disable=protected-access
  op_log = tfprof_logger.merge_default_with_oplog(
      graph, op_log, run_meta, add_trace=cmd == 'code')
  # pylint: enable=protected-access

  opts = _build_options(options)

  run_meta_str = run_meta.SerializeToString() if run_meta else b''

  if cmd == 'code' or cmd == 'op':
    tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
    ret = print_mdl.PrintModelAnalysis(
        graph.as_graph_def(add_shapes=True).SerializeToString(),
        run_meta_str,
        op_log.SerializeToString(),
        cmd.encode('utf-8'),
        opts.SerializeToString())
    try:
      tfprof_node.ParseFromString(ret)
    except message.DecodeError as e:
      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)

  elif cmd == 'graph' or cmd == 'scope':
    tfprof_node = tfprof_output_pb2.GraphNodeProto()
    ret = print_mdl.PrintModelAnalysis(
        graph.as_graph_def(add_shapes=True).SerializeToString(),
        run_meta_str,
        op_log.SerializeToString(),
        cmd.encode('utf-8'),
        opts.SerializeToString())
    try:
      tfprof_node.ParseFromString(ret)
    except message.DecodeError as e:
      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
  else:
    raise errors.InvalidArgumentError(
        None, None, 'unknown cmd: %s\n' % cmd)

  return tfprof_node
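
# --- Usage sketch (illustrative, not part of the original source). Two common
# static queries against a graph: parameter counts and floating-point
# operation counts; assumes the TF 1.x tf.profiler namespace.
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  w = tf.Variable(tf.random_normal([64, 16]))
  y = tf.matmul(tf.random_normal([32, 64]), w)

# Count trainable-variable parameters, organized by name scope.
tf.profiler.profile(
    g,
    cmd='scope',
    options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())

# Count floating point operations, organized by op type.
tf.profiler.profile(
    g,
    cmd='op',
    options=tf.profiler.ProfileOptionBuilder.float_operation())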