Example #1
    def __init__(self, context):
        """Constructs a profiler plugin for TensorBoard.

    This plugin adds handlers for performance-related frontends.

    Args:
      context: A base_plugin.TBContext instance.
    """
        self.logdir = context.logdir
        self.plugin_logdir = plugin_asset_util.PluginDirectory(
            self.logdir, ProfilePlugin.plugin_name)
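
The snippets in this listing all resolve the plugin's data directory through plugin_asset_util.PluginDirectory (aliased as pau in Example #2). As a point of reference, that helper composes the per-run "plugins/<plugin_name>" subdirectory; below is a minimal sketch of the equivalent path construction, an assumption for illustration only (the real helper may differ in details such as filesystem handling):

import os

def plugin_directory(run_dir, plugin_name='profile'):
    # Rough equivalent of plugin_asset_util.PluginDirectory(run_dir, plugin_name):
    # plugin assets live under <run_dir>/plugins/<plugin_name>.
    return os.path.join(run_dir, 'plugins', plugin_name)

# plugin_directory('/tmp/logdir') -> '/tmp/logdir/plugins/profile'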
Example #2
    def __init__(self, context):
        self._MULTIPLEXER = context.multiplexer
        self.PLUGIN_LOGDIR = pau.PluginDirectory(context.logdir, PLUGIN_NAME)
        self.FPS = 10
        self.most_recent_frame = get_image_relative_to_script('no-data.png')
        self.most_recent_info = [{
            'name': 'Waiting for data...',
        }]

        if not tf.gfile.Exists(self.PLUGIN_LOGDIR):
            tf.gfile.MakeDirs(self.PLUGIN_LOGDIR)
            write_pickle(DEFAULT_CONFIG,
                         '{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME))
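
write_pickle and DEFAULT_CONFIG are module-level helpers of this plugin that are not shown in the snippet. A plausible minimal sketch of write_pickle, assuming it simply serializes a value to the given path with pickle (the actual helper may go through tf.gfile to support remote filesystems):

import pickle

def write_pickle(value, path):
    # Hypothetical stand-in for the plugin's write_pickle helper:
    # serialize `value` to `path` with pickle.
    with open(path, 'wb') as f:
        pickle.dump(value, f)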
Example #3
def dump_data(logdir):
    """Dumps plugin data to the log directory."""
    # Create a tfevents file in the logdir so it is detected as a run.
    write_empty_event_file(logdir)

    plugin_logdir = plugin_asset_util.PluginDirectory(
        logdir, profile_plugin.ProfilePlugin.plugin_name)
    _maybe_create_directory(plugin_logdir)

    for run in profile_demo_data.RUNS:
        run_dir = os.path.join(plugin_logdir, run)
        _maybe_create_directory(run_dir)
        if run in profile_demo_data.TRACES:
            with open(os.path.join(run_dir, "trace"), "w") as f:
                proto = trace_events_pb2.Trace()
                text_format.Merge(profile_demo_data.TRACES[run], proto)
                f.write(proto.SerializeToString())

        if run not in profile_demo_data.TRACE_ONLY:
            shutil.copyfile(
                "tensorboard/plugins/profile/profile_demo.op_profile.json",
                os.path.join(run_dir, "op_profile.json"),
            )
            shutil.copyfile(
                "tensorboard/plugins/profile/profile_demo.memory_viewer.json",
                os.path.join(run_dir, "memory_viewer.json"),
            )
            shutil.copyfile(
                "tensorboard/plugins/profile/profile_demo.pod_viewer.json",
                os.path.join(run_dir, "pod_viewer.json"),
            )
            shutil.copyfile(
                "tensorboard/plugins/profile/profile_demo.google_chart_demo.json",
                os.path.join(run_dir, "google_chart_demo.json"),
            )
            shutil.copyfile(
                "tensorboard/plugins/profile/profile_demo.input_pipeline.json",
                os.path.join(run_dir, "input_pipeline.json"),
            )
            shutil.copyfile(
                "tensorboard/plugins/profile/profile_demo.overview_page.json",
                os.path.join(run_dir, "overview_page.json"),
            )

    # Unsupported tool data should not be displayed.
    run_dir = os.path.join(plugin_logdir, "empty")
    _maybe_create_directory(run_dir)
    with open(os.path.join(run_dir, "unsupported"), "w") as f:
        f.write("unsupported data")
Example #4
    def setUp(self):
        # Populate the log directory with runs and traces.
        self.logdir = self.get_temp_dir()
        plugin_logdir = plugin_asset_util.PluginDirectory(
            self.logdir, profile_plugin.ProfilePlugin.plugin_name)
        os.makedirs(plugin_logdir)
        self.run_to_tools = {
            'foo': ['trace_viewer'],
            'bar': ['unsupported'],
            'baz': ['trace_viewer'],
            'empty': [],
        }
        self.run_to_hosts = {
            'foo': ['host0', 'host1'],
            'bar': ['host1'],
            'baz': ['host2'],
            'empty': [],
        }
        for run in self.run_to_tools:
            run_dir = os.path.join(plugin_logdir, run)
            os.mkdir(run_dir)
            for tool in self.run_to_tools[run]:
                if tool not in profile_plugin.TOOLS:
                    continue
                for host in self.run_to_hosts[run]:
                    file_name = host + profile_plugin.TOOLS[tool]
                    tool_file = os.path.join(run_dir, file_name)
                    if tool == 'trace_viewer':
                        trace = trace_events_pb2.Trace()
                        trace.devices[0].name = run
                        data = trace.SerializeToString()
                    else:
                        data = tool.encode('utf-8')
                    with open(tool_file, 'wb') as f:
                        f.write(data)
        with open(os.path.join(plugin_logdir, 'noise'), 'w') as f:
            f.write('Not a dir, not a run.')

        # The profiler plugin does not use the multiplexer, so we do not bother to
        # construct a meaningful one right now. In fact, the profiler plugin does
        # not use this context object in general. Its constructor has to accept it
        # though, and it may use the context in the future.
        context = base_plugin.TBContext(logdir=self.logdir,
                                        multiplexer=None,
                                        flags=FakeFlags(self.logdir))
        self.plugin = profile_plugin.ProfilePlugin(context)
        self.apps = self.plugin.get_plugin_apps()
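
FakeFlags is a test helper that is not shown in this snippet. Judging from the flags the plugin reads in these examples (context.flags.master_tpu_unsecure_channel in Example #7), a plausible minimal sketch is:

class FakeFlags(object):
    # Hypothetical stand-in for the test's FakeFlags helper; it exposes only
    # the flags the profile plugin is seen reading in these examples.
    def __init__(self, logdir, master_tpu_unsecure_channel=''):
        self.logdir = logdir
        self.master_tpu_unsecure_channel = master_tpu_unsecure_channel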
Example #5
    def _run_dir(self, run):
        """Helper that maps a frontend run name to a profile "run" directory.

    The frontend run name consists of the TensorBoard run name (aka the relative
    path from the logdir root to the directory containing the data) path-joined
    to the Profile plugin's "run" concept (which is a subdirectory of the
    plugins/profile directory representing an individual run of the tool), with
    the special case that TensorBoard run is the logdir root (which is the run
    named '.') then only the Profile plugin "run" name is used, for backwards
    compatibility.

    To convert back to the actual run directory, we apply the following
    transformation:
    - If the run name doesn't contain '/', prepend './'
    - Split on the rightmost instance of '/'
    - Assume the left side is a TensorBoard run name and map it to a directory
      path using EventMultiplexer.RunPaths(), then map that to the profile
      plugin directory via PluginDirectory()
    - Assume the right side is a Profile plugin "run" and path-join it to
      the preceding path to get the final directory

    Args:
      run: the frontend run name, as described above, e.g. train/run1.

    Returns:
      The resolved directory path, e.g. /logdir/train/plugins/profile/run1.
    """
        run = run.rstrip('/')
        if '/' not in run:
            run = './' + run
        tb_run_name, _, profile_run_name = run.rpartition('/')
        tb_run_directory = self.multiplexer.RunPaths().get(tb_run_name)
        if tb_run_directory is None:
            # Check if logdir is a directory to handle case where it's actually a
            # multipart directory spec, which this plugin does not support.
            if tb_run_name == '.' and tf.io.gfile.isdir(self.logdir):
                tb_run_directory = self.logdir
            else:
                raise RuntimeError("No matching run directory for run %s" %
                                   run)
        plugin_directory = plugin_asset_util.PluginDirectory(
            tb_run_directory, PLUGIN_NAME)
        return os.path.join(plugin_directory, profile_run_name)
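
A short worked example of the mapping this helper performs, assuming a multiplexer whose RunPaths() maps 'train' to '/logdir/train' (paths are illustrative):

# 'train/run1' -> rpartition('/') yields ('train', '/', 'run1')
#              -> RunPaths()['train'] == '/logdir/train'
#              -> PluginDirectory('/logdir/train', 'profile')
#                   == '/logdir/train/plugins/profile'
#              -> result: '/logdir/train/plugins/profile/run1'
#
# 'run1'       -> no '/' so it becomes './run1', i.e. TensorBoard run '.',
#                 which falls back to the logdir root for backwards compatibility
#              -> result: '/logdir/plugins/profile/run1'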
Example #6
def dump_data(logdir):
    """Dumps plugin data to the log directory."""
    plugin_logdir = plugin_asset_util.PluginDirectory(
        logdir, profile_plugin.ProfilePlugin.plugin_name)
    _maybe_create_directory(plugin_logdir)

    for run in profile_demo_data.RUNS:
        run_dir = os.path.join(plugin_logdir, run)
        _maybe_create_directory(run_dir)
        if run in profile_demo_data.TRACES:
            with open(os.path.join(run_dir, 'trace'), 'wb') as f:
                proto = trace_events_pb2.Trace()
                text_format.Merge(profile_demo_data.TRACES[run], proto)
                f.write(proto.SerializeToString())

    # Unsupported tool data should not be displayed.
    run_dir = os.path.join(plugin_logdir, 'empty')
    _maybe_create_directory(run_dir)
    with open(os.path.join(run_dir, 'unsupported'), 'w') as f:
        f.write('unsupported data')
Example #7
    def __init__(self, context):
        """Constructs a profiler plugin for TensorBoard.

    This plugin adds handlers for performance-related frontends.

    Args:
      context: A base_plugin.TBContext instance.
    """
        self.logdir = context.logdir
        self.multiplexer = context.multiplexer
        self.plugin_logdir = plugin_asset_util.PluginDirectory(
            self.logdir, PLUGIN_NAME)
        self.stub = None
        self.master_tpu_unsecure_channel = context.flags.master_tpu_unsecure_channel

        # Whether the plugin is active. This is an expensive computation, so we
        # compute this asynchronously and cache positive results indefinitely.
        self._is_active = False
        # Lock to ensure at most one thread computes _is_active at a time.
        self._is_active_lock = threading.Lock()
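
The comments above describe computing the plugin's active state asynchronously and caching positive results. A minimal sketch of that pattern, assuming a hypothetical _compute_is_active() helper that performs the expensive check:

import threading

def is_active(self):
    """Returns the cached state and kicks off a background check if needed."""
    if self._is_active:
        return True
    # Let at most one thread run the expensive check at a time.
    if self._is_active_lock.acquire(False):
        def _check():
            try:
                if self._compute_is_active():  # hypothetical expensive probe
                    self._is_active = True     # cache positive results forever
            finally:
                self._is_active_lock.release()
        threading.Thread(target=_check, name='ProfilePluginIsActiveThread').start()
    return self._is_active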
Example #8
def dump_data(logdir):
    """Dumps plugin data to the log directory."""
    # Create a tfevents file in the logdir so it is detected as a run.
    write_empty_event_file(logdir)

    plugin_logdir = plugin_asset_util.PluginDirectory(
        logdir, profile_plugin.ProfilePlugin.plugin_name)
    _maybe_create_directory(plugin_logdir)

    for run in profile_demo_data.RUNS:
        run_dir = os.path.join(plugin_logdir, run)
        _maybe_create_directory(run_dir)
        if run in profile_demo_data.TRACES:
            with open(os.path.join(run_dir, 'trace'), 'wb') as f:
                proto = trace_events_pb2.Trace()
                text_format.Merge(profile_demo_data.TRACES[run], proto)
                f.write(proto.SerializeToString())

        if run not in profile_demo_data.TRACE_ONLY:
            shutil.copyfile((DEMO_DIR + 'profile_demo.op_profile.json'),
                            os.path.join(run_dir, 'op_profile.json'))
            shutil.copyfile((DEMO_DIR + 'profile_demo.memory_viewer.json'),
                            os.path.join(run_dir, 'memory_viewer.json'))
            shutil.copyfile((DEMO_DIR + 'profile_demo.pod_viewer.json'),
                            os.path.join(run_dir, 'pod_viewer.json'))
            shutil.copyfile((DEMO_DIR + 'profile_demo.input_pipeline.json'),
                            os.path.join(run_dir, 'input_pipeline.json'))
            shutil.copyfile((DEMO_DIR + 'profile_demo.overview_page.json'),
                            os.path.join(run_dir, 'overview_page.json'))
            shutil.copyfile((DEMO_DIR + 'profile_demo.tensorflow_stats.pb'),
                            os.path.join(run_dir, 'tensorflow_stats.pb'))
            shutil.copyfile((DEMO_DIR + 'profile_demo.overview_page.pb'),
                            os.path.join(run_dir, 'overview_page.pb'))
            shutil.copyfile((DEMO_DIR + 'profile_demo.input_pipeline.pb'),
                            os.path.join(run_dir, 'input_pipeline.pb'))

    # Unsupported tool data should not be displayed.
    run_dir = os.path.join(plugin_logdir, 'empty')
    _maybe_create_directory(run_dir)
    with open(os.path.join(run_dir, 'unsupported'), 'w') as f:
        f.write('unsupported data')
Example #9
def generate_testdata(logdir):
    plugin_logdir = plugin_asset_util.PluginDirectory(
        logdir, profile_plugin.ProfilePlugin.plugin_name)
    os.makedirs(plugin_logdir)
    for run in RUN_TO_TOOLS:
        run_dir = os.path.join(plugin_logdir, run)
        os.mkdir(run_dir)
        for tool in RUN_TO_TOOLS[run]:
            if tool not in profile_plugin.TOOLS:
                continue
            for host in RUN_TO_HOSTS[run]:
                file_name = host + profile_plugin.TOOLS[tool]
                tool_file = os.path.join(run_dir, file_name)
                if tool == 'trace_viewer':
                    trace = trace_events_pb2.Trace()
                    trace.devices[0].name = run
                    data = trace.SerializeToString()
                else:
                    data = tool.encode('utf-8')
                with open(tool_file, 'wb') as f:
                    f.write(data)
    with open(os.path.join(plugin_logdir, 'noise'), 'w') as f:
        f.write('Not a dir, not a run.')
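
RUN_TO_TOOLS and RUN_TO_HOSTS are module-level constants that are not shown here. Based on the equivalent per-test dictionaries in Example #4, they plausibly look like this (values are illustrative):

RUN_TO_TOOLS = {
    'foo': ['trace_viewer'],
    'bar': ['unsupported'],
    'baz': ['trace_viewer'],
    'empty': [],
}
RUN_TO_HOSTS = {
    'foo': ['host0', 'host1'],
    'bar': ['host1'],
    'baz': ['host2'],
    'empty': [],
}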
Example #10
    def _run_dir(self, run):
        """Helper that maps a frontend run name to a profile "run" directory.

    The frontend run name consists of the TensorBoard run name (aka the relative
    path from the logdir root to the directory containing the data) path-joined
    to the Profile plugin's "run" concept (which is a subdirectory of the
    plugins/profile directory representing an individual run of the tool), with
    the special case that TensorBoard run is the logdir root (which is the run
    named '.') then only the Profile plugin "run" name is used, for backwards
    compatibility.

    Args:
      run: the frontend run name, as described above, e.g. train/run1.

    Returns:
      The resolved directory path, e.g. /logdir/train/plugins/profile/run1.

    Raises:
      RuntimeError: If the run directory is not found.
    """
        run = run.rstrip(os.sep)
        tb_run_name, profile_run_name = os.path.split(run)
        if not tb_run_name:
            tb_run_name = '.'
        tb_run_directory = self.multiplexer.RunPaths().get(tb_run_name)
        if tb_run_directory is None:
            # Check if logdir is a directory to handle case where it's actually a
            # multipart directory spec, which this plugin does not support.
            if tb_run_name == '.' and tf.io.gfile.isdir(self.logdir):
                tb_run_directory = self.logdir
            else:
                raise RuntimeError('No matching run directory for run %s' %
                                   run)
        plugin_directory = plugin_asset_util.PluginDirectory(
            tb_run_directory, PLUGIN_NAME)
        return os.path.join(plugin_directory, profile_run_name)
Example #11
    def generate_run_to_tools(self):
        """Generator for pairs of "run name" and a list of tools for that run.

    The "run name" here is a "frontend run name" - see _run_dir() for the
    definition of a "frontend run name" and how it maps to a directory of
    profile data for a specific profile "run". The profile plugin concept of
    "run" is different from the normal TensorBoard run; each run in this case
    represents a single instance of profile data collection, more similar to a
    "step" of data in typical TensorBoard semantics. These runs reside in
    subdirectories of the plugins/profile directory within any regular
    TensorBoard run directory (defined as a subdirectory of the logdir that
    contains at least one tfevents file) or within the logdir root directory
    itself (even if it contains no tfevents file and would thus not be
    considered a normal TensorBoard run, for backwards compatibility).
    Within those "profile run directories", there are files in the directory
    that correspond to different profiling tools. The file that contains profile
    for a specific tool "x" will have a suffix name TOOLS["x"].
    Example:
      logs/
        plugins/
          profile/
            run1/
              hostA.trace
        train/
          events.out.tfevents.foo
          plugins/
            profile/
              run1/
                hostA.trace
                hostB.trace
              run2/
                hostA.trace
        validation/
          events.out.tfevents.foo
          plugins/
            profile/
              run1/
                hostA.trace
    Yields:
      A sequence of tuples mapping "frontend run names" to lists of tool names
      available for those runs. For the above example, this would be:
          ("run1", ["trace_viewer"])
          ("train/run1", ["trace_viewer"])
          ("train/run2", ["trace_viewer"])
          ("validation/run1", ["trace_viewer"])
    """
        self.start_grpc_stub_if_necessary()

        plugin_assets = self.multiplexer.PluginAssets(PLUGIN_NAME)
        tb_run_names_to_dirs = self.multiplexer.RunPaths()

        # Ensure that we also check the root logdir, even if it isn't a recognized
        # TensorBoard run (i.e. has no tfevents file directly under it), to remain
        # backwards compatible with previously profile plugin behavior. Note that we
        # check if logdir is a directory to handle case where it's actually a
        # multipart directory spec, which this plugin does not support.
        if '.' not in plugin_assets and tf.io.gfile.isdir(self.logdir):
            tb_run_names_to_dirs['.'] = self.logdir
            plugin_assets['.'] = plugin_asset_util.ListAssets(
                self.logdir, PLUGIN_NAME)

        for tb_run_name, profile_runs in six.iteritems(plugin_assets):
            tb_run_dir = tb_run_names_to_dirs[tb_run_name]
            tb_plugin_dir = plugin_asset_util.PluginDirectory(
                tb_run_dir, PLUGIN_NAME)
            for profile_run in profile_runs:
                # Remove trailing separator; some filesystem implementations emit this.
                profile_run = profile_run.rstrip(os.sep)
                if tb_run_name == '.':
                    frontend_run = profile_run
                else:
                    frontend_run = os.path.join(tb_run_name, profile_run)
                profile_run_dir = os.path.join(tb_plugin_dir, profile_run)
                if tf.io.gfile.isdir(profile_run_dir):
                    yield frontend_run, self._get_active_tools(profile_run_dir)
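
A minimal sketch of how a route handler might consume this generator to build the run-to-tools mapping served to the frontend (the function name and response shape are illustrative, not the plugin's actual API):

import json

def runs_to_tools_json(plugin):
    # Collect the generator into a plain dict, e.g.
    # {'train/run1': ['trace_viewer'], 'validation/run1': ['trace_viewer']}
    mapping = {run: list(tools) for run, tools in plugin.generate_run_to_tools()}
    return json.dumps(mapping)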