# Example #1
def standard_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider):
    """Construct a TensorBoardWSGIApp with standard plugins and multiplexer.

    Selects one of three data-loading modes from `flags`: DB import mode
    (`--db_import`), DB read-only mode (`--db` without import), or regular
    logdir loading, then optionally starts the multiplexer reload loop.

    Args:
      flags: An argparse.Namespace containing TensorBoard CLI flags.
      plugin_loaders: A list of TBLoader instances.
      assets_zip_provider: See TBContext documentation for more information.

    Returns:
      The new TensorBoard WSGI application.

    :type plugin_loaders: list[base_plugin.TBLoader]
    :rtype: TensorBoardWSGI
    """
    data_provider = None
    multiplexer = None
    reload_interval = flags.reload_interval
    if flags.db_import:
        # DB import mode.
        db_uri = flags.db
        # Create a temporary DB file if we weren't given one.
        # The tempdir is removed at interpreter exit so imports don't
        # accumulate on disk.
        if not db_uri:
            tmpdir = tempfile.mkdtemp(prefix='tbimport')
            atexit.register(shutil.rmtree, tmpdir)
            db_uri = 'sqlite:%s/tmp.sqlite' % tmpdir
        db_connection_provider = create_sqlite_connection_provider(db_uri)
        logger.info('Importing logdir into DB at %s', db_uri)
        multiplexer = db_import_multiplexer.DbImportMultiplexer(
            db_uri=db_uri,
            db_connection_provider=db_connection_provider,
            purge_orphaned_data=flags.purge_orphaned_data,
            max_reload_threads=flags.max_reload_threads)
    elif flags.db:
        # DB read-only mode, never load event logs.
        # (reload_interval is forced negative so the reload branch below
        # is skipped regardless of the CLI value.)
        reload_interval = -1
        db_connection_provider = create_sqlite_connection_provider(flags.db)
        multiplexer = _DbModeMultiplexer(flags.db, db_connection_provider)
    else:
        # Regular logdir loading mode.
        multiplexer = event_multiplexer.EventMultiplexer(
            size_guidance=DEFAULT_SIZE_GUIDANCE,
            tensor_size_guidance=tensor_size_guidance_from_flags(flags),
            purge_orphaned_data=flags.purge_orphaned_data,
            max_reload_threads=flags.max_reload_threads,
            event_file_active_filter=_get_event_file_active_filter(flags))
        # The generic data provider is only built in logdir mode, and only
        # unless explicitly disabled with --generic_data=false.
        if flags.generic_data != 'false':
            data_provider = event_data_provider.MultiplexerDataProvider(
                multiplexer, flags.logdir or flags.logdir_spec)

    if reload_interval >= 0:
        # We either reload the multiplexer once when TensorBoard starts up, or we
        # continuously reload the multiplexer.
        if flags.logdir:
            path_to_run = {os.path.expanduser(flags.logdir): None}
        else:
            path_to_run = parse_event_files_spec(flags.logdir_spec)
        start_reloading_multiplexer(multiplexer, path_to_run, reload_interval,
                                    flags.reload_task)
    return TensorBoardWSGIApp(flags, plugin_loaders, data_provider,
                              assets_zip_provider, multiplexer)
 def createPlugin(self, logdir):
     """Build a CustomScalarsPlugin reading events from `logdir`."""
     # Load every run under the logdir into a fresh multiplexer.
     multiplexer = event_multiplexer.EventMultiplexer()
     multiplexer.AddRunsFromDirectory(logdir)
     multiplexer.Reload()
     provider = data_provider.MultiplexerDataProvider(multiplexer, logdir)
     plugin_name_to_instance = {}
     context = base_plugin.TBContext(
         logdir=logdir,
         multiplexer=multiplexer,
         data_provider=provider,
         plugin_name_to_instance=plugin_name_to_instance,
     )
     # Register both plugins in the shared name->instance mapping so they
     # can find each other through the context.
     scalars_instance = scalars_plugin.ScalarsPlugin(context)
     custom_scalars_instance = custom_scalars_plugin.CustomScalarsPlugin(
         context
     )
     for instance in (scalars_instance, custom_scalars_instance):
         plugin_name_to_instance[instance.plugin_name] = instance
     return custom_scalars_instance
# Example #3
    def __init__(self, flags):
        """Initializes a `LocalDataIngester` from `flags`.

        Args:
          flags: An argparse.Namespace containing TensorBoard CLI flags.

        Returns:
          The new `LocalDataIngester`.
        """
        # Start from the defaults and layer on per-plugin overrides.
        guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE)
        guidance.update(flags.samples_per_plugin)
        self._multiplexer = plugin_event_multiplexer.EventMultiplexer(
            size_guidance=DEFAULT_SIZE_GUIDANCE,
            tensor_size_guidance=guidance,
            purge_orphaned_data=flags.purge_orphaned_data,
            max_reload_threads=flags.max_reload_threads,
            event_file_active_filter=_get_event_file_active_filter(flags),
            detect_file_replacement=flags.detect_file_replacement,
        )
        self._data_provider = data_provider.MultiplexerDataProvider(
            self._multiplexer, flags.logdir or flags.logdir_spec)
        self._reload_interval = flags.reload_interval
        self._reload_task = flags.reload_task
        # A plain logdir maps to a single path with no fixed run name;
        # otherwise parse the explicit name:path spec.
        self._path_to_run = (
            {os.path.expanduser(flags.logdir): None}
            if flags.logdir
            else _parse_event_files_spec(flags.logdir_spec)
        )

        # Conditionally import tensorflow_io.
        if getattr(tf, "__version__", "stub") != "stub":
            _check_filesystem_support(self._path_to_run.keys())
    def test_list_runs(self):
        """list_runs yields one Run per multiplexer run with its start time."""
        # We can't control the timestamps of events written to disk (without
        # manually reading the tfrecords, modifying the data, and writing
        # them back out), so we provide a fake multiplexer instead.
        start_times = {
            "second_2": 2.0,
            "first": 1.5,
            "no_time": None,
            "second_1": 2.0,
        }

        class FakeMultiplexer(object):
            def Runs(multiplexer):
                result = ["second_2", "first", "no_time", "second_1"]
                self.assertItemsEqual(result, start_times)
                return result

            def FirstEventTimestamp(multiplexer, run):
                self.assertIn(run, start_times)
                result = start_times[run]
                if result is None:
                    raise ValueError("No event timestep could be found")
                else:
                    return result

        multiplexer = FakeMultiplexer()
        provider = data_provider.MultiplexerDataProvider(
            multiplexer, "fake_logdir")
        result = provider.list_runs(experiment_id="unused")
        # `dict.items()` replaces the deprecated `six.iteritems`; this file
        # already targets Python 3.
        self.assertItemsEqual(result, [
            base_provider.Run(run_id=run, run_name=run, start_time=start_time)
            for (run, start_time) in start_times.items()
        ])
# Example #5
    def test_read_tensors(self):
        """Tensors read through the provider mirror the multiplexer's data."""
        multiplexer = self.create_multiplexer()
        provider = data_provider.MultiplexerDataProvider(
            multiplexer, self.logdir)

        result = provider.read_tensors(
            experiment_id="unused",
            plugin_name=histogram_metadata.PLUGIN_NAME,
            run_tag_filter=base_provider.RunTagFilter(
                runs=["lebesgue"],
                tags=["uniform", "bimodal"],
            ),
            downsample=None,  # not yet implemented
        )

        self.assertItemsEqual(result.keys(), ["lebesgue"])
        self.assertItemsEqual(result["lebesgue"].keys(),
                              ["uniform", "bimodal"])
        # Each datum must match the corresponding multiplexer event
        # field-for-field.
        for run, tags in result.items():
            for tag, data in tags.items():
                events = multiplexer.Tensors(run, tag)
                self.assertLen(data, len(events))
                for datum, event in zip(data, events):
                    self.assertEqual(datum.step, event.step)
                    self.assertEqual(datum.wall_time, event.wall_time)
                    np.testing.assert_equal(
                        datum.numpy,
                        tensor_util.make_ndarray(event.tensor_proto),
                    )
# Example #6
    def test_read_scalars(self):
        """Scalars read through the provider mirror the multiplexer's data."""
        multiplexer = self.create_multiplexer()
        provider = data_provider.MultiplexerDataProvider(
            multiplexer, self.logdir)

        result = provider.read_scalars(
            experiment_id="unused",
            plugin_name=scalar_metadata.PLUGIN_NAME,
            run_tag_filter=base_provider.RunTagFilter(
                runs=["waves", "polynomials", "unicorns"],
                tags=["sine", "square", "cube", "iridescence"],
            ),
            downsample=None,  # not yet implemented
        )

        # Runs/tags in the filter but absent from the data are dropped.
        self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
        self.assertItemsEqual(result["polynomials"].keys(), ["square", "cube"])
        self.assertItemsEqual(result["waves"].keys(), ["square", "sine"])
        for run, tags in result.items():
            for tag, data in tags.items():
                events = multiplexer.Tensors(run, tag)
                self.assertLen(data, len(events))
                for datum, event in zip(data, events):
                    self.assertEqual(datum.step, event.step)
                    self.assertEqual(datum.wall_time, event.wall_time)
                    self.assertEqual(
                        datum.value,
                        tensor_util.make_ndarray(event.tensor_proto).item(),
                    )
# Example #7
 def setUp(self):
     """Build a TBContext whose multiplexer is an autospec mock.

     The mock's metadata lookups are routed to fake implementations
     defined elsewhere on this test class.
     """
     self._mock_tb_context = base_plugin.TBContext()
     # TODO(#3425): Remove mocking or switch to mocking data provider
     # APIs directly.
     self._mock_multiplexer = mock.create_autospec(
         plugin_event_multiplexer.EventMultiplexer
     )
     self._mock_tb_context.multiplexer = self._mock_multiplexer
     self._mock_multiplexer.PluginRunToTagToContent.side_effect = (
         self._mock_plugin_run_to_tag_to_content
     )
     self._mock_multiplexer.AllSummaryMetadata.side_effect = (
         self._mock_all_summary_metadata
     )
     self._mock_multiplexer.SummaryMetadata.side_effect = (
         self._mock_summary_metadata
     )
     # The real data provider wraps the mocked multiplexer, so provider
     # calls exercise the mock wiring above.
     self._mock_tb_context.data_provider = (
         data_provider.MultiplexerDataProvider(
             self._mock_multiplexer, "/path/to/logs"
         )
     )
     # Session start-info payloads; populated by individual tests.
     self.session_1_start_info_ = ""
     self.session_2_start_info_ = ""
     self.session_3_start_info_ = ""
# Example #8
    def __init__(self, flags):
        """Initializes a `LocalDataIngester` from `flags`.

        Args:
          flags: An argparse.Namespace containing TensorBoard CLI flags.

        Returns:
          The new `LocalDataIngester`.
        """
        # Per-plugin sampling overrides are layered on top of the defaults.
        tensor_size_guidance = {**DEFAULT_TENSOR_SIZE_GUIDANCE}
        tensor_size_guidance.update(flags.samples_per_plugin)
        multiplexer = plugin_event_multiplexer.EventMultiplexer(
            size_guidance=DEFAULT_SIZE_GUIDANCE,
            tensor_size_guidance=tensor_size_guidance,
            purge_orphaned_data=flags.purge_orphaned_data,
            max_reload_threads=flags.max_reload_threads,
            event_file_active_filter=_get_event_file_active_filter(flags),
        )
        self._multiplexer = multiplexer
        self._data_provider = data_provider.MultiplexerDataProvider(
            multiplexer, flags.logdir or flags.logdir_spec)
        self._reload_interval = flags.reload_interval
        self._reload_task = flags.reload_task
        if flags.logdir:
            # A plain logdir maps to a single path with no fixed run name.
            self._path_to_run = {os.path.expanduser(flags.logdir): None}
        else:
            self._path_to_run = _parse_event_files_spec(flags.logdir_spec)
# Example #9
 def _SetupWSGIApp(self):
     """Wire a ProjectorPlugin into a WSGI app and expose a test client."""
     logdir = self.log_dir
     provider = data_provider.MultiplexerDataProvider(
         event_multiplexer.EventMultiplexer(), logdir)
     context = base_plugin.TBContext(logdir=logdir, data_provider=provider)
     self.plugin = projector_plugin.ProjectorPlugin(context)
     self.server = werkzeug_test.Client(
         application.TensorBoardWSGI([self.plugin]), wrappers.Response)
 def setUp(self):
     """Stand up an ImagesPlugin server backed by generated test data."""
     super().setUp()
     (logdir, multiplexer) = self._create_data()
     ctx = base_plugin.TBContext(
         logdir=logdir,
         data_provider=data_provider.MultiplexerDataProvider(
             multiplexer, logdir),
     )
     plugin = images_plugin.ImagesPlugin(ctx)
     self.server = werkzeug_test.Client(
         application.TensorBoardWSGI([plugin]), wrappers.BaseResponse)
     self.routes = plugin.get_plugin_apps()
# Example #11
 def load_plugin(self):
     """Generate test data and return a TextPlugin that reads it."""
     self.generate_testdata()
     multiplexer = event_multiplexer.EventMultiplexer()
     multiplexer.AddRunsFromDirectory(self.logdir)
     multiplexer.Reload()
     ctx = base_plugin.TBContext(
         logdir=self.logdir,
         data_provider=data_provider.MultiplexerDataProvider(
             multiplexer, self.logdir),
     )
     return text_plugin.TextPlugin(ctx)
# Example #12
 def test_read_tensors_downsamples(self):
     """A downsample budget of 3 yields exactly 3 tensor events."""
     provider = data_provider.MultiplexerDataProvider(
         self.create_multiplexer(), self.logdir)
     result = provider.read_tensors(
         experiment_id="unused",
         plugin_name=histogram_metadata.PLUGIN_NAME,
         downsample=3,
     )
     self.assertLen(result["lebesgue"]["uniform"], 3)
# Example #13
 def test_read_scalars_downsamples(self):
     """A downsample budget of 3 yields exactly 3 scalar events."""
     provider = data_provider.MultiplexerDataProvider(
         self.create_multiplexer(), self.logdir)
     result = provider.read_scalars(
         experiment_id="unused",
         plugin_name=scalar_metadata.PLUGIN_NAME,
         downsample=3,
     )
     self.assertLen(result["waves"]["sine"], 3)
# Example #14
 def test_read_scalars_downsamples(self):
     """Downsampling limits each scalar series to the requested count."""
     # TODO(@wchargin): Verify that this always includes the most
     # recent datum, as specified by the interface.
     multiplexer = self.create_multiplexer()
     provider = data_provider.MultiplexerDataProvider(
         multiplexer, self.logdir)
     series = provider.read_scalars(
         experiment_id="unused",
         plugin_name=scalar_metadata.PLUGIN_NAME,
         downsample=3,
     )["waves"]["sine"]
     self.assertLen(series, 3)
 def wrapper(self, *args, **kwargs):
     """Run `fn` against a GraphsPlugin backed by a generic data provider."""
     (logdir, multiplexer) = self.load_runs(run_specs)
     with self.subTest("generic data provider"):
         ctx = base_plugin.TBContext(
             flags=argparse.Namespace(generic_data="true"),
             logdir=logdir,
             multiplexer=multiplexer,
             data_provider=data_provider.MultiplexerDataProvider(
                 multiplexer, logdir),
         )
         fn(self, graphs_plugin.GraphsPlugin(ctx), *args, **kwargs)
 def load_plugin(self, run_specs):
     """Generate one run per spec and return a GraphsPlugin over them."""
     logdir = self.get_temp_dir()
     for spec in run_specs:
         self.generate_run(logdir, *spec)
     multiplexer = event_multiplexer.EventMultiplexer()
     multiplexer.AddRunsFromDirectory(logdir)
     multiplexer.Reload()
     ctx = base_plugin.TBContext(
         logdir=logdir,
         multiplexer=multiplexer,
         data_provider=data_provider.MultiplexerDataProvider(
             multiplexer, logdir),
     )
     return graphs_plugin.GraphsPlugin(ctx)
 def setUp(self):
     """Serve a CorePlugin over an empty multiplexer for resource tests."""
     super(CorePluginResourceTest, self).setUp()
     self.logdir = self.get_temp_dir()
     self.multiplexer = event_multiplexer.EventMultiplexer()
     context = base_plugin.TBContext(
         assets_zip_provider=get_test_assets_zip_provider(),
         logdir=self.logdir,
         data_provider=data_provider.MultiplexerDataProvider(
             self.multiplexer, self.logdir),
     )
     self.plugin = core_plugin.CorePlugin(context)
     app = application.TensorBoardWSGI([self.plugin])
     self.server = werkzeug_test.Client(app, wrappers.BaseResponse)
# Example #18
 def set_up_with_runs(self, run_names):
     """Create one run per name and attach a DistributionsPlugin to them."""
     logdir = self.get_temp_dir()
     for run_name in run_names:
         self.generate_run(logdir, run_name)
     # Keep all _STEPS tensor events — don't truncate my test data, please.
     multiplexer = event_multiplexer.EventMultiplexer(
         size_guidance={tag_types.TENSORS: self._STEPS})
     multiplexer.AddRunsFromDirectory(logdir)
     multiplexer.Reload()
     provider = data_provider.MultiplexerDataProvider(multiplexer, logdir)
     context = base_plugin.TBContext(logdir=logdir, data_provider=provider)
     self.plugin = distributions_plugin.DistributionsPlugin(context)
# Example #19
 def wrapper(self, *args, **kwargs):
   """Run `fn` twice: on a bare multiplexer and on a data provider."""
   (logdir, multiplexer) = self.load_runs(run_names)
   with self.subTest('bare multiplexer'):
     ctx = base_plugin.TBContext(logdir=logdir, multiplexer=multiplexer)
     fn(self, histograms_plugin.HistogramsPlugin(ctx), *args, **kwargs)
   with self.subTest('generic data provider'):
     provider = data_provider.MultiplexerDataProvider(multiplexer, logdir)
     ctx = base_plugin.TBContext(
         flags=argparse.Namespace(generic_data='true'),
         logdir=logdir,
         multiplexer=multiplexer,
         data_provider=provider,
     )
     fn(self, histograms_plugin.HistogramsPlugin(ctx), *args, **kwargs)
    def setUp(self):
        """Construct a MetricsPlugin over an empty multiplexer."""
        super(MetricsPluginTest, self).setUp()
        self._logdir = self.get_temp_dir()
        self._multiplexer = event_multiplexer.EventMultiplexer()

        provider = data_provider.MultiplexerDataProvider(
            self._multiplexer, self._logdir)
        ctx = base_plugin.TBContext(
            flags=argparse.Namespace(generic_data="true"),
            logdir=self._logdir,
            multiplexer=self._multiplexer,
            data_provider=provider,
        )
        self._plugin = metrics_plugin.MetricsPlugin(ctx)
 def _send_request(self, path_prefix, pathname):
     """GET `pathname` from a CorePlugin app mounted at `path_prefix`."""
     logdir = self.get_temp_dir()
     multiplexer = event_multiplexer.EventMultiplexer()
     context = base_plugin.TBContext(
         assets_zip_provider=get_test_assets_zip_provider(),
         logdir=logdir,
         data_provider=data_provider.MultiplexerDataProvider(
             multiplexer, logdir),
         window_title="",
         flags=FakeFlags(path_prefix=path_prefix),
     )
     app = application.TensorBoardWSGI(
         [core_plugin.CorePlugin(context)], path_prefix=path_prefix)
     server = werkzeug_test.Client(app, wrappers.BaseResponse)
     return server.get(pathname)
# Example #22
    def create_plugin(self, generate_testdata=True):
        """Return an NpmiPlugin over self.logdir, optionally generating data."""
        if generate_testdata:
            self.generate_testdata()

        multiplexer = event_multiplexer.EventMultiplexer()
        multiplexer.AddRunsFromDirectory(self.logdir)
        multiplexer.Reload()

        ctx = base_plugin.TBContext(
            logdir=self.logdir,
            multiplexer=multiplexer,
            data_provider=data_provider.MultiplexerDataProvider(
                multiplexer, self.logdir),
        )
        return npmi_plugin.NpmiPlugin(ctx)
    def testEnvironmentDebugOnExplicitly(self):
        """/data/environment carries debug info when explicitly enabled."""
        logdir = self.get_temp_dir()
        multiplexer = event_multiplexer.EventMultiplexer()
        provider = data_provider.MultiplexerDataProvider(multiplexer, logdir)
        context = base_plugin.TBContext(
            assets_zip_provider=get_test_assets_zip_provider(),
            logdir=logdir,
            data_provider=provider,
            window_title="title foo",
        )
        plugin = core_plugin.CorePlugin(context, include_debug_info=True)
        server = werkzeug_test.Client(
            application.TensorBoardWSGI([plugin]), wrappers.BaseResponse)

        parsed_object = self._get_json(server, "/data/environment")
        self.assertIn("debug", parsed_object)
    def setUp(self):
        """Generate PR-curve demo data and build the plugin under test."""
        super(PrCurvesPluginTest, self).setUp()
        logdir = os.path.join(self.get_temp_dir(), "logdir")

        # Generate data.
        pr_curve_demo.run_all(
            logdir=logdir, steps=3, thresholds=5, verbose=False)

        # Create a multiplexer for reading the data we just wrote.
        multiplexer = event_multiplexer.EventMultiplexer()
        multiplexer.AddRunsFromDirectory(logdir)
        multiplexer.Reload()

        context = base_plugin.TBContext(
            logdir=logdir,
            data_provider=data_provider.MultiplexerDataProvider(
                multiplexer, logdir),
        )
        self.plugin = pr_curves_plugin.PrCurvesPlugin(context)
    def create_plugin(self, generate_testdata=True, include_text=True):
        """Run a test with a `data_provider`."""
        if generate_testdata:
            self.generate_testdata(include_text=include_text)

        multiplexer = event_multiplexer.EventMultiplexer()
        multiplexer.AddRunsFromDirectory(self.logdir)
        multiplexer.Reload()

        ctx = base_plugin.TBContext(
            logdir=self.logdir,
            multiplexer=multiplexer,
            data_provider=data_provider.MultiplexerDataProvider(
                multiplexer, self.logdir),
        )
        return text_v2_plugin.TextV2Plugin(ctx)
    def load_plugin(self, run_names):
        """Write one run per name and return a ScalarsPlugin reading them."""
        logdir = self.get_temp_dir()
        for name in run_names:
            self.generate_run(logdir, name)
        # Keep all _STEPS tensor events — don't truncate my test data, please.
        multiplexer = event_multiplexer.EventMultiplexer(
            size_guidance={tag_types.TENSORS: self._STEPS})
        multiplexer.AddRunsFromDirectory(logdir)
        multiplexer.Reload()

        ctx = base_plugin.TBContext(
            logdir=logdir,
            data_provider=data_provider.MultiplexerDataProvider(
                multiplexer, logdir),
        )
        return scalars_plugin.ScalarsPlugin(ctx)
def standard_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider):
    """Construct a TensorBoardWSGIApp with standard plugins and multiplexer.

    Args:
      flags: An argparse.Namespace containing TensorBoard CLI flags.
      plugin_loaders: A list of TBLoader instances.
      assets_zip_provider: See TBContext documentation for more information.

    Returns:
      The new TensorBoard WSGI application.

    :type plugin_loaders: list[base_plugin.TBLoader]
    :rtype: TensorBoardWSGI
    """
    # Regular logdir loading mode.
    multiplexer = event_multiplexer.EventMultiplexer(
        size_guidance=DEFAULT_SIZE_GUIDANCE,
        tensor_size_guidance=_apply_tensor_size_guidance(
            flags.samples_per_plugin),
        purge_orphaned_data=flags.purge_orphaned_data,
        max_reload_threads=flags.max_reload_threads,
        event_file_active_filter=_get_event_file_active_filter(flags),
    )
    data_provider = event_data_provider.MultiplexerDataProvider(
        multiplexer, flags.logdir or flags.logdir_spec
    )

    reload_interval = flags.reload_interval
    if reload_interval >= 0:
        # We either reload the multiplexer once when TensorBoard starts up,
        # or we continuously reload it (a negative interval disables both).
        if flags.logdir:
            path_to_run = {os.path.expanduser(flags.logdir): None}
        else:
            path_to_run = parse_event_files_spec(flags.logdir_spec)
        start_reloading_multiplexer(
            multiplexer, path_to_run, reload_interval, flags.reload_task
        )
    return TensorBoardWSGIApp(
        flags, plugin_loaders, data_provider, assets_zip_provider, multiplexer
    )
# Example #28
 def wrapper(self, *args, **kwargs):
     """Run `fn` against an ImagesPlugin under both data-access modes."""
     (logdir, multiplexer) = self._create_data()

     def run_case(ctx):
         # Shared per-mode setup: build the plugin, attach test attrs, call fn.
         plugin = images_plugin.ImagesPlugin(ctx)
         self._initialize_plugin_specific_attrs(plugin)
         fn(self, plugin, *args, **kwargs)

     with self.subTest("bare multiplexer"):
         run_case(base_plugin.TBContext(
             logdir=logdir, multiplexer=multiplexer))
     with self.subTest("generic data provider"):
         provider = data_provider.MultiplexerDataProvider(
             multiplexer, logdir)
         run_case(base_plugin.TBContext(
             flags=argparse.Namespace(generic_data="true"),
             logdir=logdir,
             multiplexer=multiplexer,
             data_provider=provider,
         ))
# Example #29
 def wrapper(self, *args, **kwargs):
     """Run `fn` against a TextPlugin under both data-access modes."""
     if generate_testdata:
         self.generate_testdata(include_text=include_text)
     multiplexer = event_multiplexer.EventMultiplexer()
     multiplexer.AddRunsFromDirectory(self.logdir)
     multiplexer.Reload()
     with self.subTest("bare multiplexer"):
         ctx = base_plugin.TBContext(
             logdir=self.logdir, multiplexer=multiplexer)
         fn(self, text_plugin.TextPlugin(ctx), *args, **kwargs)
     with self.subTest("generic data provider"):
         ctx = base_plugin.TBContext(
             flags=argparse.Namespace(generic_data="true"),
             logdir=self.logdir,
             multiplexer=multiplexer,
             data_provider=data_provider.MultiplexerDataProvider(
                 multiplexer, self.logdir),
         )
         fn(self, text_plugin.TextPlugin(ctx), *args, **kwargs)
# Example #30
 def create_provider(self):
     """Return a MultiplexerDataProvider over a fresh multiplexer."""
     multiplexer = self.create_multiplexer()
     provider = data_provider.MultiplexerDataProvider(multiplexer, self.logdir)
     return provider