Example #1
 def _test_distributions(self, run_name, tag_name, should_work=True):
     self.set_up_with_runs(
         [
             self._RUN_WITH_SCALARS,
             self._RUN_WITH_LEGACY_DISTRIBUTION,
             self._RUN_WITH_DISTRIBUTION,
         ]
     )
     if should_work:
         (data, mime_type) = self.plugin.distributions_impl(
             context.RequestContext(), tag_name, run_name, experiment="exp"
         )
         self.assertEqual("application/json", mime_type)
         self.assertEqual(len(data), self._STEPS)
         for i in range(self._STEPS):
             [_unused_wall_time, step, bps_and_icdfs] = data[i]
             self.assertEqual(i, step)
             (bps, _unused_icdfs) = zip(*bps_and_icdfs)
             self.assertEqual(bps, compressor.NORMAL_HISTOGRAM_BPS)
     else:
         with self.assertRaises(errors.NotFoundError):
             self.plugin.distributions_impl(
                 context.RequestContext(),
                 self._DISTRIBUTION_TAG,
                 run_name,
                 experiment="exp",
             )
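Each datum returned by distributions_impl above is a [wall_time, step, bps_and_icdfs] triple: bps_and_icdfs pairs the compression basis points (compressor.NORMAL_HISTOGRAM_BPS) with the corresponding inverse-CDF values, which is why the test can recover bps with zip(*bps_and_icdfs).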
Example #2
    def test_image_data_from_time_series_query(self):
        self._write_image("run1", "images/tagA", samples=3)
        self._multiplexer.Reload()

        requests = [
            {
                "plugin": "images",
                "tag": "images/tagA",
                "run": "run1",
                "sample": 2,
            }
        ]
        original_response = self._plugin._time_series_impl(
            context.RequestContext(), "expid", requests
        )
        response = self._plugin._time_series_impl(
            context.RequestContext(), "expid", requests
        )
        clean_response = self._clean_time_series_responses(response)

        self.assertEqual(
            [
                {
                    "plugin": "images",
                    "tag": "images/tagA",
                    "run": "run1",
                    "sample": 2,
                    "runToSeries": {
                        "run1": [
                            {
                                "wallTime": "<wall_time>",
                                "step": 0,
                                "imageId": "<image_id>",
                            }
                        ]
                    },
                }
            ],
            clean_response,
        )

        image_id = original_response[0]["runToSeries"]["run1"][0]["imageId"]
        (data, content_type) = self._plugin._image_data_impl(
            context.RequestContext(), image_id
        )

        self.assertIsInstance(data, bytes)
        self.assertGreater(len(data), 0)
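The _clean_time_series_responses helper normalizes nondeterministic fields, replacing real wall times and image IDs with the "<wall_time>" and "<image_id>" placeholders so the response can be compared with a plain assertEqual; the unmodified original_response is kept around to extract a real imageId for the follow-up _image_data_impl call.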
Example #3
 def test_index(self):
     self.set_up_with_runs(
         [
             self._RUN_WITH_SCALARS,
             self._RUN_WITH_LEGACY_DISTRIBUTION,
             self._RUN_WITH_DISTRIBUTION,
         ]
     )
     self.assertEqual(
         {
             # _RUN_WITH_SCALARS omitted: No distribution data.
             self._RUN_WITH_LEGACY_DISTRIBUTION: {
                 self._LEGACY_DISTRIBUTION_TAG: {
                     "displayName": self._LEGACY_DISTRIBUTION_TAG,
                     "description": "",
                 },
             },
             self._RUN_WITH_DISTRIBUTION: {
                 "%s/histogram_summary"
                 % self._DISTRIBUTION_TAG: {
                     "displayName": self._DISPLAY_NAME,
                     "description": self._HTML_DESCRIPTION,
                 },
             },
         },
         self.plugin.index_impl(context.RequestContext(), experiment="exp"),
     )
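The expected value above shows the shape of the index: a run -> tag -> metadata mapping. A minimal sketch (not part of the test) of walking that structure, assuming index_impl returns the plain dict asserted here:

    index = self.plugin.index_impl(context.RequestContext(), experiment="exp")
    for run, tags in index.items():
        for tag, info in tags.items():
            # Each entry carries a display name and an HTML description.
            print(run, tag, info["displayName"], info["description"])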
Example #4
 def testPluginIndexImpl(self):
     plugin = self.load_plugin()
     run_to_tags = plugin.index_impl(context.RequestContext(),
                                     experiment="123")
     self.assertItemsEqual(["fry", "leela"], run_to_tags.keys())
     self.assertItemsEqual(["message", "vector"], run_to_tags["fry"])
     self.assertItemsEqual(["message", "vector"], run_to_tags["leela"])
Example #5
 def testAnnotations(self):
     plugin = self.create_plugin()
     annotations = plugin.annotations_impl(
         context.RequestContext(), experiment="exp",
     )
     self.assertItemsEqual(["name_1", "name_2"], annotations["run_1"])
     self.assertItemsEqual(["name_1", "name_2"], annotations["run_2"])
Example #6
 def test_index(self):
     plugin = self.load_plugin(
         [
             self._RUN_WITH_SCALARS,
             self._RUN_WITH_LEGACY_HISTOGRAM,
             self._RUN_WITH_HISTOGRAM,
         ]
     )
     self.assertEqual(
         {
             # _RUN_WITH_SCALARS omitted: No histogram data.
             self._RUN_WITH_LEGACY_HISTOGRAM: {
                 self._LEGACY_HISTOGRAM_TAG: {
                     "displayName": self._LEGACY_HISTOGRAM_TAG,
                     "description": "",
                 },
             },
             self._RUN_WITH_HISTOGRAM: {
                 "%s/histogram_summary"
                 % self._HISTOGRAM_TAG: {
                     "displayName": self._DISPLAY_NAME,
                     "description": self._HTML_DESCRIPTION,
                 },
             },
         },
         plugin.index_impl(context.RequestContext(), experiment="exp"),
     )
Example #7
    def test_time_series_scalar(self):
        self._write_scalar_data("run1", "scalars/tagA", [0, 100, -200])
        self._multiplexer.Reload()

        requests = [{"plugin": "scalars", "tag": "scalars/tagA"}]
        response = self._plugin._time_series_impl(context.RequestContext(), "",
                                                  requests)
        clean_response = self._clean_time_series_responses(response)

        self.assertEqual(
            [{
                "plugin": "scalars",
                "tag": "scalars/tagA",
                "runToSeries": {
                    "run1": [
                        {
                            "wallTime": "<wall_time>",
                            "step": 0,
                            "value": 0.0,
                        },
                        {
                            "wallTime": "<wall_time>",
                            "step": 1,
                            "value": 100.0,
                        },
                        {
                            "wallTime": "<wall_time>",
                            "step": 2,
                            "value": -200.0,
                        },
                    ]
                },
            }],
            clean_response,
        )
Example #8
    def test_time_series_single_request_specific_run(self):
        self._write_scalar_data("run1", "scalars/tagA", [0])
        self._write_scalar_data("run2", "scalars/tagA", [1])

        self._multiplexer.Reload()

        requests = [{
            "plugin": "scalars",
            "tag": "scalars/tagA",
            "run": "run2"
        }]
        response = self._plugin._time_series_impl(context.RequestContext(), "",
                                                  requests)
        clean_response = self._clean_time_series_responses(response)

        self.assertEqual(
            [{
                "plugin": "scalars",
                "runToSeries": {
                    "run2": [
                        {
                            "step": 0,
                            "value": 1.0,
                            "wallTime": "<wall_time>",
                        },
                    ],
                },
                "tag": "scalars/tagA",
                "run": "run2",
            }],
            clean_response,
        )
Example #9
    def test_tags_conflicting_description(self):
        self._write_scalar("run1", "scalars/tagA", None)
        self._write_scalar("run2", "scalars/tagA", "tagA is hot")
        self._write_scalar("run3", "scalars/tagA", "tagA is cold")
        self._write_scalar("run4", "scalars/tagA", "tagA is cold")
        self._write_histogram("run1", "histograms/tagA", None)
        self._write_histogram("run2", "histograms/tagA", "tagA is hot")
        self._write_histogram("run3", "histograms/tagA", "tagA is cold")
        self._write_histogram("run4", "histograms/tagA", "tagA is cold")
        self._multiplexer.Reload()

        response = self._plugin._tags_impl(context.RequestContext(), "eid")

        expected_composite_description = ("<h1>Multiple descriptions</h1>\n"
                                          "<h2>For runs: run3, run4</h2>\n"
                                          "<p>tagA is cold</p>\n"
                                          "<h2>For run: run2</h2>\n"
                                          "<p>tagA is hot</p>")
        self.assertEqual(
            {"scalars/tagA": expected_composite_description},
            response["scalars"]["tagDescriptions"],
        )
        self.assertEqual(
            {"histograms/tagA": expected_composite_description},
            response["histograms"]["tagDescriptions"],
        )
Example #10
    def test_tags_unsafe_conflicting_description(self):
        self._write_scalar("<&#run1>", "scalars/<&#tag>", None)
        self._write_scalar("<&#run2>", "scalars/<&#tag>", "<&# is hot>")
        self._write_scalar("<&#run3>", "scalars/<&#tag>", "<&# is cold>")
        self._write_scalar("<&#run4>", "scalars/<&#tag>", "<&# is cold>")
        self._write_histogram("<&#run1>", "histograms/<&#tag>", None)
        self._write_histogram("<&#run2>", "histograms/<&#tag>", "<&# is hot>")
        self._write_histogram("<&#run3>", "histograms/<&#tag>", "<&# is cold>")
        self._write_histogram("<&#run4>", "histograms/<&#tag>", "<&# is cold>")
        self._multiplexer.Reload()

        response = self._plugin._tags_impl(context.RequestContext(), "eid")

        expected_composite_description = (
            "<h1>Multiple descriptions</h1>\n"
            "<h2>For runs: &lt;&amp;#run3&gt;, &lt;&amp;#run4&gt;</h2>\n"
            "<p>&lt;&amp;# is cold&gt;</p>\n"
            "<h2>For run: &lt;&amp;#run2&gt;</h2>\n"
            "<p>&lt;&amp;# is hot&gt;</p>")
        self.assertEqual(
            {"scalars/<&#tag>": expected_composite_description},
            response["scalars"]["tagDescriptions"],
        )
        self.assertEqual(
            {"histograms/<&#tag>": expected_composite_description},
            response["histograms"]["tagDescriptions"],
        )
Example #11
 def testValues(self):
     plugin = self.create_plugin()
     values = plugin.values_impl(context.RequestContext(), experiment="exp")
     self.assertItemsEqual([1.0, -1.0], values["run_1"][0])
     self.assertItemsEqual([0.5, -0.5], values["run_1"][1])
     self.assertItemsEqual([1.0, -1.0], values["run_2"][0])
     self.assertItemsEqual([-0.5, None], values["run_2"][1])
Example #12
    def is_active(self):
        """The graphs plugin is active iff any run has a graph or metadata."""
        if self._data_provider:
            return False  # `list_plugins` as called by TB core suffices

        empty_context = context.RequestContext()  # not used
        return bool(self.info_impl(empty_context))
Example #13
 def testMergedLayout(self):
     ctx = context.RequestContext()
     parsed_layout = layout_pb2.Layout()
     json_format.Parse(self.plugin.layout_impl(ctx, "exp_id"), parsed_layout)
     correct_layout = layout_pb2.Layout(
         category=[
             # A category with this name is also present in a layout for a
             # different run (the logdir run)
             layout_pb2.Category(
                 title="cross entropy",
                 chart=[
                     layout_pb2.Chart(
                         title="cross entropy",
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r"cross entropy"],
                         ),
                     ),
                     layout_pb2.Chart(
                         title="cross entropy margin chart",
                         margin=layout_pb2.MarginChartContent(
                             series=[
                                 layout_pb2.MarginChartContent.Series(
                                     value="cross entropy",
                                     lower="cross entropy lower",
                                     upper="cross entropy upper",
                                 ),
                             ],
                         ),
                     ),
                 ],
                 closed=True,
             ),
             layout_pb2.Category(
                 title="mean biases",
                 chart=[
                     layout_pb2.Chart(
                         title="mean layer biases",
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[
                                 r"mean/layer0/biases",
                                 r"mean/layer1/biases",
                             ],
                         ),
                     ),
                 ],
             ),
             layout_pb2.Category(
                 title="std weights",
                 chart=[
                     layout_pb2.Chart(
                         title="stddev layer weights",
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r"stddev/layer\d+/weights"],
                         ),
                     ),
                 ],
             ),
         ]
     )
     self.assertProtoEquals(correct_layout, parsed_layout)
Example #14
 def testMetrics(self):
     plugin = self.create_plugin()
     metrics = plugin.metrics_impl(
         context.RequestContext(), experiment="exp",
     )
     self.assertItemsEqual(["A", "B"], metrics["run_1"])
     self.assertItemsEqual(["A", "B"], metrics["run_2"])
Example #15
 def test_experiment_with_experiment_tag(self):
     experiment = """
         description: 'Test experiment'
         metric_infos: [
           { name: { tag: 'current_temp' } }
         ]
     """
     run = "exp"
     tag = metadata.EXPERIMENT_TAG
     m = summary_pb2.SummaryMetadata()
     m.data_class = summary_pb2.DATA_CLASS_TENSOR
     m.plugin_data.plugin_name = metadata.PLUGIN_NAME
     m.plugin_data.content = self._serialized_plugin_data(
         DATA_TYPE_EXPERIMENT, experiment)
     self._mock_multiplexer.AllSummaryMetadata.side_effect = None
     self._mock_multiplexer.AllSummaryMetadata.return_value = {
         run: {
             tag: m
         }
     }
     ctxt = backend_context.Context(self._mock_tb_context)
     request_ctx = context.RequestContext()
     self.assertProtoEquals(
         experiment,
         ctxt.experiment_from_metadata(
             request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123")),
     )
Example #16
    def test_info(self, plugin):
        expected = {
            "_RUN_WITH_GRAPH_WITH_METADATA": {
                "run": "_RUN_WITH_GRAPH_WITH_METADATA",
                "run_graph": True,
                "tags": {
                    "secret-stats": {
                        "conceptual_graph": False,
                        "profile": True,
                        "tag": "secret-stats",
                        "op_graph": False,
                    },
                },
            },
            "_RUN_WITH_GRAPH_WITHOUT_METADATA": {
                "run": "_RUN_WITH_GRAPH_WITHOUT_METADATA",
                "run_graph": True,
                "tags": {},
            },
            "_RUN_WITHOUT_GRAPH_WITH_METADATA": {
                "run": "_RUN_WITHOUT_GRAPH_WITH_METADATA",
                "run_graph": False,
                "tags": {
                    "secret-stats": {
                        "conceptual_graph": False,
                        "profile": True,
                        "tag": "secret-stats",
                        "op_graph": False,
                    },
                },
            },
        }

        actual = plugin.info_impl(context.RequestContext(), "eid")
        self.assertEqual(expected, actual)
Example #17
    def _update_configs(self):
        """Updates `self._configs` and `self._run_paths`."""
        if self.data_provider and self.logdir:
            # Create a background context; we may not be in a request.
            ctx = context.RequestContext()
            run_paths = {
                run.run_name: os.path.join(self.logdir, run.run_name)
                for run in self.data_provider.list_runs(ctx, experiment_id="")
            }
        else:
            run_paths = {}
        run_paths_changed = run_paths != self._run_paths
        self._run_paths = run_paths

        run_path_pairs = list(self._run_paths.items())
        self._append_plugin_asset_directories(run_path_pairs)
        # Also accept the root logdir as a model checkpoint directory,
        # so that the projector still works when there are no runs.
        # (Case on `run` rather than `path` to avoid issues with
        # absolute/relative paths on any filesystems.)
        if "." not in self._run_paths:
            run_path_pairs.append((".", self.logdir))
        if run_paths_changed or _latest_checkpoints_changed(
            self._configs, run_path_pairs
        ):
            self.readers = {}
            self._configs, self.config_fpaths = self._read_latest_config_files(
                run_path_pairs
            )
            self._augment_configs_with_checkpoint_info()
Example #18
    def test_image_bad_request(self):
        self._write_image("run1", "images/tagA", 1, None)
        self._multiplexer.Reload()

        invalid_sample = 999
        requests = [
            {
                "plugin": "images",
                "tag": "images/tagA",
                "sample": invalid_sample,
                "run": "run1",
            },
            {
                "plugin": "images",
                "tag": "images/tagA",
                "run": "run1"
            },
            {
                "plugin": "images",
                "tag": "images/tagA",
            },
        ]
        response = self._plugin._time_series_impl(context.RequestContext(),
                                                  "expid", requests)
        errors = [
            series_response.get("error", "") for series_response in response
        ]

        self.assertEqual(errors, ["", "Missing sample", "Missing run"])
Example #19
    def test_tags(self):
        self._write_scalar("run1", "scalars/tagA", None)
        self._write_scalar("run1", "scalars/tagA", None)
        self._write_scalar("run1", "scalars/tagB", None)
        self._write_scalar("run2", "scalars/tagB", None)
        self._write_histogram("run1", "histograms/tagA", None)
        self._write_histogram("run1", "histograms/tagA", None)
        self._write_histogram("run1", "histograms/tagB", None)
        self._write_histogram("run2", "histograms/tagB", None)
        self._write_image("run1", "images/tagA", 1, None)
        self._write_image("run1", "images/tagA", 2, None)
        self._write_image("run1", "images/tagB", 3, None)
        self._write_image("run2", "images/tagB", 4, None)

        self._multiplexer.Reload()

        response = self._plugin._tags_impl(context.RequestContext(), "eid")

        self.assertEqual(
            {
                "runTagInfo": {
                    "run1": ["scalars/tagA", "scalars/tagB"],
                    "run2": ["scalars/tagB"],
                },
                "tagDescriptions": {},
            },
            response["scalars"],
        )
        self.assertEqual(
            {
                "runTagInfo": {
                    "run1": ["histograms/tagA", "histograms/tagB"],
                    "run2": ["histograms/tagB"],
                },
                "tagDescriptions": {},
            },
            response["histograms"],
        )
        self.assertEqual(
            {
                "tagDescriptions": {},
                "tagRunSampledInfo": {
                    "images/tagA": {
                        "run1": {
                            "maxSamplesPerStep": 2
                        }
                    },
                    "images/tagB": {
                        "run1": {
                            "maxSamplesPerStep": 3
                        },
                        "run2": {
                            "maxSamplesPerStep": 4
                        },
                    },
                },
            },
            response["images"],
        )
Example #20
 def test_experiment_without_experiment_tag_many_distinct_values(self):
     self.session_1_start_info_ = """
         hparams:[
           {key: 'batch_size' value: {number_value: 100}},
           {key: 'lr' value: {string_value: '0.01'}}
         ]
     """
     self.session_2_start_info_ = """
         hparams:[
           {key: 'lr' value: {number_value: 0.02}},
           {key: 'model_type' value: {string_value: 'CNN'}}
         ]
     """
     self.session_3_start_info_ = """
         hparams:[
           {key: 'batch_size' value: {bool_value: true}},
           {key: 'model_type' value: {string_value: 'CNN'}}
         ]
     """
     expected_exp = """
         hparam_infos: {
           name: 'batch_size'
           type: DATA_TYPE_STRING
         }
         hparam_infos: {
           name: 'lr'
           type: DATA_TYPE_STRING
         }
         hparam_infos: {
           name: 'model_type'
           type: DATA_TYPE_STRING
           domain_discrete: {
             values: [{string_value: 'CNN'}]
           }
         }
         metric_infos: {
           name: {group: '', tag: 'accuracy'}
         }
         metric_infos: {
           name: {group: '', tag: 'loss'}
         }
         metric_infos: {
           name: {group: 'eval', tag: 'loss'}
         }
         metric_infos: {
           name: {group: 'train', tag: 'loss'}
         }
     """
     ctxt = backend_context.Context(
         self._mock_tb_context, max_domain_discrete_len=1
     )
     request_ctx = context.RequestContext()
     actual_exp = ctxt.experiment_from_metadata(
         request_ctx,
         "123",
         ctxt.hparams_metadata(request_ctx, "123"),
     )
     _canonicalize_experiment(actual_exp)
     self.assertProtoEquals(expected_exp, actual_exp)
Example #21
 def testEmbeddings(self):
     plugin = self.create_plugin()
     embeddings = plugin.embeddings_impl(context.RequestContext(),
                                         experiment="exp")
     self.assertItemsEqual([1.0, 0.5], embeddings["run_1"][0])
     self.assertItemsEqual([-0.5, 0.5], embeddings["run_1"][1])
     self.assertItemsEqual([1.0, 0.5], embeddings["run_2"][0])
     self.assertItemsEqual([-0.5, 0.5], embeddings["run_2"][1])
Example #22
 def test_args(self):
     auth = auth_lib.AuthContext({}, {"REQUEST_METHOD": "GET"})
     ctx = context.RequestContext(auth=auth,
                                  remote_ip=REMOTE_IP,
                                  x_forwarded_for=X_FORWARDED_FOR_IPS)
     self.assertEqual(ctx.auth, auth)
     self.assertEqual(ctx.remote_ip, REMOTE_IP)
     self.assertEqual(ctx.x_forwarded_for, X_FORWARDED_FOR_IPS)
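A minimal sketch (not from the test suite) of building a request-scoped context from a WSGI environ, using only the constructor arguments this test exercises; the environ keys and header parsing are illustrative assumptions:

    def make_request_context(environ):
        # Hypothetical helper: wrap the raw environ in an AuthContext and
        # forward the client-address fields to RequestContext.
        auth = auth_lib.AuthContext({}, environ)
        forwarded = environ.get("HTTP_X_FORWARDED_FOR", "")
        return context.RequestContext(
            auth=auth,
            remote_ip=environ.get("REMOTE_ADDR"),
            x_forwarded_for=[ip.strip() for ip in forwarded.split(",") if ip.strip()],
        )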
Example #23
 def test_run_metadata(self, plugin):
     ctx = context.RequestContext()
     result = plugin.run_metadata_impl(ctx, "123",
                                       _RUN_WITH_GRAPH_WITH_METADATA[0],
                                       self._METADATA_TAG)
     (metadata_pbtxt, mime_type) = result
     self.assertEqual(mime_type, "text/x-protobuf")
     text_format.Parse(metadata_pbtxt, config_pb2.RunMetadata())
Example #24
 def testLayoutFromSingleRun(self):
     # The foo directory contains 1 single layout.
     ctx = context.RequestContext()
     local_plugin = self.createPlugin(os.path.join(self.logdir, "foo"))
     parsed_layout = layout_pb2.Layout()
     json_format.Parse(
         local_plugin.layout_impl(ctx, "exp_id"), parsed_layout
     )
     self.assertProtoEquals(self.foo_layout, parsed_layout)
Example #25
 def _get_graph(self, plugin, run=None, **kwargs):
     """Set up runs, then fetch and return the graph as a proto."""
     (graph_pbtxt, mime_type) = plugin.graph_impl(
         context.RequestContext(),
         run if run is not None else _RUN_WITH_GRAPH_WITH_METADATA[0],
         **kwargs,
     )
     self.assertEqual(mime_type, "text/x-protobuf")
     return text_format.Parse(graph_pbtxt, tf.compat.v1.GraphDef())
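Hypothetical call sites for the helper above ("some_run" is a placeholder name); any extra kwargs are forwarded verbatim to graph_impl:

    graph = self._get_graph(plugin)                  # default run
    graph = self._get_graph(plugin, run="some_run")  # explicit run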
Example #26
 def testTags(self):
     plugin = self.create_plugin()
     tags = plugin.tags_impl(context.RequestContext(), experiment="exp")
     tags = json.loads(tags)
     gt_runs = ["run_1", "run_2"]
     gt_tags = ["_npmi_/annotations", "_npmi_/metrics", "_npmi_/values"]
     self.assertItemsEqual(gt_runs, tags.keys())
     self.assertItemsEqual(gt_tags, tags["run_1"])
     self.assertItemsEqual(gt_tags, tags["run_2"])
Example #27
 def _run_handler(self, request):
     request_proto = api_pb2.ListMetricEvalsRequest()
     text_format.Merge(request, request_proto)
     handler = list_metric_evals.Handler(
         context.RequestContext(),
         request_proto,
         self._mock_scalars_plugin,
         "exp_id",
     )
     return handler.run()
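A usage sketch for the helper above: the request is supplied in proto text format and merged into api_pb2.ListMetricEvalsRequest. The field names below are illustrative assumptions, not taken from the test:

    response = self._run_handler(
        """
        session_name: 'session_1'
        metric_name: { group: 'eval' tag: 'loss' }
        """
    )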
Example #28
 def test_scalars_with_histogram(self):
     plugin = self.load_plugin([self._RUN_WITH_HISTOGRAM])
     with self.assertRaises(errors.NotFoundError):
         plugin.scalars_impl(
             context.RequestContext(),
             self._HISTOGRAM_TAG,
             self._RUN_WITH_HISTOGRAM,
             "eid",
             scalars_plugin.OutputFormat.JSON,
         )
Example #29
    def testPrCurvesRaisesValueErrorWhenNoData(self):
        """Tests that the method for obtaining PR curve data raises a
        ValueError.

        The handler should raise a ValueError when no PR curve data can
        be found for a certain run-tag combination.
        """
        with self.assertRaisesRegex(
            ValueError, r"No PR curves could be found"
        ):
            self.plugin.pr_curves_impl(
                context.RequestContext(), "123", ["colors"], "non_existent_tag"
            )

        with self.assertRaisesRegex(
            ValueError, r"No PR curves could be found"
        ):
            self.plugin.pr_curves_impl(
                context.RequestContext(),
                "123",
                ["non_existent_run"],
                "blue/pr_curves",
            )
Example #30
    def is_active(self):
        """This plugin is active iff any run has at least one histograms
        tag."""
        if self._data_provider:
            return False  # `list_plugins` as called by TB core suffices

        if self._multiplexer:
            empty_context = context.RequestContext()  # not used
            return any(self.index_impl(empty_context, experiment="").values())

        return False
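As in Example #12, the plugin reports itself inactive whenever a data provider is present, because TensorBoard core determines plugin activity through list_plugins in that case; is_active only does real work on the legacy multiplexer path.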