Example no. 1
0
 def testMergedLayout(self):
     """The plugin merges layouts from all runs into one layout."""
     ctx = context.RequestContext()
     observed = layout_pb2.Layout()
     json_format.Parse(self.plugin.layout_impl(ctx, "exp_id"), observed)

     cross_entropy_charts = [
         layout_pb2.Chart(
             title="cross entropy",
             multiline=layout_pb2.MultilineChartContent(
                 tag=[r"cross entropy"]
             ),
         ),
         layout_pb2.Chart(
             title="cross entropy margin chart",
             margin=layout_pb2.MarginChartContent(
                 series=[
                     layout_pb2.MarginChartContent.Series(
                         value="cross entropy",
                         lower="cross entropy lower",
                         upper="cross entropy upper",
                     )
                 ]
             ),
         ),
     ]
     expected = layout_pb2.Layout(
         category=[
             # This category title also appears in the layout of a different
             # run (the logdir run), so the merged layout combines the charts
             # of both categories under one title.
             layout_pb2.Category(
                 title="cross entropy",
                 chart=cross_entropy_charts,
                 closed=True,
             ),
             layout_pb2.Category(
                 title="mean biases",
                 chart=[
                     layout_pb2.Chart(
                         title="mean layer biases",
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r"mean/layer0/biases", r"mean/layer1/biases"]
                         ),
                     )
                 ],
             ),
             layout_pb2.Category(
                 title="std weights",
                 chart=[
                     layout_pb2.Chart(
                         title="stddev layer weights",
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r"stddev/layer\d+/weights"]
                         ),
                     )
                 ],
             ),
         ]
     )
     self.assertProtoEquals(expected, observed)
Example no. 2
0
    def _dump_tensors(self):
        """Write (or merge and rewrite) the custom-scalars layout summary.

        Builds a layout proto from the scopes and tensor names recorded so
        far, merges it with any layout previously written to
        ``self._layout_writer_dir``, and writes the result back as a
        custom-scalar summary. Does nothing if no tensor was recorded.
        """
        if not self._has_recorded_tensor:
            return

        def _write_layout(layout_proto):
            # Serialize *layout_proto* as a custom-scalar summary and flush.
            self._layout_writer = tf.summary.FileWriter(
                self._layout_writer_dir)
            layout_summary = summary_lib.custom_scalar_pb(layout_proto)
            self._layout_writer.add_summary(layout_summary)
            self._layout_writer.close()

        layout_categories = []
        for scope in self._scope_tensor:
            charts = [
                layout_pb2.Chart(
                    title=name,
                    multiline=layout_pb2.MultilineChartContent(
                        # Match the tag itself but exclude its margin series.
                        # (Plain concatenation replaces the previous
                        # `'name(...)'.replace('name', name)` construction,
                        # which was an indirect way to build the same regex.)
                        tag=[name + r'(?!.*margin.*)']))
                for name in self._scope_tensor[scope]
            ]
            layout_categories.append(
                layout_pb2.Category(title=scope, chart=charts))

        if not layout_categories:
            return

        layout_proto_to_write = layout_pb2.Layout(category=layout_categories)

        try:
            # Load the former layout proto from self._layout_writer_dir.
            multiplexer = event_multiplexer.EventMultiplexer()
            multiplexer.AddRunsFromDirectory(self._layout_writer_dir)
            multiplexer.Reload()
            tensor_events = multiplexer.Tensors(
                '.', metadata.CONFIG_SUMMARY_TAG)
            shutil.rmtree(self._layout_writer_dir)

            # Parse the layout proto read from disk.
            string_array = tf.make_ndarray(tensor_events[0].tensor_proto)
            # np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
            # supported equivalent.
            content = string_array.item()
            layout_proto_from_disk = layout_pb2.Layout()
            layout_proto_from_disk.ParseFromString(
                tf.compat.as_bytes(content))

            # Merge the two layout protos via their JSON representations.
            merged_layout_json = merge(
                json_format.MessageToJson(layout_proto_from_disk),
                json_format.MessageToJson(layout_proto_to_write))
            merged_layout_proto = layout_pb2.Layout()
            json_format.Parse(str(merged_layout_json), merged_layout_proto)

            _write_layout(merged_layout_proto)
        except KeyError:
            # No previous layout exists on disk: write the current layout
            # as-is.
            _write_layout(layout_proto_to_write)
 def testMergedLayout(self):
     """Plugin merges per-run layouts; same-title categories are combined.

     NOTE(review): these charts pass ``tag=`` directly to ``Chart``, which
     only exists in older revisions of layout.proto; current versions accept
     tags only via ``MultilineChartContent`` — confirm the proto version in
     use before reusing this pattern.
     """
     parsed_layout = layout_pb2.Layout()
     json_format.Parse(self.plugin.layout_impl(), parsed_layout)
     correct_layout = layout_pb2.Layout(category=[
         layout_pb2.Category(
             title='cross entropy',
             chart=[
                 # Note that the "cross entropy 2" chart from layout for run
                 # foo is now merged within the existing "cross entropy"
                 # category because the categories have the same names.
                 layout_pb2.Chart(title='cross entropy',
                                  tag=[r'cross entropy']),
                 layout_pb2.Chart(title='cross entropy 2',
                                  tag=[r'cross entropy 2']),
             ],
             closed=True),
         layout_pb2.Category(
             title='mean biases',
             chart=[
                 layout_pb2.Chart(
                     title='mean layer biases',
                     tag=[r'mean/layer0/biases', r'mean/layer1/biases'])
             ]),
         layout_pb2.Category(title='std weights',
                             chart=[
                                 layout_pb2.Chart(
                                     title='stddev layer weights',
                                     tag=[r'stddev/layer\d+/weights'])
                             ]),
     ])
     self.assertProtoEquals(correct_layout, parsed_layout)
Example no. 4
0
    def testSetLayout(self):
        """Round-trip test: write a layout summary, read it back, compare."""
        layout_proto_to_write = layout_pb2.Layout(category=[
            layout_pb2.Category(
                title='mean biases',
                chart=[
                    layout_pb2.Chart(
                        title='mean layer biases',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'mean/layer\d+/biases'], )),
                ]),
            layout_pb2.Category(
                title='std weights',
                chart=[
                    layout_pb2.Chart(
                        title='stddev layer weights',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'stddev/layer\d+/weights'], )),
                ]),
            layout_pb2.Category(
                title='cross entropy ... and maybe some other values',
                chart=[
                    layout_pb2.Chart(
                        title='cross entropy',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'cross entropy'], )),
                    layout_pb2.Chart(
                        title='accuracy',
                        margin=layout_pb2.MarginChartContent(series=[
                            layout_pb2.MarginChartContent.Series(
                                value='accuracy',
                                lower='accuracy_lower_margin',
                                upper='accuracy_upper_margin')
                        ])),
                    layout_pb2.Chart(
                        title='max layer weights',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'max/layer1/.*', r'max/layer2/.*'], )),
                ],
                closed=True)
        ])

        # Write the data as a summary for the '.' run.
        with tf.Session() as s, tf.summary.FileWriter(self.logdir) as writer:
            writer.add_summary(s.run(summary.op(layout_proto_to_write)))

        # Read the data from disk.
        multiplexer = event_multiplexer.EventMultiplexer()
        multiplexer.AddRunsFromDirectory(self.logdir)
        multiplexer.Reload()
        tensor_events = multiplexer.Tensors('.', metadata.CONFIG_SUMMARY_TAG)
        self.assertEqual(1, len(tensor_events))

        # Parse the data. np.asscalar() was removed in NumPy 1.23, so use
        # the supported ndarray.item() instead.
        string_array = tf.make_ndarray(tensor_events[0].tensor_proto)
        content = string_array.item()
        layout_proto_from_disk = layout_pb2.Layout()
        layout_proto_from_disk.ParseFromString(tf.compat.as_bytes(content))

        # Verify the content.
        self.assertProtoEquals(layout_proto_to_write, layout_proto_from_disk)
    def __init__(self, *args, **kwargs):
        """Set up a temp logdir with layouts and scalar data for three runs.

        Runs created:
          * 'foo'  — a layout plus 'squares' scalar data;
          * 'bar'  — 'increments' scalar data only, no layout;
          * '.'    — a layout only, no scalar data.
        The 'cross entropy' category title appears in both layouts so tests
        can exercise category merging.
        """
        super(CustomScalarsPluginTest, self).__init__(*args, **kwargs)
        self.logdir = os.path.join(self.get_temp_dir(), 'logdir')
        os.makedirs(self.logdir)

        # Layout written for the logdir ('.') run.
        self.logdir_layout = layout_pb2.Layout(category=[
            layout_pb2.Category(title='cross entropy',
                                chart=[
                                    layout_pb2.Chart(title='cross entropy',
                                                     tag=[r'cross entropy']),
                                ],
                                closed=True)
        ])
        # Layout written for the 'foo' run.
        self.foo_layout = layout_pb2.Layout(category=[
            layout_pb2.Category(
                title='mean biases',
                chart=[
                    layout_pb2.Chart(
                        title='mean layer biases',
                        tag=[r'mean/layer0/biases', r'mean/layer1/biases'])
                ]),
            layout_pb2.Category(title='std weights',
                                chart=[
                                    layout_pb2.Chart(
                                        title='stddev layer weights',
                                        tag=[r'stddev/layer\d+/weights'])
                                ]),
            # A category with this name is also present in a layout for a
            # different run (the logdir run)
            layout_pb2.Category(title='cross entropy',
                                chart=[
                                    layout_pb2.Chart(title='cross entropy 2',
                                                     tag=[r'cross entropy 2']),
                                ])
        ])

        # Generate test data.
        with tf.summary.FileWriter(os.path.join(self.logdir, 'foo')) as writer:
            writer.add_summary(summary.pb(self.foo_layout))
            for step in range(4):
                writer.add_summary(scalar_summary.pb('squares', step * step),
                                   step)

        with tf.summary.FileWriter(os.path.join(self.logdir, 'bar')) as writer:
            for step in range(3):
                writer.add_summary(scalar_summary.pb('increments', step + 1),
                                   step)

        # The '.' run lacks scalar data but has a layout.
        with tf.summary.FileWriter(self.logdir) as writer:
            writer.add_summary(summary.pb(self.logdir_layout))

        self.plugin = self.createPlugin(self.logdir)
Example no. 6
0
def custom_scalars(layout):
    """Build a TensorBoard custom-scalars config summary from *layout*.

    Parameters
    ----------
    layout : dict
        Maps category title -> dict of chart title -> metadata, where
        metadata[0] is the chart type ("Margin" for a margin chart, anything
        else for a multiline chart) and metadata[1] is the tag list. A
        margin chart requires exactly three tags: (value, lower, upper).

    Returns
    -------
    Summary
        A summary proto carrying the serialized layout under the
        ``custom_scalars__config__`` tag.

    Raises
    ------
    ValueError
        If a margin chart does not supply exactly three tags.
    """
    categories = []
    for category_title, charts_spec in layout.items():
        charts = []
        # Fixed typo: the loop variable was previously "chart_meatadata".
        for chart_title, chart_metadata in charts_spec.items():
            chart_type, tags = chart_metadata[0], chart_metadata[1]
            if chart_type == "Margin":
                # An explicit ValueError replaces the previous bare assert,
                # which would be stripped under `python -O`.
                if len(tags) != 3:
                    raise ValueError(
                        "Margin chart requires exactly 3 tags "
                        "(value, lower, upper); got %d" % len(tags))
                mgcc = layout_pb2.MarginChartContent(series=[
                    layout_pb2.MarginChartContent.Series(
                        value=tags[0], lower=tags[1], upper=tags[2])
                ])
                chart = layout_pb2.Chart(title=chart_title, margin=mgcc)
            else:
                mlcc = layout_pb2.MultilineChartContent(tag=tags)
                chart = layout_pb2.Chart(title=chart_title, multiline=mlcc)
            charts.append(chart)
        categories.append(
            layout_pb2.Category(title=category_title, chart=charts))

    # Use a distinct name instead of rebinding the `layout` parameter.
    layout_proto = layout_pb2.Layout(category=categories)
    plugin_data = SummaryMetadata.PluginData(plugin_name="custom_scalars")
    smd = SummaryMetadata(plugin_data=plugin_data)
    tensor = TensorProto(
        dtype="DT_STRING",
        string_val=[layout_proto.SerializeToString()],
        tensor_shape=TensorShapeProto(),
    )
    return Summary(value=[
        Summary.Value(
            tag="custom_scalars__config__", tensor=tensor, metadata=smd)
    ])
Example no. 7
0
def custom_board_generator(graph):
    """Build a custom-scalars board layout for *graph*'s training metrics.

    Creates a fixed "Training" category of loss charts plus one chart per
    PI-cost coefficient (raw and scaled), built dynamically from
    ``len(graph.PI_costs)``.

    NOTE(review): the tag patterns end in '_*', which as a regex matches
    zero or more trailing underscores; '.*' may have been intended —
    confirm against the emitted scalar tag names.
    """
    num_coefficients = len(graph.PI_costs)

    # Coefficient charts must be built dynamically, one per PI cost term.
    # (Plain range() replaces np.arange() — no NumPy needed to index.)
    coeff_chart = [
        layout_pb2.Chart(title='Coeff_' + str(idx),
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r'Coeff_' + str(idx) + '_Comp_*']))
        for idx in range(num_coefficients)
    ]
    coeff_scaled_chart = [
        layout_pb2.Chart(title='Scaled_Coeff_' + str(idx),
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r'Scaled_Coeff_' + str(idx) + '_Comp_*']))
        for idx in range(num_coefficients)
    ]

    # Fixed charts for the "Training" category.
    training_charts = [
        layout_pb2.Chart(title='MSE_Losses',
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r'MSE_cost_*'])),
        layout_pb2.Chart(title='PI_Losses',
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r'PI_cost_*'])),
        layout_pb2.Chart(title='L1_Losses',
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r'L1_cost_*'])),
        layout_pb2.Chart(title='Total_cost',
                         multiline=layout_pb2.MultilineChartContent(
                             tag=['Total_cost'])),
        layout_pb2.Chart(title='Gradloss',
                         multiline=layout_pb2.MultilineChartContent(
                             tag=['Loss_Grad'])),
    ]

    # Actually making the board.
    custom_board = custom_scalar_pb(layout_pb2.Layout(category=[
        layout_pb2.Category(title='Training', chart=training_charts),
        layout_pb2.Category(title='Coefficients', chart=coeff_chart),
        layout_pb2.Category(title='Scaled_Coefficients',
                            chart=coeff_scaled_chart),
    ]))

    return custom_board
Example no. 8
0
def finalize_autosummaries():
    """Create the necessary ops to include autosummaries in TensorBoard report.

    Note: This should be done only once per graph.

    Returns:
        The custom-scalars layout summary protobuf when
        ``enable_custom_scalars`` is set, otherwise None. (The previous
        ``-> None`` annotation was wrong — the function does return the
        layout — so it has been removed.)
    """
    global _finalized
    tfutil.assert_tf_initialized()

    if _finalized:
        return None

    _finalized = True
    tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list])

    # Create summary ops.
    with tf.device(None), tf.control_dependencies(None):
        for name, vars_list in _vars.items():
            name_id = name.replace("/", "_")
            with tfutil.absolute_name_scope("Autosummary/" + name_id):
                # Summing the accumulators appears to yield
                # [count, sum, sum-of-squares]; dividing by the count (the
                # first element) yields the raw moments used below.
                moments = tf.add_n(vars_list)
                moments /= moments[0]
                with tf.control_dependencies([moments]):  # read before resetting
                    reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list]
                    with tf.name_scope(None), tf.control_dependencies(reset_ops):  # reset before reporting
                        mean = moments[1]
                        # Variance = E[x^2] - (E[x])^2.
                        std = tf.sqrt(moments[2] - tf.square(moments[1]))
                        tf.summary.scalar(name, mean)
                        if enable_custom_scalars:
                            tf.summary.scalar("xCustomScalars/" + name + "/margin_lo", mean - std)
                            tf.summary.scalar("xCustomScalars/" + name + "/margin_hi", mean + std)

    # Setup layout for custom scalars: group each series into a category and
    # chart by splitting its name on '/'.
    layout = None
    if enable_custom_scalars:
        cat_dict = OrderedDict()
        for series_name in sorted(_vars.keys()):
            p = series_name.split("/")
            cat = p[0] if len(p) >= 2 else ""
            chart = "/".join(p[1:-1]) if len(p) >= 3 else p[-1]
            if cat not in cat_dict:
                cat_dict[cat] = OrderedDict()
            if chart not in cat_dict[cat]:
                cat_dict[cat][chart] = []
            cat_dict[cat][chart].append(series_name)
        categories = []
        for cat_name, chart_dict in cat_dict.items():
            charts = []
            for chart_name, series_names in chart_dict.items():
                series = []
                for series_name in series_names:
                    # Margin bounds reference the xCustomScalars/* scalars
                    # emitted above.
                    series.append(layout_pb2.MarginChartContent.Series(
                        value=series_name,
                        lower="xCustomScalars/" + series_name + "/margin_lo",
                        upper="xCustomScalars/" + series_name + "/margin_hi"))
                margin = layout_pb2.MarginChartContent(series=series)
                charts.append(layout_pb2.Chart(title=chart_name, margin=margin))
            categories.append(layout_pb2.Category(title=cat_name, chart=charts))
        layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
    return layout
Example no. 9
0
 def testLayoutFromSingleRun(self):
     """A plugin rooted at a single-run directory returns that run's layout."""
     ctx = context.RequestContext()
     plugin = self.createPlugin(os.path.join(self.logdir, "foo"))
     observed = layout_pb2.Layout()
     layout_json = plugin.layout_impl(ctx, "exp_id")
     json_format.Parse(layout_json, observed)
     self.assertProtoEquals(self.foo_layout, observed)
Example no. 10
0
 def testMergedLayout(self):
     """Plugin merges layouts from all runs; same-title categories combine."""
     parsed_layout = layout_pb2.Layout()
     json_format.Parse(self.plugin.layout_impl(), parsed_layout)
     correct_layout = layout_pb2.Layout(category=[
         # A category with this name is also present in a layout for a
         # different run (the logdir run)
         layout_pb2.Category(
             title='cross entropy',
             chart=[
                 layout_pb2.Chart(
                     title='cross entropy',
                     multiline=layout_pb2.MultilineChartContent(
                         tag=[r'cross entropy'], )),
                 layout_pb2.Chart(
                     title='cross entropy margin chart',
                     margin=layout_pb2.MarginChartContent(series=[
                         layout_pb2.MarginChartContent.Series(
                             value='cross entropy',
                             lower='cross entropy lower',
                             upper='cross entropy upper'),
                     ], )),
             ],
             closed=True,
         ),
         layout_pb2.Category(
             title='mean biases',
             chart=[
                 layout_pb2.Chart(
                     title='mean layer biases',
                     multiline=layout_pb2.MultilineChartContent(tag=[
                         r'mean/layer0/biases', r'mean/layer1/biases'
                     ], )),
             ]),
         layout_pb2.Category(
             title='std weights',
             chart=[
                 layout_pb2.Chart(
                     title='stddev layer weights',
                     multiline=layout_pb2.MultilineChartContent(
                         tag=[r'stddev/layer\d+/weights'], )),
             ]),
     ])
     self.assertProtoEquals(correct_layout, parsed_layout)
Example no. 11
0
def get_layout_summary(
    *,
    metrics: Tuple[DictConfig, ...],
    tasks: Tuple[DictConfig, ...],
    closed: bool = False,
):
    """Build a custom-scalars layout summary for TensorBoard.

    One category is created per metric. Inside each category there is one
    margin chart per task regime, and inside each chart one series per set
    name; the series tags follow the
    "{set}/{regime}/{metric}_mean|_lower|_upper/scalar_summary" pattern so
    the fill area visualizes the lower/upper metric bounds estimated over
    tasks sampled from the same distribution.

    Parameters
    ----------
    metrics : tuple of DictConfigs
        Each must expose ``name`` and ``ci`` attributes.

    tasks : tuple of DictConfigs
        Each must expose ``set_name`` and ``regime`` attributes.

    closed : bool (default: False)
        Whether each category starts collapsed.

    Returns
    -------
      A summary proto containing the layout.
    """
    set_names = sorted(set(t.set_name for t in tasks))
    task_regimes = sorted(set(t.regime for t in tasks))

    categories = []
    for m in metrics:
        charts = []
        for t_regime in task_regimes:
            series = [
                layout_pb2.MarginChartContent.Series(
                    value=f"{s_name}/{t_regime}/{m.name}_mean/scalar_summary",
                    lower=f"{s_name}/{t_regime}/{m.name}_lower/scalar_summary",
                    upper=f"{s_name}/{t_regime}/{m.name}_upper/scalar_summary",
                )
                for s_name in set_names
            ]
            charts.append(
                layout_pb2.Chart(
                    title=f"{t_regime}/{m.name} (CI {m.ci:.0f}%)",
                    margin=layout_pb2.MarginChartContent(series=series),
                ))
        categories.append(
            layout_pb2.Category(title=m.name, chart=charts, closed=closed))

    return summary_lib.custom_scalar_pb(
        layout_pb2.Layout(category=categories))
def tb_layout():
    """Build the TensorBoard custom-scalars layout for training runs."""
    # Data-driven spec: (category title, [(chart title, [tag regexes]), ...]).
    spec = [
        ('Episode Reward', [
            ("Shaped Reward", [r'shaping/eprewmean_true']),
            ("Episode Length", [r'eplenmean']),
            ("Sparse Reward", [r'shaping/epsparsemean']),
            ("Dense Reward", [r'shaping/epdensemean']),
            ("Dense Reward Annealing", [r'shaping/rew_anneal_c']),
            ("Unshaped Reward", [r'ep_rewmean']),
            ("Victim Action Noise", [r'shaping/victim_noise']),
        ]),
        ("Game Outcomes", [
            ("Agent 0 Win Proportion", [r'game_win0']),
            ("Agent 1 Win Proportion", [r'game_win1']),
            ("Tie Proportion", [r'game_tie']),
            ("# of games", [r'game_total']),
        ]),
        ("Training", [
            ("Policy Loss", [r'policy_loss']),
            ("Value Loss", [r'value_loss']),
            ("Policy Entropy", [r'policy_entropy']),
            ("Explained Variance", [r'explained_variance']),
            ("Approx KL", [r'approxkl']),
            ("Clip Fraction", [r'clipfrac']),
        ]),
        # Intentionally unused:
        # + serial_timesteps (just total_timesteps / num_envs)
        # + time_elapsed (TensorBoard already logs wall-clock time)
        # + nupdates (this is already logged as step)
        ("Time", [
            ("Total Timesteps", [r'total_timesteps']),
            ("FPS", [r'fps']),
        ]),
    ]

    categories = [
        layout_pb2.Category(title=title, chart=gen_multiline_charts(charts))
        for title, charts in spec
    ]
    return summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
Example no. 13
0
    def layout_impl(self):
        """Return the merged custom-scalars layout across all runs.

        Layouts are merged in sorted run order. Categories with the same
        title are combined into one, skipping duplicate charts.

        Returns:
            A JSON string of the merged Layout proto, or an empty dict when
            no run provides a layout.
        """
        # Keep a mapping between title and category so we do not create
        # duplicate categories.
        title_to_category = {}

        merged_layout = None
        runs = list(
            self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME))
        runs.sort()
        for run in runs:
            tensor_events = self._multiplexer.Tensors(
                run, metadata.CONFIG_SUMMARY_TAG)

            # This run has a layout. Merge it with the ones currently found.
            if USE_TF:
                string_array = tf.make_ndarray(tensor_events[0].tensor_proto)
            else:
                string_array = tf.tensor_manip.make_ndarray(
                    tensor_events[0].tensor_proto)
            # np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
            # supported replacement.
            content = string_array.item()
            layout_proto = layout_pb2.Layout()
            layout_proto.ParseFromString(tf.compat.as_bytes(content))

            if merged_layout:
                # Append the categories within this layout to the merged layout.
                for category in layout_proto.category:
                    if category.title in title_to_category:
                        # A category with this name has been seen before. Do not create a
                        # new one. Merge their charts, skipping any duplicates.
                        title_to_category[category.title].chart.extend([
                            c for c in category.chart
                            if c not in title_to_category[category.title].chart
                        ])
                    else:
                        # This category has not been seen before.
                        merged_layout.category.add().MergeFrom(category)
                        title_to_category[category.title] = category
            else:
                # This is the first layout encountered.
                merged_layout = layout_proto
                for category in layout_proto.category:
                    title_to_category[category.title] = category

        if merged_layout:
            return json_format.MessageToJson(
                merged_layout, including_default_value_fields=True)
        else:
            # No layout was found.
            return {}
Example no. 14
0
def get_layout_summary():
    """Custom-scalars layout: one "metrics" category with loss/RMSLE charts."""
    loss_chart = layout_pb2.Chart(
        title="losses",
        multiline=layout_pb2.MultilineChartContent(
            tag=['train/loss', 'eval/loss']))
    rmsle_chart = layout_pb2.Chart(
        title="rmsles",
        multiline=layout_pb2.MultilineChartContent(
            tag=['train/rmsle', 'eval/rmsle']))
    metrics_category = layout_pb2.Category(
        title="metrics", chart=[loss_chart, rmsle_chart])
    layout = layout_pb2.Layout(category=[metrics_category])
    return summary_lib.custom_scalar_pb(layout)
Example no. 15
0
def layout_dashboard(writer):
    """Write a custom-scalars layout summary to *writer*.

    Defines two categories: "losses" (a multiline chart plus a margin chart
    for the baz series) and "trig functions" (initially closed).
    """
    from tensorboard import summary
    from tensorboard.plugins.custom_scalar import layout_pb2

    # The layout only needs to be specified once (instead of per step), so
    # this is a one-off write rather than an op in the graph.
    layout_summary = summary.custom_scalar_pb(layout_pb2.Layout(
        category=[
            layout_pb2.Category(
                title='losses',
                chart=[
                    layout_pb2.Chart(
                        title='losses',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'loss.*'],
                        )),
                    layout_pb2.Chart(
                        title='baz',
                        margin=layout_pb2.MarginChartContent(
                            series=[
                                layout_pb2.MarginChartContent.Series(
                                    value='loss/baz/scalar_summary',
                                    lower='baz_lower/baz/scalar_summary',
                                    upper='baz_upper/baz/scalar_summary'),
                            ],
                        )),
                ]),
            layout_pb2.Category(
                title='trig functions',
                chart=[
                    layout_pb2.Chart(
                        title='wave trig functions',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'trigFunctions/cosine', r'trigFunctions/sine'],
                        )),
                    # The range of tangent is different. Let's give it its own chart.
                    layout_pb2.Chart(
                        title='tan',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'trigFunctions/tangent'],
                        )),
                ],
                # This category we care less about. Let's make it initially closed.
                closed=True),
        ]))
    writer.add_summary(layout_summary)
Example no. 16
0
    def layout_impl(self, ctx, experiment):
        """Return the merged custom-scalars layout for *experiment*.

        Reads the layout config tensor of every run via the data provider
        and merges the layouts in sorted run order; categories with the
        same title are combined, skipping duplicate charts.

        Args:
            ctx: A request context.
            experiment: The experiment ID whose runs are queried.

        Returns:
            A JSON string of the merged Layout proto, or an empty dict when
            no run provides a layout.
        """
        # Keep a mapping between title and category so we do not create
        # duplicate categories.
        title_to_category = {}

        merged_layout = None
        data = self._data_provider.read_tensors(
            ctx,
            experiment_id=experiment,
            plugin_name=metadata.PLUGIN_NAME,
            run_tag_filter=provider.RunTagFilter(
                tags=[metadata.CONFIG_SUMMARY_TAG]),
            downsample=1,
        )
        for run in sorted(data):
            points = data[run][metadata.CONFIG_SUMMARY_TAG]
            # The config tensor holds the serialized Layout proto.
            content = points[0].numpy.item()
            layout_proto = layout_pb2.Layout()
            layout_proto.ParseFromString(tf.compat.as_bytes(content))

            if merged_layout:
                # Append the categories within this layout to the merged layout.
                for category in layout_proto.category:
                    if category.title in title_to_category:
                        # A category with this name has been seen before. Do not create a
                        # new one. Merge their charts, skipping any duplicates.
                        title_to_category[category.title].chart.extend([
                            c for c in category.chart
                            if c not in title_to_category[category.title].chart
                        ])
                    else:
                        # This category has not been seen before.
                        merged_layout.category.add().MergeFrom(category)
                        title_to_category[category.title] = category
            else:
                # This is the first layout encountered.
                merged_layout = layout_proto
                for category in layout_proto.category:
                    title_to_category[category.title] = category

        if merged_layout:
            return json_format.MessageToJson(
                merged_layout, including_default_value_fields=True)
        else:
            # No layout was found.
            return {}
Example no. 17
0
def create_layout_summary():
    """Return the custom-scalars layout summary for the demo dashboard."""
    loss_multiline = layout_pb2.Chart(
        title="losses",
        multiline=layout_pb2.MultilineChartContent(
            tag=[r"loss(?!.*margin.*)"]),
    )
    baz_margin = layout_pb2.Chart(
        title="baz",
        margin=layout_pb2.MarginChartContent(series=[
            layout_pb2.MarginChartContent.Series(
                value="loss/baz",
                lower="loss/baz_lower",
                upper="loss/baz_upper",
            ),
        ]),
    )
    wave_chart = layout_pb2.Chart(
        title="wave trig functions",
        multiline=layout_pb2.MultilineChartContent(tag=[
            r"trigFunctions/cosine",
            r"trigFunctions/sine",
        ]),
    )
    # Tangent's range differs from sine/cosine, so it gets its own chart.
    tan_chart = layout_pb2.Chart(
        title="tan",
        multiline=layout_pb2.MultilineChartContent(
            tag=[r"trigFunctions/tangent"]),
    )

    layout = layout_pb2.Layout(category=[
        layout_pb2.Category(
            title="losses",
            chart=[loss_multiline, baz_margin],
        ),
        # A less important category: start it collapsed.
        layout_pb2.Category(
            title="trig functions",
            chart=[wave_chart, tan_chart],
            closed=True,
        ),
    ])
    return cs_summary.pb(layout)
Example no. 18
0
def get_layout_summary(metric_shapes,
                       index2class_name,
                       log_names=None):
    """Build a custom-scalar layout summary covering every metric.

    One category is created per metric (charts from ``get_metric_charts``),
    plus a final 'ALL' category holding one ``<metric>_ALL/<log>`` chart
    per metric.

    Args:
        metric_shapes: Mapping of metric name -> metric shape, forwarded
            to ``get_metric_charts``.
        index2class_name: Mapping of class index -> class name, forwarded
            to ``get_metric_charts``.
        log_names: Iterable of log/run names. Defaults to
            ``['train', 'val', 'test']``.

    Returns:
        The proto produced by ``tensorboard.summary.custom_scalar_pb``.
    """
    # Avoid a mutable default argument: a shared default list could be
    # mutated by callers and leak state across calls.
    if log_names is None:
        log_names = ['train', 'val', 'test']
    categories = []
    concatenated_charts = []
    for metric_name, metric_shape in metric_shapes.items():
        charts = get_metric_charts(metric_name, metric_shape, index2class_name,
                                   log_names)
        categories.append(get_layout_category(title=metric_name,
                                              charts=charts))
        # One aggregated chart per metric, collecting it across all logs.
        tags = [
            '{}_ALL/{}'.format(metric_name, log_name) for log_name in log_names
        ]
        concatenated_charts.append(
            get_layout_chart(title=metric_name, tags=tags))
    categories.append(
        get_layout_category(title='ALL', charts=concatenated_charts))
    return tensorboard.summary.custom_scalar_pb(
        layout_pb2.Layout(category=categories))
 def _init_custom_scalar_layout(self):
     """Register a custom-scalar layout grouping per-actor metrics.

     Writes a single summary with three one-chart categories; each chart
     collects a metric tagged ``actor-<n>/...`` across all actors via a
     multiline regex tag.
     """
     # (category title, chart title, tag regex) for each tracked metric.
     specs = [
         ("mean rewards", "mean rewards per actor",
          r"actor-\d+/mean rewards"),
         ("mean episode lengths", "mean episode length per actor",
          r"actor-\d+/mean episode lengths"),
         ("mean fruits eaten", "mean fruits eaten per actor",
          r"actor-\d+/mean fruits eaten"),
     ]
     categories = [
         layout_pb2.Category(
             title=category_title,
             chart=[
                 layout_pb2.Chart(
                     title=chart_title,
                     multiline=layout_pb2.MultilineChartContent(tag=[tag]),
                 )
             ],
         )
         for category_title, chart_title, tag in specs
     ]
     layout = layout_pb2.Layout(category=categories)
     self.writer.add_summary(summary.custom_scalar_pb(layout))
Esempio n. 20
0
def add_custom_scalar(logdir):
    """Write a custom-scalar layout (one 'Loss' category) to ``logdir``.

    Every chart is a multiline chart selecting its scalars by a single
    tag pattern.
    """
    # (chart title, tag pattern) pairs, one chart each.
    chart_specs = [
        ('Loss', r'1_loss/*'),
        ('Loss Component', r'2_loss_component/*'),
        ('Discriminator Values', r'3_discriminator_values/*'),
        ('Variation of sequences', r'Stddev/*'),
        ('BLOMSUM45', r'Blast/*/BLOMSUM45'),
        ('Evalue', r'Blast/*/Evalue'),
        ('Identity', r'Blast/*/Identity'),
    ]
    charts = [
        layout_pb2.Chart(
            title=title,
            multiline=layout_pb2.MultilineChartContent(tag=[tag]))
        for title, tag in chart_specs
    ]
    layout_summary = summary.custom_scalar_pb(
        layout_pb2.Layout(category=[
            layout_pb2.Category(title='Loss', chart=charts),
        ]))
    summary_writer = SummaryWriterCache.get(logdir)
    summary_writer.add_summary(layout_summary)
import argparse
from pathlib import Path

import tensorflow as tf

from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2

# Custom-scalar layout: group the loss curves into two multiline charts.
_loss_charts = [
    layout_pb2.Chart(
        title='Train',
        multiline=layout_pb2.MultilineChartContent(
            tag=['loss/train/mse', 'loss/train/l1'])),
    # NOTE(review): the 'Val' chart also selects 'loss/train/mse' —
    # presumably an intentional train-vs-val comparison; confirm.
    layout_pb2.Chart(
        title='Val',
        multiline=layout_pb2.MultilineChartContent(
            tag=['loss/train/mse', 'loss/val/mse'])),
]
layout_summary = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=[
    layout_pb2.Category(title='Losses', chart=_loss_charts),
]))

parser = argparse.ArgumentParser()
parser.add_argument('folder', help='The log folder to place the layout in')
args = parser.parse_args()

# The layout lives in its own 'layout' subdirectory of the given folder.
folder = (Path(args.folder) / 'layout').expanduser().resolve()
with tf.summary.FileWriter(folder) as writer:
    writer.add_summary(layout_summary)

print('Layout saved to', folder)
    def __init__(self, *args, **kwargs):
        """Create a temp logdir with layouts and scalar data for the tests.

        Three runs are generated under ``self.logdir``:
          * "foo": carries ``self.foo_layout`` plus 4 steps of "squares".
          * "bar": carries 3 steps of "increments" but no layout.
          * "." (the logdir itself): carries ``self.logdir_layout`` but no
            scalar data.
        Finally the plugin under test is created over this logdir.
        """
        super(CustomScalarsPluginTest, self).__init__(*args, **kwargs)
        self.logdir = os.path.join(self.get_temp_dir(), "logdir")
        os.makedirs(self.logdir)

        # Layout for the "." run. Its "cross entropy" category deliberately
        # overlaps with one in ``foo_layout`` below, exercising the plugin's
        # cross-run layout merging.
        self.logdir_layout = layout_pb2.Layout(category=[
            layout_pb2.Category(
                title="cross entropy",
                chart=[
                    layout_pb2.Chart(
                        title="cross entropy",
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r"cross entropy"], ),
                    ),
                ],
                closed=True,
            )
        ])
        # Layout for the "foo" run.
        self.foo_layout = layout_pb2.Layout(category=[
            layout_pb2.Category(
                title="mean biases",
                chart=[
                    layout_pb2.Chart(
                        title="mean layer biases",
                        multiline=layout_pb2.MultilineChartContent(tag=[
                            r"mean/layer0/biases",
                            r"mean/layer1/biases",
                        ], ),
                    ),
                ],
            ),
            layout_pb2.Category(
                title="std weights",
                chart=[
                    layout_pb2.Chart(
                        title="stddev layer weights",
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r"stddev/layer\d+/weights"], ),
                    ),
                ],
            ),
            # A category with this name is also present in a layout for a
            # different run (the logdir run) and also contains a duplicate chart
            layout_pb2.Category(
                title="cross entropy",
                chart=[
                    layout_pb2.Chart(
                        title="cross entropy margin chart",
                        margin=layout_pb2.MarginChartContent(series=[
                            layout_pb2.MarginChartContent.Series(
                                value="cross entropy",
                                lower="cross entropy lower",
                                upper="cross entropy upper",
                            ),
                        ], ),
                    ),
                    layout_pb2.Chart(
                        title="cross entropy",
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r"cross entropy"], ),
                    ),
                ],
            ),
        ])

        # Generate test data.
        with test_util.FileWriterCache.get(os.path.join(self.logdir,
                                                        "foo")) as writer:
            # Layout first, then a short series of "squares" scalars.
            writer.add_summary(
                test_util.ensure_tb_summary_proto(summary.pb(self.foo_layout)))
            for step in range(4):
                writer.add_summary(
                    test_util.ensure_tb_summary_proto(
                        scalar_summary.pb("squares", step * step)),
                    step,
                )

        # The "bar" run has scalar data only (no layout).
        with test_util.FileWriterCache.get(os.path.join(self.logdir,
                                                        "bar")) as writer:
            for step in range(3):
                writer.add_summary(
                    test_util.ensure_tb_summary_proto(
                        scalar_summary.pb("increments", step + 1)),
                    step,
                )

        # The '.' run lacks scalar data but has a layout.
        with test_util.FileWriterCache.get(self.logdir) as writer:
            writer.add_summary(
                test_util.ensure_tb_summary_proto(
                    summary.pb(self.logdir_layout)))

        self.plugin = self.createPlugin(self.logdir)
Esempio n. 23
0
def run():
    """Run custom scalar demo and generate event files.

    Builds a TF1 graph of demo scalar summaries (two losses, a margin-chart
    triple for 'baz', three trig functions), writes a custom-scalar layout
    once, then evaluates and logs the merged summaries for 42 steps.
    """
    # Fed per step below; drives every demo scalar.
    step = tf.placeholder(tf.float32, shape=[])

    with tf.name_scope('loss'):
        # Specify 2 different loss values, each tagged differently.
        summary_lib.scalar('foo', tf.pow(0.9, step))
        summary_lib.scalar('bar', tf.pow(0.85, step + 2))

        # Log metric baz as well as upper and lower bounds for a margin chart.
        middle_baz_value = step + 4 * tf.random_uniform([]) - 2
        summary_lib.scalar('baz', middle_baz_value)
        summary_lib.scalar('baz_lower',
                           middle_baz_value - 6.42 - tf.random_uniform([]))
        summary_lib.scalar('baz_upper',
                           middle_baz_value + 6.42 + tf.random_uniform([]))

    with tf.name_scope('trigFunctions'):
        summary_lib.scalar('cosine', tf.cos(step))
        summary_lib.scalar('sine', tf.sin(step))
        summary_lib.scalar('tangent', tf.tan(step))

    merged_summary = tf.summary.merge_all()

    with tf.Session() as sess, tf.summary.FileWriter(LOGDIR) as writer:
        # We only need to specify the layout once (instead of per step).
        layout_summary = summary_lib.custom_scalar_pb(
            layout_pb2.Layout(category=[
                layout_pb2.Category(
                    title='losses',
                    chart=[
                        layout_pb2.Chart(
                            title='losses',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'loss(?!.*margin.*)'], )),
                        layout_pb2.Chart(
                            title='baz',
                            margin=layout_pb2.MarginChartContent(series=[
                                layout_pb2.MarginChartContent.Series(
                                    value='loss/baz/scalar_summary',
                                    lower='loss/baz_lower/scalar_summary',
                                    upper='loss/baz_upper/scalar_summary'),
                            ], )),
                    ]),
                layout_pb2.Category(
                    title='trig functions',
                    chart=[
                        layout_pb2.Chart(
                            title='wave trig functions',
                            multiline=layout_pb2.MultilineChartContent(tag=[
                                r'trigFunctions/cosine', r'trigFunctions/sine'
                            ], )),
                        # The range of tangent is different. Give it its own chart.
                        layout_pb2.Chart(
                            title='tan',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'trigFunctions/tangent'], )),
                    ],
                    # This category we care less about. Make it initially closed.
                    closed=True),
            ]))
        writer.add_summary(layout_summary)

        # `range` instead of Python-2-only `xrange`: identical behavior on
        # py2 at this scale and required on py3 (xrange raises NameError).
        for i in range(42):
            evaluated = sess.run(merged_summary, feed_dict={step: i})
            writer.add_summary(evaluated, global_step=i)
Esempio n. 24
0
 # Custom-scalar layout: a "losses" category (regex-filtered multiline chart
 # plus a margin chart for baz) and an initially-closed "trig functions"
 # category.
 _losses_category = layout_pb2.Category(
     title='losses',
     chart=[
         layout_pb2.Chart(
             title='losses',
             multiline=layout_pb2.MultilineChartContent(
                 tag=[r'loss(?!.*margin.*)'],
             )),
         layout_pb2.Chart(
             title='baz',
             margin=layout_pb2.MarginChartContent(
                 series=[
                     layout_pb2.MarginChartContent.Series(
                         value='loss/baz/scalar_summary',
                         lower='loss/baz_lower_margin/scalar_summary',
                         upper='loss/baz_upper_margin/scalar_summary'),
                 ],
             )),
     ])
 _trig_category = layout_pb2.Category(
     title='trig functions',
     chart=[
         layout_pb2.Chart(
             title='wave trig functions',
             multiline=layout_pb2.MultilineChartContent(
                 tag=[r'trigFunctions/cosine', r'trigFunctions/sine'],
             )),
         # The range of tangent is different. Give it its own chart.
         layout_pb2.Chart(
             title='tan',
             multiline=layout_pb2.MultilineChartContent(
                 tag=[r'trigFunctions/tangent'],
             )),
     ],
     # This category we care less about. Lets make it initially closed.
     closed=True)
 layout_summary = summary_lib.custom_scalar_pb(layout_pb2.Layout(
     category=[_losses_category, _trig_category]))
Esempio n. 25
0
    def graph_setup(self):
        """
        Set up the computation graph for the neural network based on the parameters set at initialization.

        Builds, inside ``self.graph``: per-encoder MLPs feeding a shared
        latent layer, per-decoder noise-gated copies of that latent,
        decoder MLPs answering question inputs, the cost and training ops,
        and the TensorBoard summaries (including a custom-scalar layout
        grouping the selection factors).
        """
        with self.graph.as_default():

            #######################
            # Define placeholders #
            #######################
            # Scalar hyper-parameters fed at run time.
            self.gamma = tf.placeholder(tf.float32, shape=[], name='gamma')
            self.learning_rate = tf.placeholder(tf.float32, shape=[], name='learning_rate')
            self.reg_loss_factor = tf.placeholder(tf.float32, shape=[], name='reg_loss_factor')
            self.nloc_factor = tf.placeholder(tf.float32, shape=[], name='nloc_factor')
            self.total_latent_size = np.sum(self.latent_sizes)

            # One observation input per encoder.
            self.inputs = [tf.placeholder(tf.float32, [None, self.input_sizes[k]], name='input{}'.format(k))
                           for k in range(self.encoder_num)]

            self.question_inputs = [
                tf.placeholder(tf.float32, shape=[None, self.question_sizes[i]], name='q_dec{}'.format(i))
                for i in range(self.decoder_num)
            ]

            # NOTE(review): reuses the 'q_dec{}' name pattern from
            # ``question_inputs`` above; TF will uniquify the op names.
            # Presumably a distinct name (e.g. 'a_dec{}') was intended —
            # confirm before relying on placeholder names.
            self.answers = [
                tf.placeholder(tf.float32, shape=[None, self.answer_sizes[i]], name='q_dec{}'.format(i))
                for i in range(self.decoder_num)
            ]

            # Per-decoder noise injected into the latent layer below.
            self.select_noise = [
                tf.placeholder(tf.float32, shape=[None, self.total_latent_size], name='select_noise_{}'.format(i))
                for i in range(self.decoder_num)
            ]

            # Fully connected layer with L2-regularized weights and biases,
            # tracked in the given variables collection.
            def fc_layer(in_layer, num_outputs, activation_fn, collection='std'):
                return fully_connected(in_layer, num_outputs, activation_fn,
                                       weights_regularizer=l2_regularizer(1.),
                                       biases_regularizer=l2_regularizer(1.),
                                       variables_collections=[collection])

            ##########################################
            # Set up variables and computation graph #
            ##########################################
            # Each encoder maps its input through an MLP to its slice of
            # the latent representation.
            self.individual_latent = []
            for k in range(self.encoder_num):
                with tf.variable_scope('encoder_{}'.format(k)):
                    temp_layer = self.inputs[k]
                    for n in self.encoder_num_units:
                        temp_layer = fc_layer(temp_layer, num_outputs=n, activation_fn=tf.nn.elu)
                    self.individual_latent.append(fc_layer(temp_layer, num_outputs=self.latent_sizes[k], activation_fn=tf.identity))

            with tf.variable_scope('latent_layer'):
                self.full_latent = tf.concat(self.individual_latent, axis=1)
                # Per-dimension std of the latent over the batch; scales the
                # injected noise below.
                latent_std = tf.math.sqrt(tf.nn.moments(self.full_latent, axes=[0])[1])
                self.select_logs = []
                self.dec_inputs = []
                for n in range(self.decoder_num):
                    with tf.variable_scope('select_dec{}'.format(n)):
                        # Log-scale selection factors, initialized at -10 so
                        # exp(selectors) starts near zero (almost no noise).
                        selectors = tf.get_variable('sf_log',
                                                    initializer=tf.initializers.constant(-10.),
                                                    shape=self.total_latent_size,
                                                    collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'sel'])
                        self.select_logs.append(selectors)
                        self.dec_inputs.append(self.full_latent + latent_std * tf.exp(selectors) * self.select_noise[n])

            # Each decoder answers its question from its noise-gated latent.
            self.outputs = []
            for n in range(self.decoder_num):
                with tf.variable_scope('dec{}'.format(n)):
                    temp_layer = tf.concat([self.dec_inputs[n], self.question_inputs[n]], axis=1, name='dec_in')

                    for q in self.decoder_num_units:
                        temp_layer = fc_layer(temp_layer, num_outputs=q, activation_fn=tf.nn.elu)

                    # Output scaled by pi/2 — presumably to match the range
                    # of the (angle-like) answers; confirm against data.
                    out = np.pi / 2. * fc_layer(temp_layer, num_outputs=self.answer_sizes[n], activation_fn=tf.identity)

                self.outputs.append(out)

            #####################
            # Cost and training #
            #####################
            with tf.name_scope('cost'):
                sel_cost_list = []
                ans_cost_list = []
                for n in range(self.decoder_num):
                    sel_cost_list.append(tf.reduce_mean(self.select_logs[n]))
                    # Mean (over the batch) summed squared error per decoder.
                    ans_cost_list.append(tf.reduce_mean(tf.reduce_sum(tf.squared_difference(self.answers[n], self.outputs[n]), axis=1)))

                # Negated: larger selection logs (more noise) are rewarded.
                self.cost_select = (-1) * tf.add_n(sel_cost_list)
                # First half of the decoders are "local", the rest "non-local".
                loc_cut = int(ceil(self.decoder_num / 2))
                self.cost_loc = tf.add_n([ans_cost_list[i] for i in range(0, loc_cut)], name='cost_local')
                # NOTE(review): op name 'cost_local' duplicates the one above
                # (TF uniquifies to 'cost_local_1'); likely a copy-paste —
                # 'cost_nloc' was presumably intended. Confirm before fixing.
                self.cost_nloc = tf.add_n([ans_cost_list[i] for i in range(loc_cut, self.decoder_num)], name='cost_local')
                self.weighted_cost = (self.cost_loc + self.nloc_factor * self.cost_nloc) / (1. + self.nloc_factor)

            with tf.name_scope('reg_loss'):
                self.reg_loss = tf.losses.get_regularization_loss()

            with tf.name_scope('optimizer'):
                optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)

                self.training_op = self.train_op_from_loss(optimizer, self.weighted_cost)
                self.pretraining_op = self.train_op_from_loss(optimizer, self.weighted_cost, collections=['std', 'loc_decoder', 'nloc_decoder'])

            #########################
            # Tensorboard summaries #
            #########################

            # One custom-scalar chart per decoder, collecting its per-latent
            # selection-factor scalars (tagged 'sf_log_<decoder>_<latent>').
            chart = []
            for i in range(self.decoder_num):
                chart.append(layout_pb2.Chart(
                    title='Decoder {}'.format(i),
                    multiline=layout_pb2.MultilineChartContent(
                        tag=[r'^sf_log_{}'.format(i)]
                    )
                ))

            layout_summary = summary_lib.custom_scalar_pb(
                layout_pb2.Layout(category=[
                    layout_pb2.Category(
                        title='Select factors',
                        chart=chart)
                ])
            )

            # Validation ('vd') and training ('td') summary collections.
            tf.summary.scalar('cost_select', self.cost_select, collections=['vd'])
            tf.summary.scalar('cost', self.weighted_cost, collections=['vd'])
            tf.summary.scalar('cost_td', self.weighted_cost, collections=['td'])
            tf.summary.scalar('cost_loc', self.cost_loc, collections=['vd'])
            tf.summary.scalar('cost_nloc', self.cost_nloc, collections=['vd'])
            tf.summary.scalar('reg_loss', self.reg_loss, collections=['vd'])

            # One scalar per (decoder, latent dimension) selection factor.
            for i in range(self.decoder_num):
                for l in range(self.total_latent_size):
                    tf.summary.scalar('sf_log_{}_{}'.format(i, l), self.select_logs[i][l], collections=['vd'])

            # Weight histograms; tf.contrib names the first fully_connected
            # layer without a suffix, later ones '_1', '_2', ...
            for i in range(len(self.decoder_num_units)):
                weight_id = '' if i == 0 else '_{}'.format(i)
                for j in range(self.decoder_num):
                    tf.summary.histogram('dec{}_weight_{}'.format(j, i),
                                         self.graph.get_tensor_by_name('dec{}/fully_connected{}/weights:0'.format(j, weight_id)),
                                         collections=['vd'])

            for i in range(len(self.encoder_num_units)):
                weight_id = '' if i == 0 else '_{}'.format(i)
                for k in range(self.encoder_num):
                    # NOTE(review): the summary tag omits the encoder index k,
                    # so all encoders share 'enc_weight_<i>' (TF uniquifies);
                    # presumably the tag should include k — confirm.
                    tf.summary.histogram('enc_weight_{}'.format(i),
                                         self.graph.get_tensor_by_name('encoder_{}/fully_connected{}/weights:0'.format(k, weight_id)),
                                         collections=['vd'])

            self.summary_writer = tf.summary.FileWriter(io.tf_log_path + self.name + '/', graph=self.graph)
            self.summary_writer.add_summary(layout_summary)
            self.summary_writer.flush()
            self.vd_summaries = tf.summary.merge_all(key='vd')
            self.td_summaries = tf.summary.merge_all(key='td')
Esempio n. 26
0
        saver.restore(sess, latest_checkpoint)
        print_term(' done!', run_id)
    else:
        print_term('No checkpoint found in: {}'.format(checkpoint_paths),
                   run_id)

    # Actual training with epochs as iteration
    layout_summary = summary_lib.custom_scalar_pb(
        layout_pb2.Layout(category=[
            layout_pb2.Category(title='losses',
                                chart=[
                                    layout_pb2.Chart(title='losses',
                                                     multiline=layout_pb2.
                                                     MultilineChartContent(
                                                         tag=[
                                                             'training_col',
                                                             'validation_col',
                                                             'training_fwd',
                                                             'validation_fwd',
                                                             'training_ref',
                                                             'validation_ref',
                                                         ]))
                                ])
        ]))
    train_col_writer.add_summary(layout_summary)

    for epoch in range(epochs):
        print_term(
            'Starting epoch: {} (total images {})'.format(
                epoch, total_train_images), run_id)
        # Training step
        for batch in range(batches):
Esempio n. 27
0
def write_combined_events(dpath, d_combined, dname='combined'):
    """Write combined 'floor' statistics as TensorBoard events.

    Creates a run directory ``dpath/dname`` containing a custom-scalar
    layout (a margin chart of floor mean +/- std) and, for every step,
    the ``floor/mean``, ``floor/std_lower`` and ``floor/std_upper``
    scalars.

    Args:
        dpath: Parent directory for the combined run.
        d_combined: Dict of metric name -> list of ``(step, value)``
            tuples; must contain the keys ``'floor'`` and ``'floor.std'``
            (aligned by index).
        dname: Name of the run subdirectory. Defaults to ``'combined'``.
    """
    fpath = os.path.join(dpath, dname)
    writer = tf.summary.FileWriter(fpath)

    # We only need to specify the layout once (instead of per step).
    layout_summary = summary_lib.custom_scalar_pb(
        layout_pb2.Layout(category=[
            layout_pb2.Category(
                title='losses',
                chart=[
                    layout_pb2.Chart(
                        title='floor',
                        margin=layout_pb2.MarginChartContent(series=[
                            layout_pb2.MarginChartContent.Series(
                                value='floor/mean',
                                lower='floor/std_lower',
                                upper='floor/std_upper'),
                        ], )),
                ]),
        ]))
    writer.add_summary(layout_summary)

    floor = d_combined['floor']
    floor_std = d_combined['floor.std']
    for i, t in enumerate(floor):
        step = t[0]
        mean = t[1]
        # The margin chart needs explicit lower/upper series: mean +/- std.
        lower = mean - floor_std[i][1]
        upper = mean + floor_std[i][1]
        for tag, value in (('floor/mean', mean),
                           ('floor/std_lower', lower),
                           ('floor/std_upper', upper)):
            writer.add_summary(
                tf.Summary(
                    value=[tf.Summary.Value(tag=tag, simple_value=value)]),
                global_step=step)
    writer.flush()
Esempio n. 28
0
    def __init__(self, scalars=None, groups=None, pr_curve=None, tfpn=None, num_classes=None, val_generator=None, val_steps=None, **kwargs):
        """Constructor.

        Args:
            scalars:
                A dict mapping strings to tensors; each tensor is evaluated
                and shown as a scalar summary.
            groups:
                A dict defining categories of charts and the op names each
                chart groups (regex accepted), e.g.
                ``{'category A': {'chart A1': ['op_name_1', r'.*acc.*']}}``.
            pr_curve:
                Evaluate the precision-recall curve.
            tfpn:
                Publish TP (True Positives), FP (False Positives),
                FN (False Negatives), F1 Score, Precision, Recall. (DEVEL).
            num_classes:
                The number of classes (dimension 1 of the data); required
                when ``pr_curve`` is set.
            val_generator:
                The PR curve callback only works with static
                validation_data; pass a generator here to generate the val
                data on the fly.
            val_steps:
                The number of steps to use for the val_generator.
            **kwargs:
                All keyword arguments are passed to the standard TensorBoard
                plugin (log_dir, histogram_freq, write_graph, write_grads,
                batch_size, write_images, embeddings_freq,
                embeddings_layer_names, embeddings_metadata,
                embeddings_data, ...).
        """
        super().__init__(**kwargs)

        # Validate and store inputs in the same order they are received.
        if not isinstance(scalars, (dict, type(None))):
            raise ValueError("scalars must be a dict mapping Strings to Tensors")
        self.scalars = scalars

        if not isinstance(groups, (dict, type(None))):
            raise ValueError("groups must be a dict like {'category A': {'chart A1': ['op_name_1', 'op_name_2']}}")
        self.groups = groups

        if pr_curve and num_classes is None:
            raise ValueError("pr_curve requires num_classes to be set.")

        self.pr_curve = pr_curve
        self.pr_summary = []
        self.num_classes = num_classes

        self.val_generator = val_generator
        self.val_steps = val_steps

        # Pre-build the custom-scalar layout proto from `groups`, if any:
        # one Category per group, one multiline Chart per entry.
        self.layout_summary = None
        if groups:
            categories = [
                layout_pb2.Category(
                    title=category_name,
                    chart=[
                        layout_pb2.Chart(
                            title=chart_name,
                            multiline=layout_pb2.MultilineChartContent(
                                tag=op_list))
                        for chart_name, op_list in charts.items()
                    ])
                for category_name, charts in groups.items()
            ]
            self.layout_summary = summary.custom_scalar_pb(
                layout_pb2.Layout(category=categories))

        self.tfpn = tfpn
        # Summary slots filled lazily by the tfpn machinery.
        self.precision_summary = None
        self.recall_summary = None
        self.f1_summary = None
        self.tp_summary = None
        self.fn_summary = None
        self.fp_summary = None
Esempio n. 29
0
    def __init__(self, *args, **kwargs):
        """Build a temp logdir with layouts and scalar data, then the plugin.

        Three runs are created under ``self.logdir``:
          * ``foo`` -- carries ``self.foo_layout`` plus 4 steps of 'squares'
            scalar summaries.
          * ``bar`` -- 3 steps of 'increments' scalar summaries, no layout.
          * the logdir root itself ('.') -- holds only ``self.logdir_layout``
            and no scalar data.

        The two layouts deliberately share a 'cross entropy' category (and a
        duplicate chart) so merging behavior can be exercised by the tests.
        """
        super().__init__(*args, **kwargs)
        self.logdir = os.path.join(self.get_temp_dir(), 'logdir')
        os.makedirs(self.logdir)

        # Layout stored at the logdir root: a single closed category with one
        # multiline chart.
        self.logdir_layout = layout_pb2.Layout(category=[
            layout_pb2.Category(
                title='cross entropy',
                chart=[
                    layout_pb2.Chart(
                        title='cross entropy',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'cross entropy'])),
                ],
                closed=True),
        ])

        # Layout stored in the 'foo' run. Its last category reuses the
        # 'cross entropy' title (also present in the logdir layout) and
        # repeats the 'cross entropy' chart on purpose.
        biases_category = layout_pb2.Category(
            title='mean biases',
            chart=[
                layout_pb2.Chart(
                    title='mean layer biases',
                    multiline=layout_pb2.MultilineChartContent(
                        tag=[r'mean/layer0/biases',
                             r'mean/layer1/biases'])),
            ])
        weights_category = layout_pb2.Category(
            title='std weights',
            chart=[
                layout_pb2.Chart(
                    title='stddev layer weights',
                    multiline=layout_pb2.MultilineChartContent(
                        tag=[r'stddev/layer\d+/weights'])),
            ])
        entropy_category = layout_pb2.Category(
            title='cross entropy',
            chart=[
                layout_pb2.Chart(
                    title='cross entropy margin chart',
                    margin=layout_pb2.MarginChartContent(series=[
                        layout_pb2.MarginChartContent.Series(
                            value='cross entropy',
                            lower='cross entropy lower',
                            upper='cross entropy upper'),
                    ])),
                layout_pb2.Chart(
                    title='cross entropy',
                    multiline=layout_pb2.MultilineChartContent(
                        tag=[r'cross entropy'])),
            ])
        self.foo_layout = layout_pb2.Layout(
            category=[biases_category, weights_category, entropy_category])

        # Generate test data: the 'foo' run gets its layout and 4 steps of
        # 'squares' scalars.
        foo_dir = os.path.join(self.logdir, 'foo')
        with test_util.FileWriterCache.get(foo_dir) as writer:
            writer.add_summary(summary.pb(self.foo_layout))
            for step in range(4):
                writer.add_summary(
                    scalar_summary.pb('squares', step * step), step)

        # The 'bar' run has scalar data only, no layout.
        bar_dir = os.path.join(self.logdir, 'bar')
        with test_util.FileWriterCache.get(bar_dir) as writer:
            for step in range(3):
                writer.add_summary(
                    scalar_summary.pb('increments', step + 1), step)

        # The '.' run lacks scalar data but has a layout.
        with test_util.FileWriterCache.get(self.logdir) as writer:
            writer.add_summary(summary.pb(self.logdir_layout))

        self.plugin = self.createPlugin(self.logdir)
Esempio n. 30
0
 def testLayoutFromSingleRun(self):
     """A plugin rooted at the 'foo' run exposes exactly ``foo_layout``."""
     foo_plugin = self.createPlugin(os.path.join(self.logdir, 'foo'))
     actual = layout_pb2.Layout()
     # layout_impl returns JSON; round-trip it back into a Layout proto.
     json_format.Parse(foo_plugin.layout_impl(), actual)
     self.assertProtoEquals(self.foo_layout, actual)