Esempio n. 1
0
    def testSetLayout(self):
        """Round-trips a Layout proto through a summary written to disk.

        Writes the layout as a summary for the '.' run, reloads it with an
        EventMultiplexer, parses the serialized proto back, and verifies it
        equals what was written. Covers multiline charts, a margin chart,
        and an initially-closed category.
        """
        layout_proto_to_write = layout_pb2.Layout(category=[
            layout_pb2.Category(
                title='mean biases',
                chart=[
                    layout_pb2.Chart(
                        title='mean layer biases',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'mean/layer\d+/biases'], )),
                ]),
            layout_pb2.Category(
                title='std weights',
                chart=[
                    layout_pb2.Chart(
                        title='stddev layer weights',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'stddev/layer\d+/weights'], )),
                ]),
            layout_pb2.Category(
                title='cross entropy ... and maybe some other values',
                chart=[
                    layout_pb2.Chart(
                        title='cross entropy',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'cross entropy'], )),
                    layout_pb2.Chart(
                        title='accuracy',
                        margin=layout_pb2.MarginChartContent(series=[
                            layout_pb2.MarginChartContent.Series(
                                value='accuracy',
                                lower='accuracy_lower_margin',
                                upper='accuracy_upper_margin')
                        ])),
                    layout_pb2.Chart(
                        title='max layer weights',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'max/layer1/.*', r'max/layer2/.*'], )),
                ],
                closed=True)
        ])

        # Write the data as a summary for the '.' run.
        with tf.Session() as s, tf.summary.FileWriter(self.logdir) as writer:
            writer.add_summary(s.run(summary.op(layout_proto_to_write)))

        # Read the data from disk.
        multiplexer = event_multiplexer.EventMultiplexer()
        multiplexer.AddRunsFromDirectory(self.logdir)
        multiplexer.Reload()
        tensor_events = multiplexer.Tensors('.', metadata.CONFIG_SUMMARY_TAG)
        self.assertEqual(1, len(tensor_events))

        # Parse the data.
        string_array = tf.make_ndarray(tensor_events[0].tensor_proto)
        # np.asscalar() was deprecated in NumPy 1.16 and removed in 1.23;
        # ndarray.item() is the direct replacement.
        content = string_array.item()
        layout_proto_from_disk = layout_pb2.Layout()
        layout_proto_from_disk.ParseFromString(tf.compat.as_bytes(content))

        # Verify the content.
        self.assertProtoEquals(layout_proto_to_write, layout_proto_from_disk)
Esempio n. 2
0
 def testMergedLayout(self):
     """Checks layout_impl() merges the per-run layouts into one Layout.

     The expected result combines categories from multiple runs; the
     'cross entropy' category appears in more than one run's layout and
     must come out merged (and closed).
     """
     ctx = context.RequestContext()
     parsed_layout = layout_pb2.Layout()
     json_format.Parse(self.plugin.layout_impl(ctx, "exp_id"), parsed_layout)
     correct_layout = layout_pb2.Layout(
         category=[
             # A category with this name is also present in a layout for a
             # different run (the logdir run)
             layout_pb2.Category(
                 title="cross entropy",
                 chart=[
                     layout_pb2.Chart(
                         title="cross entropy",
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r"cross entropy"],
                         ),
                     ),
                     layout_pb2.Chart(
                         title="cross entropy margin chart",
                         margin=layout_pb2.MarginChartContent(
                             series=[
                                 layout_pb2.MarginChartContent.Series(
                                     value="cross entropy",
                                     lower="cross entropy lower",
                                     upper="cross entropy upper",
                                 ),
                             ],
                         ),
                     ),
                 ],
                 closed=True,
             ),
             layout_pb2.Category(
                 title="mean biases",
                 chart=[
                     layout_pb2.Chart(
                         title="mean layer biases",
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[
                                 r"mean/layer0/biases",
                                 r"mean/layer1/biases",
                             ],
                         ),
                     ),
                 ],
             ),
             layout_pb2.Category(
                 title="std weights",
                 chart=[
                     layout_pb2.Chart(
                         title="stddev layer weights",
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r"stddev/layer\d+/weights"],
                         ),
                     ),
                 ],
             ),
         ]
     )
     self.assertProtoEquals(correct_layout, parsed_layout)
Esempio n. 3
0
def get_layout_summary():
    """Build the custom-scalars layout summary: a 'metrics' category with
    multiline charts for train/eval loss and train/eval RMSLE."""
    loss_chart = layout_pb2.Chart(
        title="losses",
        multiline=layout_pb2.MultilineChartContent(
            tag=['train/loss', 'eval/loss']))
    rmsle_chart = layout_pb2.Chart(
        title="rmsles",
        multiline=layout_pb2.MultilineChartContent(
            tag=['train/rmsle', 'eval/rmsle']))
    metrics_category = layout_pb2.Category(
        title="metrics", chart=[loss_chart, rmsle_chart])
    return summary_lib.custom_scalar_pb(
        layout_pb2.Layout(category=[metrics_category]))
Esempio n. 4
0
    def _dump_tensors(self):
        """Writes the custom-scalars layout for all recorded tensors.

        Builds one Category per scope (one multiline Chart per tensor name),
        merges it with any layout previously written to
        self._layout_writer_dir, and writes the merged layout back. If no
        previous layout exists, the freshly built one is written as-is.
        """
        if not self._has_recorded_tensor:
            return

        layout_categories = []

        for scope in self._scope_tensor:
            chart = []
            for name in self._scope_tensor[scope]:
                # The negative lookahead keeps margin-chart helper tags
                # (containing 'margin') out of this tensor's multiline chart.
                chart.append(
                    layout_pb2.Chart(
                        title=name,
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[name + r'(?!.*margin.*)'])))
            category = layout_pb2.Category(title=scope, chart=chart)
            layout_categories.append(category)

        if layout_categories:
            layout_proto_to_write = layout_pb2.Layout(
                category=layout_categories)

            try:
                # Load former layout_proto from self._layout_writer_dir.
                multiplexer = event_multiplexer.EventMultiplexer()
                multiplexer.AddRunsFromDirectory(self._layout_writer_dir)
                multiplexer.Reload()
                tensor_events = multiplexer.Tensors(
                    '.', metadata.CONFIG_SUMMARY_TAG)
                shutil.rmtree(self._layout_writer_dir)

                # Parse layout proto from disk.
                string_array = tf.make_ndarray(tensor_events[0].tensor_proto)
                # np.asscalar() was removed in NumPy 1.23; ndarray.item()
                # is the direct replacement.
                content = string_array.item()
                layout_proto_from_disk = layout_pb2.Layout()
                layout_proto_from_disk.ParseFromString(
                    tf.compat.as_bytes(content))

                # Merge the on-disk layout with the newly built one.
                merged_layout_json = merge(
                    json_format.MessageToJson(layout_proto_from_disk),
                    json_format.MessageToJson(layout_proto_to_write))
                merged_layout_proto = layout_pb2.Layout()
                json_format.Parse(str(merged_layout_json), merged_layout_proto)

                self._layout_writer = tf.summary.FileWriter(
                    self._layout_writer_dir)
                layout_summary = summary_lib.custom_scalar_pb(
                    merged_layout_proto)
                self._layout_writer.add_summary(layout_summary)
                self._layout_writer.close()
            except KeyError:
                # No previous layout exists (the '.' run or config tag is
                # missing): write the current layout proto into disk.
                self._layout_writer = tf.summary.FileWriter(
                    self._layout_writer_dir)
                layout_summary = summary_lib.custom_scalar_pb(
                    layout_proto_to_write)
                self._layout_writer.add_summary(layout_summary)
                self._layout_writer.close()
Esempio n. 5
0
def custom_scalars(layout):
    """Convert a dict-based layout spec into a custom-scalars Summary.

    `layout` maps category title -> {chart title: (kind, tags)}, where
    kind "Margin" expects exactly three tags (value, lower, upper) and any
    other kind produces a multiline chart.
    """
    categories = []
    for category_title, charts_spec in layout.items():
        charts = []
        for chart_title, chart_meta in charts_spec.items():
            tags = chart_meta[1]
            if chart_meta[0] == "Margin":
                assert len(tags) == 3
                margin_content = layout_pb2.MarginChartContent(series=[
                    layout_pb2.MarginChartContent.Series(
                        value=tags[0], lower=tags[1], upper=tags[2])
                ])
                charts.append(
                    layout_pb2.Chart(title=chart_title, margin=margin_content))
            else:
                multiline_content = layout_pb2.MultilineChartContent(tag=tags)
                charts.append(
                    layout_pb2.Chart(
                        title=chart_title, multiline=multiline_content))
        categories.append(
            layout_pb2.Category(title=category_title, chart=charts))

    layout = layout_pb2.Layout(category=categories)
    plugin_data = SummaryMetadata.PluginData(plugin_name="custom_scalars")
    smd = SummaryMetadata(plugin_data=plugin_data)
    tensor = TensorProto(
        dtype="DT_STRING",
        string_val=[layout.SerializeToString()],
        tensor_shape=TensorShapeProto(),
    )
    return Summary(value=[
        Summary.Value(
            tag="custom_scalars__config__", tensor=tensor, metadata=smd)
    ])
Esempio n. 6
0
def layout_dashboard(writer):
    """Writes the custom-scalars dashboard layout to `writer`.

    The layout only needs to be specified once (not per step), so this is
    a plain Python call rather than an op in the graph.
    """
    from tensorboard import summary
    from tensorboard.plugins.custom_scalar import layout_pb2

    layout_summary = summary.custom_scalar_pb(layout_pb2.Layout(
        category=[
            layout_pb2.Category(
                title='losses',
                chart=[
                    layout_pb2.Chart(
                        title='losses',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'loss.*'],
                        )),
                    layout_pb2.Chart(
                        title='baz',
                        margin=layout_pb2.MarginChartContent(
                            series=[
                                layout_pb2.MarginChartContent.Series(
                                    value='loss/baz/scalar_summary',
                                    lower='baz_lower/baz/scalar_summary',
                                    upper='baz_upper/baz/scalar_summary'),
                            ],
                        )),
                ]),
            layout_pb2.Category(
                title='trig functions',
                chart=[
                    layout_pb2.Chart(
                        title='wave trig functions',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'trigFunctions/cosine', r'trigFunctions/sine'],
                        )),
                    # The range of tangent is different. Let's give it its own chart.
                    layout_pb2.Chart(
                        title='tan',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'trigFunctions/tangent'],
                        )),
                ],
                # This category we care less about. Let's make it initially closed.
                closed=True),
        ]))
    writer.add_summary(layout_summary)
Esempio n. 7
0
def gen_multiline_charts(cfg):
    """Create one multiline Chart per (title, tags) pair in `cfg`."""
    return [
        layout_pb2.Chart(
            title=chart_title,
            multiline=layout_pb2.MultilineChartContent(tag=chart_tags))
        for chart_title, chart_tags in cfg
    ]
Esempio n. 8
0
def custom_board_generator(graph):
    """Builds the custom-scalars board layout for `graph`.

    Coefficient and scaled-coefficient charts are generated dynamically,
    one per entry in graph.PI_costs; the static 'Training' category groups
    the loss charts.
    """
    # NOTE(review): tags like r'Coeff_0_Comp_*' use '*' where a regex would
    # normally need '.*' -- confirm against the tag names actually logged
    # before changing.
    # range() is sufficient for loop indices; np.arange allocated an ndarray
    # just to iterate over it.
    coeff_chart = [
        layout_pb2.Chart(title='Coeff_' + str(idx),
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r'Coeff_' + str(idx) + '_Comp_*']))
        for idx in range(len(graph.PI_costs))
    ]
    coeff_scaled_chart = [
        layout_pb2.Chart(title='Scaled_Coeff_' + str(idx),
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r'Scaled_Coeff_' + str(idx) + '_Comp_*']))
        for idx in range(len(graph.PI_costs))
    ]

    # Actually making the board
    custom_board = custom_scalar_pb(
        layout_pb2.Layout(category=[
            layout_pb2.Category(
                title='Training',
                chart=[
                    layout_pb2.Chart(
                        title='MSE_Losses',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'MSE_cost_*'])),
                    layout_pb2.Chart(
                        title='PI_Losses',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'PI_cost_*'])),
                    layout_pb2.Chart(
                        title='L1_Losses',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'L1_cost_*'])),
                    layout_pb2.Chart(
                        title='Total_cost',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=['Total_cost'])),
                    layout_pb2.Chart(
                        title='Gradloss',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=['Loss_Grad'])),
                ]),
            layout_pb2.Category(title='Coefficients', chart=coeff_chart),
            layout_pb2.Category(title='Scaled_Coefficients',
                                chart=coeff_scaled_chart),
        ]))

    return custom_board
Esempio n. 9
0
def create_layout_summary():
    """Returns the custom-scalars layout summary for the demo scalars:
    a 'losses' category (multiline + margin charts) and an initially-closed
    'trig functions' category."""
    return cs_summary.pb(
        layout_pb2.Layout(category=[
            layout_pb2.Category(
                title="losses",
                chart=[
                    layout_pb2.Chart(
                        title="losses",
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r"loss(?!.*margin.*)"], ),
                    ),
                    layout_pb2.Chart(
                        title="baz",
                        margin=layout_pb2.MarginChartContent(series=[
                            layout_pb2.MarginChartContent.Series(
                                value="loss/baz",
                                lower="loss/baz_lower",
                                upper="loss/baz_upper",
                            ),
                        ], ),
                    ),
                ],
            ),
            layout_pb2.Category(
                title="trig functions",
                chart=[
                    layout_pb2.Chart(
                        title="wave trig functions",
                        multiline=layout_pb2.MultilineChartContent(tag=[
                            r"trigFunctions/cosine",
                            r"trigFunctions/sine",
                        ], ),
                    ),
                    # The range of tangent is different. Give it its own chart.
                    layout_pb2.Chart(
                        title="tan",
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r"trigFunctions/tangent"], ),
                    ),
                ],
                # This category we care less about. Make it initially closed.
                closed=True,
            ),
        ]))
Esempio n. 10
0
 def testMergedLayout(self):
     """Checks layout_impl() merges the per-run layouts into one Layout.

     The 'cross entropy' category appears in more than one run's layout
     and must come out merged (and closed).
     """
     parsed_layout = layout_pb2.Layout()
     json_format.Parse(self.plugin.layout_impl(), parsed_layout)
     correct_layout = layout_pb2.Layout(category=[
         # A category with this name is also present in a layout for a
         # different run (the logdir run)
         layout_pb2.Category(
             title='cross entropy',
             chart=[
                 layout_pb2.Chart(
                     title='cross entropy',
                     multiline=layout_pb2.MultilineChartContent(
                         tag=[r'cross entropy'], )),
                 layout_pb2.Chart(
                     title='cross entropy margin chart',
                     margin=layout_pb2.MarginChartContent(series=[
                         layout_pb2.MarginChartContent.Series(
                             value='cross entropy',
                             lower='cross entropy lower',
                             upper='cross entropy upper'),
                     ], )),
             ],
             closed=True,
         ),
         layout_pb2.Category(
             title='mean biases',
             chart=[
                 layout_pb2.Chart(
                     title='mean layer biases',
                     multiline=layout_pb2.MultilineChartContent(tag=[
                         r'mean/layer0/biases', r'mean/layer1/biases'
                     ], )),
             ]),
         layout_pb2.Category(
             title='std weights',
             chart=[
                 layout_pb2.Chart(
                     title='stddev layer weights',
                     multiline=layout_pb2.MultilineChartContent(
                         tag=[r'stddev/layer\d+/weights'], )),
             ]),
     ])
     self.assertProtoEquals(correct_layout, parsed_layout)
 def _init_custom_scalar_layout(self):
     """Writes the custom-scalars layout once: one category per metric,
     each with a single multiline chart matching all actor-N tags."""
     layout = layout_pb2.Layout(
         category=[
             layout_pb2.Category(
                 title="mean rewards",
                 chart=[
                     layout_pb2.Chart(
                         title="mean rewards per actor",
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r"actor-\d+/mean rewards"]
                         ),
                     )
                 ],
             ),
             layout_pb2.Category(
                 title="mean episode lengths",
                 chart=[
                     layout_pb2.Chart(
                         title="mean episode length per actor",
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r"actor-\d+/mean episode lengths"]
                         ),
                     )
                 ],
             ),
             layout_pb2.Category(
                 title="mean fruits eaten",
                 chart=[
                     layout_pb2.Chart(
                         title="mean fruits eaten per actor",
                         multiline=layout_pb2.MultilineChartContent(
                             tag=[r"actor-\d+/mean fruits eaten"]
                         ),
                     )
                 ],
             ),
         ]
     )
     self.writer.add_summary(summary.custom_scalar_pb(layout))
Esempio n. 12
0
def add_custom_scalar(logdir):
    """Writes the custom-scalars layout (one 'Loss' category of multiline
    charts) to the summary writer for `logdir`.

    NOTE(review): 'BLOMSUM45' looks like a typo for 'BLOSUM45', but the tag
    must match what is actually logged under 'Blast/*' -- confirm against
    the logging code before renaming.
    """
    summary_writer = SummaryWriterCache.get(logdir)
    layout_summary = summary.custom_scalar_pb(
        layout_pb2.Layout(category=[
            layout_pb2.Category(
                title='Loss',
                chart=[
                    layout_pb2.Chart(
                        title='Loss',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'1_loss/*'], )),
                    layout_pb2.Chart(
                        title='Loss Component',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'2_loss_component/*'], )),
                    layout_pb2.Chart(
                        title='Discriminator Values',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'3_discriminator_values/*'], )),
                    layout_pb2.Chart(
                        title='Variation of sequences',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'Stddev/*'], )),
                    layout_pb2.Chart(
                        title='BLOMSUM45',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'Blast/*/BLOMSUM45'], )),
                    layout_pb2.Chart(
                        title='Evalue',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'Blast/*/Evalue'], )),
                    layout_pb2.Chart(
                        title='Identity',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'Blast/*/Identity'], )),
                ]),
        ]))
    summary_writer.add_summary(layout_summary)
import argparse
from pathlib import Path

import tensorflow as tf

from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2

# Build a single-chart layout; the negative lookahead excludes the
# 'upper'/'lower' bound tags used by margin charts from the losses chart.
layout_summary = summary_lib.custom_scalar_pb(
    layout_pb2.Layout(category=[
        layout_pb2.Category(
            title='losses',
            chart=[
                # Chart 'losses' (include all losses, exclude upper and lower bounds)
                layout_pb2.Chart(title='losses',
                                 multiline=layout_pb2.MultilineChartContent(
                                     tag=[r'loss(?!.*bound.*)'])),
            ])
    ]))

parser = argparse.ArgumentParser()
parser.add_argument('folder', help='The log folder to place the layout in')
args = parser.parse_args()

# The layout gets its own 'layout' run directory under the given folder.
folder = (Path(args.folder) / 'layout').expanduser().resolve()
with tf.summary.FileWriter(folder) as writer:
    writer.add_summary(layout_summary)

print('Layout saved to', folder)
Esempio n. 14
0
def get_layout_chart(title, tags):
    """Return a multiline Chart named `title` whose lines match the given
    tag patterns (tags are treated as regexes, similar to .startswith())."""
    content = layout_pb2.MultilineChartContent(tag=tags)
    return layout_pb2.Chart(title=title, multiline=content)
    def __init__(self, *args, **kwargs):
        """Creates a temp logdir populated with layouts and scalar data.

        Three runs are generated: 'foo' (a layout plus 'squares' scalars),
        'bar' ('increments' scalars only), and '.' (a layout but no scalar
        data), then the plugin under test is created over the logdir.
        """
        super(CustomScalarsPluginTest, self).__init__(*args, **kwargs)
        self.logdir = os.path.join(self.get_temp_dir(), "logdir")
        os.makedirs(self.logdir)

        self.logdir_layout = layout_pb2.Layout(category=[
            layout_pb2.Category(
                title="cross entropy",
                chart=[
                    layout_pb2.Chart(
                        title="cross entropy",
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r"cross entropy"], ),
                    ),
                ],
                closed=True,
            )
        ])
        self.foo_layout = layout_pb2.Layout(category=[
            layout_pb2.Category(
                title="mean biases",
                chart=[
                    layout_pb2.Chart(
                        title="mean layer biases",
                        multiline=layout_pb2.MultilineChartContent(tag=[
                            r"mean/layer0/biases",
                            r"mean/layer1/biases",
                        ], ),
                    ),
                ],
            ),
            layout_pb2.Category(
                title="std weights",
                chart=[
                    layout_pb2.Chart(
                        title="stddev layer weights",
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r"stddev/layer\d+/weights"], ),
                    ),
                ],
            ),
            # A category with this name is also present in a layout for a
            # different run (the logdir run) and also contains a duplicate chart
            layout_pb2.Category(
                title="cross entropy",
                chart=[
                    layout_pb2.Chart(
                        title="cross entropy margin chart",
                        margin=layout_pb2.MarginChartContent(series=[
                            layout_pb2.MarginChartContent.Series(
                                value="cross entropy",
                                lower="cross entropy lower",
                                upper="cross entropy upper",
                            ),
                        ], ),
                    ),
                    layout_pb2.Chart(
                        title="cross entropy",
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r"cross entropy"], ),
                    ),
                ],
            ),
        ])

        # Generate test data.
        with test_util.FileWriterCache.get(os.path.join(self.logdir,
                                                        "foo")) as writer:
            writer.add_summary(
                test_util.ensure_tb_summary_proto(summary.pb(self.foo_layout)))
            for step in range(4):
                writer.add_summary(
                    test_util.ensure_tb_summary_proto(
                        scalar_summary.pb("squares", step * step)),
                    step,
                )

        with test_util.FileWriterCache.get(os.path.join(self.logdir,
                                                        "bar")) as writer:
            for step in range(3):
                writer.add_summary(
                    test_util.ensure_tb_summary_proto(
                        scalar_summary.pb("increments", step + 1)),
                    step,
                )

        # The '.' run lacks scalar data but has a layout.
        with test_util.FileWriterCache.get(self.logdir) as writer:
            writer.add_summary(
                test_util.ensure_tb_summary_proto(
                    summary.pb(self.logdir_layout)))

        self.plugin = self.createPlugin(self.logdir)
Esempio n. 16
0
def run():
    """Run custom scalar demo and generate event files.

    Builds scalar summaries (losses, a margin-chart triple, and trig
    functions of the step), writes the dashboard layout once, then logs
    the merged summaries for 42 steps.
    """
    step = tf.placeholder(tf.float32, shape=[])

    with tf.name_scope('loss'):
        # Specify 2 different loss values, each tagged differently.
        summary_lib.scalar('foo', tf.pow(0.9, step))
        summary_lib.scalar('bar', tf.pow(0.85, step + 2))

        # Log metric baz as well as upper and lower bounds for a margin chart.
        middle_baz_value = step + 4 * tf.random_uniform([]) - 2
        summary_lib.scalar('baz', middle_baz_value)
        summary_lib.scalar('baz_lower',
                           middle_baz_value - 6.42 - tf.random_uniform([]))
        summary_lib.scalar('baz_upper',
                           middle_baz_value + 6.42 + tf.random_uniform([]))

    with tf.name_scope('trigFunctions'):
        summary_lib.scalar('cosine', tf.cos(step))
        summary_lib.scalar('sine', tf.sin(step))
        summary_lib.scalar('tangent', tf.tan(step))

    merged_summary = tf.summary.merge_all()

    with tf.Session() as sess, tf.summary.FileWriter(LOGDIR) as writer:
        # We only need to specify the layout once (instead of per step).
        layout_summary = summary_lib.custom_scalar_pb(
            layout_pb2.Layout(category=[
                layout_pb2.Category(
                    title='losses',
                    chart=[
                        layout_pb2.Chart(
                            title='losses',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'loss(?!.*margin.*)'], )),
                        layout_pb2.Chart(
                            title='baz',
                            margin=layout_pb2.MarginChartContent(series=[
                                layout_pb2.MarginChartContent.Series(
                                    value='loss/baz/scalar_summary',
                                    lower='loss/baz_lower/scalar_summary',
                                    upper='loss/baz_upper/scalar_summary'),
                            ], )),
                    ]),
                layout_pb2.Category(
                    title='trig functions',
                    chart=[
                        layout_pb2.Chart(
                            title='wave trig functions',
                            multiline=layout_pb2.MultilineChartContent(tag=[
                                r'trigFunctions/cosine', r'trigFunctions/sine'
                            ], )),
                        # The range of tangent is different. Give it its own chart.
                        layout_pb2.Chart(
                            title='tan',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'trigFunctions/tangent'], )),
                    ],
                    # This category we care less about. Make it initially closed.
                    closed=True),
            ]))
        writer.add_summary(layout_summary)

        # range() works on both Python 2 and 3; xrange is Python-2 only.
        # The result is renamed from 'summary' to avoid shadowing the
        # commonly imported tensorboard 'summary' module name.
        for i in range(42):
            summary_proto = sess.run(merged_summary, feed_dict={step: i})
            writer.add_summary(summary_proto, global_step=i)
import argparse
from pathlib import Path

import tensorflow as tf

from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2

# Build a custom-scalar layout that groups the loss curves into two charts
# ('Train' and 'Val') on TensorBoard's Custom Scalars dashboard.
layout_summary = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=[
    layout_pb2.Category(
        title='Losses',
        chart=[
            layout_pb2.Chart(
                title='Train', multiline=layout_pb2.MultilineChartContent(tag=['loss/train/mse', 'loss/train/l1'])),
            # Fixed copy-paste bug: the 'Val' chart previously listed
            # 'loss/train/mse' alongside 'loss/val/mse'; it now shows the
            # validation counterparts of the 'Train' chart's tags.
            layout_pb2.Chart(
                title='Val', multiline=layout_pb2.MultilineChartContent(tag=['loss/val/mse', 'loss/val/l1'])),
        ])
]))

parser = argparse.ArgumentParser()
parser.add_argument('folder', help='The log folder to place the layout in')
args = parser.parse_args()

# Expand '~' and resolve symlinks so the layout lands in a stable location.
folder = (Path(args.folder) / 'layout').expanduser().resolve()
with tf.summary.FileWriter(folder) as writer:
    writer.add_summary(layout_summary)

print('Layout saved to', folder)
Esempio n. 18
0
    def graph_setup(self):
        """
        Set up the computation graph for the neural network based on the
        parameters set at initialization.

        Builds, inside ``self.graph``:
          * placeholders for hyper-parameters, encoder inputs, decoder
            question/answer pairs and per-decoder latent selection noise;
          * one fully-connected encoder per input, concatenated into a shared
            latent layer;
          * per-decoder learnable "selection" log-factors that inject scaled
            noise into the latent before decoding;
          * squared-error answer costs split into local / non-local groups,
            an Adam optimizer and (pre)training ops;
          * TensorBoard scalar/histogram summaries and a custom-scalar layout
            grouping the selection factors per decoder.
        """
        with self.graph.as_default():

            #######################
            # Define placeholders #
            #######################
            self.gamma = tf.placeholder(tf.float32, shape=[], name='gamma')
            self.learning_rate = tf.placeholder(tf.float32, shape=[], name='learning_rate')
            self.reg_loss_factor = tf.placeholder(tf.float32, shape=[], name='reg_loss_factor')
            self.nloc_factor = tf.placeholder(tf.float32, shape=[], name='nloc_factor')
            self.total_latent_size = np.sum(self.latent_sizes)

            self.inputs = [tf.placeholder(tf.float32, [None, self.input_sizes[k]], name='input{}'.format(k))
                           for k in range(self.encoder_num)]

            self.question_inputs = [
                tf.placeholder(tf.float32, shape=[None, self.question_sizes[i]], name='q_dec{}'.format(i))
                for i in range(self.decoder_num)
            ]

            self.answers = [
                tf.placeholder(tf.float32, shape=[None, self.answer_sizes[i]], name='q_dec{}'.format(i))
                for i in range(self.decoder_num)
            ]

            self.select_noise = [
                tf.placeholder(tf.float32, shape=[None, self.total_latent_size], name='select_noise_{}'.format(i))
                for i in range(self.decoder_num)
            ]

            def fc_layer(in_layer, num_outputs, activation_fn, collection='std'):
                # L2-regularized fully connected layer; variables are added to
                # `collection` so train ops can select subsets of weights.
                return fully_connected(in_layer, num_outputs, activation_fn,
                                       weights_regularizer=l2_regularizer(1.),
                                       biases_regularizer=l2_regularizer(1.),
                                       variables_collections=[collection])

            ##########################################
            # Set up variables and computation graph #
            ##########################################
            self.individual_latent = []
            for k in range(self.encoder_num):
                with tf.variable_scope('encoder_{}'.format(k)):
                    temp_layer = self.inputs[k]
                    for n in self.encoder_num_units:
                        temp_layer = fc_layer(temp_layer, num_outputs=n, activation_fn=tf.nn.elu)
                    # Linear output head producing this encoder's latent slice.
                    self.individual_latent.append(fc_layer(temp_layer, num_outputs=self.latent_sizes[k], activation_fn=tf.identity))

            with tf.variable_scope('latent_layer'):
                self.full_latent = tf.concat(self.individual_latent, axis=1)
                # Per-dimension batch std, used to scale the injected noise.
                latent_std = tf.math.sqrt(tf.nn.moments(self.full_latent, axes=[0])[1])
                self.select_logs = []
                self.dec_inputs = []
                for n in range(self.decoder_num):
                    with tf.variable_scope('select_dec{}'.format(n)):
                        # Log of the noise scale per latent dimension; starts
                        # very negative, i.e. essentially no noise at first.
                        selectors = tf.get_variable('sf_log',
                                                    initializer=tf.initializers.constant(-10.),
                                                    shape=self.total_latent_size,
                                                    collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'sel'])
                        self.select_logs.append(selectors)
                        self.dec_inputs.append(self.full_latent + latent_std * tf.exp(selectors) * self.select_noise[n])

            self.outputs = []
            for n in range(self.decoder_num):
                with tf.variable_scope('dec{}'.format(n)):
                    temp_layer = tf.concat([self.dec_inputs[n], self.question_inputs[n]], axis=1, name='dec_in')

                    for q in self.decoder_num_units:
                        temp_layer = fc_layer(temp_layer, num_outputs=q, activation_fn=tf.nn.elu)

                    # Scale the linear head to the [-pi/2, pi/2]-ish range.
                    out = np.pi / 2. * fc_layer(temp_layer, num_outputs=self.answer_sizes[n], activation_fn=tf.identity)

                self.outputs.append(out)

            #####################
            # Cost and training #
            #####################
            with tf.name_scope('cost'):
                sel_cost_list = []
                ans_cost_list = []
                for n in range(self.decoder_num):
                    sel_cost_list.append(tf.reduce_mean(self.select_logs[n]))
                    ans_cost_list.append(tf.reduce_mean(tf.reduce_sum(tf.squared_difference(self.answers[n], self.outputs[n]), axis=1)))

                # Maximizing the selection log-factors == minimizing their
                # negated sum.
                self.cost_select = (-1) * tf.add_n(sel_cost_list)
                # First half of the decoders count as "local", the rest as
                # "non-local".
                loc_cut = int(ceil(self.decoder_num / 2))
                self.cost_loc = tf.add_n([ans_cost_list[i] for i in range(0, loc_cut)], name='cost_local')
                # Fixed copy-paste bug: this op was also named 'cost_local'.
                self.cost_nloc = tf.add_n([ans_cost_list[i] for i in range(loc_cut, self.decoder_num)], name='cost_nloc')
                self.weighted_cost = (self.cost_loc + self.nloc_factor * self.cost_nloc) / (1. + self.nloc_factor)

            with tf.name_scope('reg_loss'):
                self.reg_loss = tf.losses.get_regularization_loss()

            with tf.name_scope('optimizer'):
                optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)

                self.training_op = self.train_op_from_loss(optimizer, self.weighted_cost)
                # Pretraining leaves the 'sel' selection variables untouched.
                self.pretraining_op = self.train_op_from_loss(optimizer, self.weighted_cost, collections=['std', 'loc_decoder', 'nloc_decoder'])

            #########################
            # Tensorboard summaries #
            #########################

            # One multiline chart per decoder, collecting its sf_log_* scalars.
            chart = []
            for i in range(self.decoder_num):
                chart.append(layout_pb2.Chart(
                    title='Decoder {}'.format(i),
                    multiline=layout_pb2.MultilineChartContent(
                        # Trailing '_' keeps e.g. 'sf_log_1' from also
                        # matching 'sf_log_10_...' when decoder_num >= 10.
                        tag=[r'^sf_log_{}_'.format(i)]
                    )
                ))

            layout_summary = summary_lib.custom_scalar_pb(
                layout_pb2.Layout(category=[
                    layout_pb2.Category(
                        title='Select factors',
                        chart=chart)
                ])
            )

            # 'vd' = validation summaries, 'td' = training summaries.
            tf.summary.scalar('cost_select', self.cost_select, collections=['vd'])
            tf.summary.scalar('cost', self.weighted_cost, collections=['vd'])
            tf.summary.scalar('cost_td', self.weighted_cost, collections=['td'])
            tf.summary.scalar('cost_loc', self.cost_loc, collections=['vd'])
            tf.summary.scalar('cost_nloc', self.cost_nloc, collections=['vd'])
            tf.summary.scalar('reg_loss', self.reg_loss, collections=['vd'])

            # One scalar per (decoder, latent dimension) selection factor.
            for i in range(self.decoder_num):
                for l in range(self.total_latent_size):
                    tf.summary.scalar('sf_log_{}_{}'.format(i, l), self.select_logs[i][l], collections=['vd'])

            for i in range(len(self.decoder_num_units)):
                # contrib.fully_connected names scopes 'fully_connected',
                # 'fully_connected_1', ... within each variable scope.
                weight_id = '' if i == 0 else '_{}'.format(i)
                for j in range(self.decoder_num):
                    tf.summary.histogram('dec{}_weight_{}'.format(j, i),
                                         self.graph.get_tensor_by_name('dec{}/fully_connected{}/weights:0'.format(j, weight_id)),
                                         collections=['vd'])

            for i in range(len(self.encoder_num_units)):
                weight_id = '' if i == 0 else '_{}'.format(i)
                for k in range(self.encoder_num):
                    # Fixed: the tag previously omitted the encoder index k, so
                    # every encoder's histogram collided on 'enc_weight_{i}'
                    # (TF then auto-uniquifies the tags unpredictably).
                    tf.summary.histogram('enc{}_weight_{}'.format(k, i),
                                         self.graph.get_tensor_by_name('encoder_{}/fully_connected{}/weights:0'.format(k, weight_id)),
                                         collections=['vd'])

            self.summary_writer = tf.summary.FileWriter(io.tf_log_path + self.name + '/', graph=self.graph)
            self.summary_writer.add_summary(layout_summary)
            self.summary_writer.flush()
            self.vd_summaries = tf.summary.merge_all(key='vd')
            self.td_summaries = tf.summary.merge_all(key='td')
Esempio n. 19
0
  summary_lib.scalar('tangent', tf.tan(step))

merged_summary = tf.summary.merge_all()

logdir = '/tmp/custom_scalar_demo'
with tf.Session() as sess, tf.summary.FileWriter(logdir) as writer:
  # We only need to specify the layout once (instead of per step).
  layout_summary = summary_lib.custom_scalar_pb(layout_pb2.Layout(
      category=[
          layout_pb2.Category(
              title='losses',
              chart=[
                  layout_pb2.Chart(
                      title='losses',
                      multiline=layout_pb2.MultilineChartContent(
                          tag=[r'loss(?!.*margin.*)'],
                      )),
                  layout_pb2.Chart(
                      title='baz',
                      margin=layout_pb2.MarginChartContent(
                          series=[
                              layout_pb2.MarginChartContent.Series(
                                  value='loss/baz/scalar_summary',
                                  lower='loss/baz_lower_margin/scalar_summary',
                                  upper='loss/baz_upper_margin/scalar_summary'),
                          ],
                      )),
              ]),
          layout_pb2.Category(
              title='trig functions',
              chart=[
Esempio n. 20
0
    def __init__(self, *args, **kwargs):
        """Create a temp logdir and seed it with layouts plus scalar data."""
        super(CustomScalarsPluginTest, self).__init__(*args, **kwargs)
        self.logdir = os.path.join(self.get_temp_dir(), 'logdir')
        os.makedirs(self.logdir)

        # Layout for the logdir root (the '.' run): one closed category.
        cross_entropy_chart = layout_pb2.Chart(
            title='cross entropy',
            multiline=layout_pb2.MultilineChartContent(tag=[r'cross entropy']))
        self.logdir_layout = layout_pb2.Layout(category=[
            layout_pb2.Category(
                title='cross entropy',
                chart=[cross_entropy_chart],
                closed=True),
        ])

        # Layout for the 'foo' run, built category by category.
        mean_biases_category = layout_pb2.Category(
            title='mean biases',
            chart=[
                layout_pb2.Chart(
                    title='mean layer biases',
                    multiline=layout_pb2.MultilineChartContent(
                        tag=[r'mean/layer0/biases', r'mean/layer1/biases'])),
            ])
        std_weights_category = layout_pb2.Category(
            title='std weights',
            chart=[
                layout_pb2.Chart(
                    title='stddev layer weights',
                    multiline=layout_pb2.MultilineChartContent(
                        tag=[r'stddev/layer\d+/weights'])),
            ])
        # A category with this name is also present in a layout for a
        # different run (the logdir run)
        margin_category = layout_pb2.Category(
            title='cross entropy',
            chart=[
                layout_pb2.Chart(
                    title='cross entropy margin chart',
                    margin=layout_pb2.MarginChartContent(series=[
                        layout_pb2.MarginChartContent.Series(
                            value='cross entropy',
                            lower='cross entropy lower',
                            upper='cross entropy upper'),
                    ])),
            ])
        self.foo_layout = layout_pb2.Layout(category=[
            mean_biases_category,
            std_weights_category,
            margin_category,
        ])

        # Generate test data: 'foo' has a layout plus scalars.
        with tf.summary.FileWriter(os.path.join(self.logdir, 'foo')) as writer:
            writer.add_summary(summary.pb(self.foo_layout))
            for step in range(4):
                writer.add_summary(scalar_summary.pb('squares', step * step),
                                   step)

        # 'bar' has scalar data only.
        with tf.summary.FileWriter(os.path.join(self.logdir, 'bar')) as writer:
            for step in range(3):
                writer.add_summary(scalar_summary.pb('increments', step + 1),
                                   step)

        # The '.' run lacks scalar data but has a layout.
        with tf.summary.FileWriter(self.logdir) as writer:
            writer.add_summary(summary.pb(self.logdir_layout))

        self.plugin = self.createPlugin(self.logdir)
    def run_model(self):
        """Train the model in a fresh TF session and log metrics.

        Runs `self._iter_steps` mini-batch steps, writing train summaries to
        '<log_path>/train' and (every `self._print_interval` steps) validation
        summaries to '<log_path>/test', plus a custom-scalar layout grouping
        accuracy/loss charts. Tracks an accuracy/F1 history array and, at the
        end, saves parameters (framework 1) or prediction records and the
        history log (framework 2).
        """

        os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # gpu selection
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.per_process_gpu_memory_fraction = 1  # 100% gpu
        sess_config.gpu_options.allow_growth = True  # dynamic growth

        #iter_steps = (self._train_data.shape[0] / self._batch_size) * self._epoch
        #epoch = 1
        tot = 0
        # history rows: [step, activity accuracy, activity F1, user accuracy, user F1]
        history = np.empty([0, 5])

        with tf.Session(config=sess_config) as sess:

            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            # Separate writers so train/val curves appear as distinct runs.
            train_writer = tf.summary.FileWriter(self._log_path + '/train',
                                                 graph=sess.graph)
            test_write = tf.summary.FileWriter(self._log_path + '/test',
                                               graph=sess.graph)

            # Custom-scalar layout: one chart per metric, each overlaying the
            # train and val series.
            custom_layout = summary_lib.custom_scalar_pb(
                layout_pb2.Layout(category=[
                    layout_pb2.Category(
                        title='Accuracies',
                        chart=[
                            layout_pb2.Chart(title='Activity',
                                             multiline=layout_pb2.
                                             MultilineChartContent(tag=[
                                                 r'train_1/a_accuracy',
                                                 r'val_1/a_accuracy'
                                             ])),
                            layout_pb2.Chart(title='User',
                                             multiline=layout_pb2.
                                             MultilineChartContent(tag=[
                                                 r'train_1/u_accuracy',
                                                 r'val_1/u_accuracy'
                                             ])),
                        ]),
                    layout_pb2.Category(
                        title='Losses',
                        chart=[
                            layout_pb2.Chart(
                                title='Activity',
                                multiline=layout_pb2.MultilineChartContent(
                                    tag=[r'train_1/a_loss', r'val_1/a_loss'])),
                            layout_pb2.Chart(
                                title='User',
                                multiline=layout_pb2.MultilineChartContent(
                                    tag=[r'train_1/u_loss', r'val_1/u_loss'])),
                        ])
                ]))

            # The layout only needs to be written once, not per step.
            train_writer.add_summary(custom_layout)

            # result_array    = np.empty( [0, 2, len( self._test_data )] )
            # (truth, prediction) pairs per eval for activity / user labels.
            LARecord = np.empty([0, 2, self._test_data.shape[0]])
            LURecord = np.empty([0, 2, self._test_data.shape[0]])

            for i in range(self._iter_steps):

                data, la, lu = self.next_batch()
                # Exponentially decay the learning rate from _max_lr toward
                # _min_lr with time constant _decay_speed.
                lr = self._min_lr + (self._max_lr - self._min_lr) * math.exp(
                    -i / self._decay_speed)
                tot += data.shape[0]
                '''
                if self._framework == 1:
                    summary, _, _, _, _, _ = sess.run([self._merged, self._update_ops, self._a_train_step, self._u_train_step, self._a_accuracy_op, self._u_accuracy_op], feed_dict={
                        self._X:                data,
                        self._YA:               la,
                        self._YU:               lu,
                        self._learning_rate:    lr,
                        self._is_training:      True})
                elif self._framework == 2:
                    summary, _, _, = sess.run([self._merged, self._update_ops, self._a_train_step], feed_dict={
                        self._X:                data,
                        self._YA:               la,
                        self._YU:               lu,
                        self._learning_rate:    lr,
                        self._is_training:      True})
                else:
                    print("model error")
                    exit()
                '''
                # Framework 1 trains both the activity and user heads;
                # framework 2 only runs the activity train step.
                if self._framework == 1:
                    _, _, _, a_acc_train, u_acc_train, _, _, a_loss, u_loss, _lr = sess.run(
                        [
                            self._update_ops, self._a_train_step,
                            self._u_train_step, self._a_accuracy_train,
                            self._u_accuracy_train, self._a_accuracy_op_train,
                            self._u_accuracy_op_train, self._a_loss_train,
                            self._u_loss_train, self._lr
                        ],
                        feed_dict={
                            self._X: data,
                            self._YA: la,
                            self._YU: lu,
                            self._learning_rate: lr,
                            self._is_training: True
                        })
                elif self._framework == 2:
                    _, _, a_acc_train, u_acc_train, _, _, a_loss, u_loss, _lr = sess.run(
                        [
                            self._update_ops, self._a_train_step,
                            self._a_accuracy_train, self._u_accuracy_train,
                            self._a_accuracy_op_train,
                            self._u_accuracy_op_train, self._a_loss_train,
                            self._u_loss_train, self._lr
                        ],
                        feed_dict={
                            self._X: data,
                            self._YA: la,
                            self._YU: lu,
                            self._learning_rate: lr,
                            self._is_training: True
                        })
                else:
                    print("model error")
                    exit()

                # The fetched values here are summary protos (written directly
                # to the event file), not raw floats.
                train_writer.add_summary(a_acc_train, i)
                train_writer.add_summary(u_acc_train, i)
                train_writer.add_summary(a_loss, i)
                train_writer.add_summary(u_loss, i)
                train_writer.add_summary(_lr, i)

                if i % self._print_interval == 0:

                    # added reset for validation metrics
                    # Re-initialize streaming-metric local variables so each
                    # evaluation starts from a clean accumulator.
                    stream_vars_valid = [
                        v for v in tf.local_variables() if 'val/' in v.name
                    ]
                    sess.run(tf.variables_initializer(stream_vars_valid))

                    a_acc_val, u_acc_val, a_loss, u_loss, LATruth, LAPreds, LUTruth, LUPreds = self.predict(
                        sess, lr)

                    LARecord = np.append(LARecord,
                                         np.expand_dims(
                                             np.vstack((LATruth, LAPreds)), 0),
                                         axis=0)
                    LURecord = np.append(LURecord,
                                         np.expand_dims(
                                             np.vstack((LUTruth, LUPreds)), 0),
                                         axis=0)

                    # NOTE(review): accuracy_score/f1_score are called with the
                    # labels argument positionally; recent scikit-learn
                    # versions require it as a keyword (labels=...) — verify
                    # against the pinned sklearn version.
                    AAccuracy = accuracy_score(LATruth, LAPreds,
                                               range(self._dataset._act_num))
                    Af1 = f1_score(LATruth,
                                   LAPreds,
                                   range(self._dataset._act_num),
                                   average='macro')

                    UAccuracy = accuracy_score(LUTruth, LUPreds,
                                               range(self._dataset._user_num))
                    Uf1 = f1_score(LUTruth,
                                   LUPreds,
                                   range(self._dataset._user_num),
                                   average='macro')

                    test_write.add_summary(a_acc_val, i)
                    test_write.add_summary(u_acc_val, i)
                    test_write.add_summary(a_loss, i)
                    test_write.add_summary(u_loss, i)
                    print(
                        "step: {},   AAccuracy: {},  Af1: {},  UAccuracy: {},  Uf1: {}"
                        .format(i, AAccuracy, Af1, UAccuracy, Uf1))

                    history = np.concatenate(
                        (history,
                         np.array([[i, AAccuracy, Af1, UAccuracy, Uf1]])),
                        axis=0)

            # Framework 1 == pretraining: persist parameters and the history.
            if self._framework == 1:
                self.save_paremeters(sess)
                np.savetxt(self._result_path +
                           'log_history_pre_train_{}.txt'.format(self._fold),
                           history,
                           header='Step  AAaccuracy Af1 UAccuracy Uf1',
                           fmt='%d %1.4f %1.4f %1.4f %1.4f',
                           delimiter='\t')
                print('finish pretrain')

            # Framework 2 == main training: save history plus timestamped
            # prediction records for activity (AR) and user (UR).
            if self._framework == 2:

                # save log of train to file
                np.savetxt(self._result_path +
                           'log_history_train_{}.txt'.format(self._fold),
                           history,
                           header='Step  AAaccuracy Af1 UAccuracy Uf1',
                           fmt='%d %1.4f %1.4f %1.4f %1.4f',
                           delimiter='\t')

                LARecordFile = self._result_path + \
                    "AR_fold{}_".format(
                        self._fold) + time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))

                LURecordFile = self._result_path + \
                    "UR_fold{}_".format(
                        self._fold) + time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))

                np.save(LARecordFile, LARecord)
                np.save(LURecordFile, LURecord)
                print("finish train")

        tf.keras.backend.clear_session()
Esempio n. 22
0
    def __init__(self, scalars=None, groups=None, pr_curve=None, tfpn=None, num_classes=None, val_generator=None, val_steps=None, **kwargs):
        """Constructor

        Args:
            scalars:
                A dict mapping strings to tensors.
                These tensors will be evaluated and show up as a scalar summary.
            groups:
                A dict that defines groups of scalars and the op names that they group.
                Accepts regex for op names.
                Example: {'category A': {'chart A1': ['op_name_1', r'.*acc.*']}}
            pr_curve:
                Evaluate the precision-recall curve.
            tfpn:
                Publish TP (True Positives), FP (False Positives), FN (False Negatives), F1 Score, Precision, Recall. (DEVEL).
            num_classes:
                The number of classes (dimension 1 of the data).
            val_generator:
                The PR curve callback only works with a static validation_data. Pass a generator here to generate the val data
                on the fly.
            val_steps:
                The number of steps to use for the val_generator.
            log_dir: the path of the directory where to save the log
                files to be parsed by TensorBoard.
            histogram_freq: frequency (in epochs) at which to compute activation
                and weight histograms for the layers of the model. If set to 0,
                histograms won't be computed. Validation data (or split) must be
                specified for histogram visualizations.
            write_graph: whether to visualize the graph in TensorBoard.
                The log file can become quite large when
                write_graph is set to True.
            write_grads: whether to visualize gradient histograms in TensorBoard.
                `histogram_freq` must be greater than 0.
            batch_size: size of batch of inputs to feed to the network
                for histograms computation.
            write_images: whether to write model weights to visualize as
                image in TensorBoard.
            embeddings_freq: frequency (in epochs) at which selected embedding
                layers will be saved. If set to 0, embeddings won't be computed.
                Data to be visualized in TensorBoard's Embedding tab must be passed
                as `embeddings_data`.
            embeddings_layer_names: a list of names of layers to keep eye on. If
                None or empty list all the embedding layer will be watched.
            embeddings_metadata: a dictionary which maps layer name to a file name
                in which metadata for this embedding layer is saved. See the
                [details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
                about metadata files format. In case if the same metadata file is
                used for all embedding layers, string can be passed.
            embeddings_data: data to be embedded at layers specified in
                `embeddings_layer_names`. Numpy array (if the model has a single
                input) or list of Numpy arrays (if the model has multiple inputs).
                Learn [more about embeddings](https://www.tensorflow.org/programmers_guide/embedding)
            **kwargs:
                All keyword arguments are passed to the standard TensorBoard plugin.
        """
        super().__init__(**kwargs)

        if not isinstance(scalars, (dict, type(None))):
            raise ValueError("scalars must be a dict mapping Strings to Tensors")
        self.scalars = scalars

        if not isinstance(groups, (dict, type(None))):
            raise ValueError("groups must be a dict like {'category A': {'chart A1': ['op_name_1', 'op_name_2']}}")
        self.groups = groups

        if pr_curve and num_classes is None:
            raise ValueError("pr_curve requires num_classes to be set.")

        self.pr_curve = pr_curve
        self.pr_summary = []
        self.num_classes = num_classes

        self.val_generator = val_generator
        self.val_steps = val_steps

        self.layout_summary = None
        if groups:
            categories = []
            for category_name, chart in groups.items():
                chart_list = []
                for chart_name, op_list in chart.items():
                    chart_list.append(
                        layout_pb2.Chart(
                            title=chart_name,
                            multiline=layout_pb2.MultilineChartContent(tag=op_list)))
                categories.append(
                    layout_pb2.Category(title=category_name, chart=chart_list))
            self.layout_summary = summary.custom_scalar_pb(
                layout_pb2.Layout(category=categories))

        self.tfpn = tfpn
        self.precision_summary = None
        self.recall_summary = None
        self.f1_summary = None
        self.tp_summary = None
        self.fn_summary = None
        self.fp_summary = None