def index_impl(self, experiment=None):
    """Return {runName: {tagName: {displayName: ..., description: ...}}}."""
    if self._data_provider:
        mapping = self._data_provider.list_scalars(
            experiment_id=experiment,
            plugin_name=metadata.PLUGIN_NAME,
        )
        result = {run: {} for run in mapping}
        for (run, tag_to_content) in six.iteritems(mapping):
            for (tag, metadatum) in six.iteritems(tag_to_content):
                description = plugin_util.markdown_to_safe_html(
                    metadatum.description)
                result[run][tag] = {
                    'displayName': metadatum.display_name,
                    'description': description,
                }
        return result
    if self._db_connection_provider:
        # Read tags from the database.
        db = self._db_connection_provider()
        cursor = db.execute(
            '''
            SELECT
              Tags.tag_name,
              Tags.display_name,
              Runs.run_name
            FROM Tags
            JOIN Runs
              ON Tags.run_id = Runs.run_id
            WHERE
              Tags.plugin_name = ?
            ''', (metadata.PLUGIN_NAME,))
        result = collections.defaultdict(dict)
        for row in cursor:
            tag_name, display_name, run_name = row
            result[run_name][tag_name] = {
                'displayName': display_name,
                # TODO(chihuahua): Populate the description. Currently, the tags
                # table does not link with the description table.
                'description': '',
            }
        return result
    result = collections.defaultdict(lambda: {})
    mapping = self._multiplexer.PluginRunToTagToContent(
        metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for (tag, content) in six.iteritems(tag_to_content):
            content = metadata.parse_plugin_metadata(content)
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            result[run][tag] = {
                'displayName': summary_metadata.display_name,
                'description': plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description)
            }
    return result
def _index_impl(self):
    if self._db_connection_provider:
        db = self._db_connection_provider()
        cursor = db.execute(
            """
            SELECT
              Runs.run_name,
              Tags.tag_name,
              Tags.display_name,
              Descriptions.description,
              /* Subtract 2 for leading width and height elements. */
              MAX(CAST (Tensors.shape AS INT)) - 2 AS samples
            FROM Tags
            JOIN Runs USING (run_id)
            JOIN Tensors ON Tags.tag_id = Tensors.series
            LEFT JOIN Descriptions ON Tags.tag_id = Descriptions.id
            WHERE Tags.plugin_name = :plugin
              /* Shape should correspond to a rank-1 tensor. */
              AND NOT INSTR(Tensors.shape, ',')
              /* Required to use TensorSeriesStepIndex. */
              AND Tensors.step IS NOT NULL
            GROUP BY Tags.tag_id
            HAVING samples >= 1
            """,
            {"plugin": metadata.PLUGIN_NAME},
        )
        result = collections.defaultdict(dict)
        for row in cursor:
            run_name, tag_name, display_name, description, samples = row
            description = description or ""  # Handle missing descriptions.
            result[run_name][tag_name] = {
                "displayName": display_name,
                "description": plugin_util.markdown_to_safe_html(description),
                "samples": samples,
            }
        return result
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(
        metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for tag in tag_to_content:
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            tensor_events = self._multiplexer.Tensors(run, tag)
            samples = max([
                len(event.tensor_proto.string_val[2:])  # width, height
                for event in tensor_events
            ] + [0])
            result[run][tag] = {
                "displayName": summary_metadata.display_name,
                "description": plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description),
                "samples": samples,
            }
    return result
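# Illustrative sketch only (not part of the plugin): exercises the SQLite
# expressions used in the query above. For a rank-1 tensor whose shape is
# stored as the string "5", CAST(shape AS INT) - 2 yields 3 samples (the
# first two elements hold width and height, as the query comment notes),
# and INSTR(shape, ',') is 0, so the rank-1 filter passes. The table and
# column names above are not touched here; only the expressions are run.
import sqlite3

_db = sqlite3.connect(":memory:")
_samples, _is_rank_one = _db.execute(
    "SELECT CAST(? AS INT) - 2, NOT INSTR(?, ',')", ("5", "5")
).fetchone()
assert (_samples, _is_rank_one) == (3, 1)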
def text_array_to_html(text_arr):
    """Take a numpy.ndarray containing strings, and convert it into html.

    If the ndarray contains a single scalar string, that string is converted
    to html via our sanitized markdown parser. If it contains an array of
    strings, the strings are individually converted to html and then composed
    into a table using make_table. If the array contains dimensionality
    greater than 2, all but two of the dimensions are removed, and a warning
    message is prefixed to the table.

    Args:
      text_arr: A numpy.ndarray containing strings.

    Returns:
      The array converted to html.
    """
    if not text_arr.shape:
        # It is a scalar. No need to put it in a table, just apply markdown
        return plugin_util.markdown_to_safe_html(text_arr.item())
    warning = ""
    if len(text_arr.shape) > 2:
        warning = plugin_util.markdown_to_safe_html(
            WARNING_TEMPLATE % len(text_arr.shape))
        text_arr = reduce_to_2d(text_arr)
    table = plugin_util.markdowns_to_safe_html(
        text_arr.reshape(-1),
        lambda xs: make_table(np.array(xs).reshape(text_arr.shape)),
    )
    return warning + table
def text_array_to_html(text_arr):
    """Take a numpy.ndarray containing strings, and convert it into html.

    If the ndarray contains a single scalar string, that string is converted
    to html via our sanitized markdown parser. If it contains an array of
    strings, the strings are individually converted to html and then composed
    into a table using make_table. If the array contains dimensionality
    greater than 2, all but two of the dimensions are removed, and a warning
    message is prefixed to the table.

    Args:
      text_arr: A numpy.ndarray containing strings.

    Returns:
      The array converted to html.
    """
    if not text_arr.shape:
        # It is a scalar. No need to put it in a table, just apply markdown
        return plugin_util.markdown_to_safe_html(np.asscalar(text_arr))
    warning = ''
    if len(text_arr.shape) > 2:
        warning = plugin_util.markdown_to_safe_html(
            WARNING_TEMPLATE % len(text_arr.shape))
        text_arr = reduce_to_2d(text_arr)
    html_arr = [plugin_util.markdown_to_safe_html(x)
                for x in text_arr.reshape(-1)]
    html_arr = np.array(html_arr).reshape(text_arr.shape)
    return warning + make_table(html_arr)
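# Minimal standalone sketch (assumptions, not the plugin code) of the shape
# dispatch described in the docstrings above: a rank-0 array is rendered
# directly, higher-rank arrays keep only the leading slice of the last two
# dimensions (consistent with the expectations in the tests further below)
# and are rendered cell by cell. `_to_html` is a hypothetical stand-in for
# plugin_util.markdown_to_safe_html; no sanitization or table assembly
# (make_table) is reproduced here.
import numpy as np


def _to_html(s):
    return "<p>%s</p>" % s


def _sketch_text_array_to_html(text_arr):
    if not text_arr.shape:  # rank-0: a single scalar string
        return _to_html(text_arr.item())
    if len(text_arr.shape) > 2:
        # Drop all but the last two dimensions by indexing the leading ones at 0.
        text_arr = text_arr[tuple(0 for _ in text_arr.shape[:-2])]
    cells = [_to_html(x) for x in text_arr.reshape(-1)]
    return np.array(cells).reshape(text_arr.shape)  # per-cell HTML for a table


assert _sketch_text_array_to_html(np.array("foo")) == "<p>foo</p>"
assert _sketch_text_array_to_html(np.array(["foo", "bar"])).tolist() == [
    "<p>foo</p>", "<p>bar</p>"]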
def index_impl(self, experiment=None):
    """Return {runName: {tagName: {displayName: ..., description: ...}}}."""
    if self._data_provider:
        mapping = self._data_provider.list_scalars(
            experiment_id=experiment,
            plugin_name=metadata.PLUGIN_NAME,
        )
        result = {run: {} for run in mapping}
        for (run, tag_to_content) in six.iteritems(mapping):
            for (tag, metadatum) in six.iteritems(tag_to_content):
                description = plugin_util.markdown_to_safe_html(
                    metadatum.description
                )
                result[run][tag] = {
                    "displayName": metadatum.display_name,
                    "description": description,
                }
        return result
    result = collections.defaultdict(lambda: {})
    mapping = self._multiplexer.PluginRunToTagToContent(
        metadata.PLUGIN_NAME
    )
    for (run, tag_to_content) in six.iteritems(mapping):
        for (tag, content) in six.iteritems(tag_to_content):
            content = metadata.parse_plugin_metadata(content)
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            result[run][tag] = {
                "displayName": summary_metadata.display_name,
                "description": plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description
                ),
            }
    return result
def _index_impl(self):
    if self._db_connection_provider:
        db = self._db_connection_provider()
        cursor = db.execute(
            '''
            SELECT
              Runs.run_name,
              Tags.tag_name,
              Tags.display_name,
              Descriptions.description,
              /* Subtract 2 for leading width and height elements. */
              MAX(CAST (Tensors.shape AS INT)) - 2 AS samples
            FROM Tags
            JOIN Runs USING (run_id)
            JOIN Tensors ON Tags.tag_id = Tensors.series
            LEFT JOIN Descriptions ON Tags.tag_id = Descriptions.id
            WHERE Tags.plugin_name = :plugin
              /* Shape should correspond to a rank-1 tensor. */
              AND NOT INSTR(Tensors.shape, ',')
              /* Required to use TensorSeriesStepIndex. */
              AND Tensors.step IS NOT NULL
            GROUP BY Tags.tag_id
            HAVING samples >= 1
            ''', {'plugin': metadata.PLUGIN_NAME})
        result = collections.defaultdict(dict)
        for row in cursor:
            run_name, tag_name, display_name, description, samples = row
            description = description or ''  # Handle missing descriptions.
            result[run_name][tag_name] = {
                'displayName': display_name,
                'description': plugin_util.markdown_to_safe_html(description),
                'samples': samples
            }
        return result
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for tag in tag_to_content:
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            tensor_events = self._multiplexer.Tensors(run, tag)
            samples = max([len(event.tensor_proto.string_val[2:])  # width, height
                           for event in tensor_events] + [0])
            result[run][tag] = {'displayName': summary_metadata.display_name,
                                'description': plugin_util.markdown_to_safe_html(
                                    summary_metadata.summary_description),
                                'samples': samples}
    return result
def _index_impl(self):
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(
        metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for tag in tag_to_content:
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            tensor_events = self._multiplexer.Tensors(run, tag)
            samples = max([
                len(event.tensor_proto.string_val[2:])  # width, height
                for event in tensor_events
            ] + [0])
            result[run][tag] = {
                'displayName': summary_metadata.display_name,
                'description': plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description),
                'samples': samples
            }
    return result
def _audio_response_for_run(self, tensor_events, run, tag, sample):
    """Builds a JSON-serializable object with information about audio.

    Args:
      tensor_events: A list of audio event_accumulator.TensorEvent objects.
      run: The name of the run.
      tag: The name of the tag the audio entries all belong to.
      sample: The zero-indexed sample of the audio clip for which to retrieve
        information. For instance, setting `sample` to `2` will fetch
        information about only the third audio clip of each batch, and steps
        with fewer than three audio clips will be omitted from the results.

    Returns:
      A list of dictionaries containing the wall time, step, label, content
      type, and query string for each audio entry.
    """
    response = []
    filtered_events = self._filter_by_sample(tensor_events, sample)
    content_type = self._get_mime_type(run, tag)
    for (index, tensor_event) in enumerate(filtered_events):
        data = tensor_util.make_ndarray(tensor_event.tensor_proto)
        label = data[sample, 1]
        response.append({
            'wall_time': tensor_event.wall_time,
            'step': tensor_event.step,
            'label': plugin_util.markdown_to_safe_html(label),
            'contentType': content_type,
            'query': self._query_for_individual_audio(run, tag, sample, index)
        })
    return response
def _get_tag_to_description(mapping):
    """Returns a map of tags to descriptions.

    Args:
      mapping: a nested map `d` such that `d[run][tag]` is a time series
        produced by DataProvider's `list_*` methods.

    Returns:
      A map from tag strings to description HTML strings. E.g.
      {
          "loss": "<h1>Multiple descriptions</h1><h2>For runs: test, train
          </h2><p>...</p>",
          "loss2": "<p>The lossy details</p>",
      }
    """
    tag_to_descriptions, description_to_runs = _get_tag_description_info(
        mapping)
    result = {}
    for tag in tag_to_descriptions:
        descriptions = sorted(tag_to_descriptions[tag])
        if len(descriptions) == 1:
            description = descriptions[0]
        else:
            description = _build_combined_description(
                descriptions, description_to_runs)
        result[tag] = plugin_util.markdown_to_safe_html(description)
    return result
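# Illustrative sketch (not the plugin code) of the grouping behind the
# function above: descriptions are collected per tag across runs, and a tag
# seen with more than one distinct description would get a combined
# "Multiple descriptions" block before markdown rendering. The `_mapping`
# literal uses plain strings as hypothetical stand-ins for the DataProvider
# time-series values.
import collections

_mapping = {
    "train": {"loss": "The training loss.", "loss2": "The lossy details"},
    "test": {"loss": "The test loss.", "loss2": "The lossy details"},
}
_tag_to_descriptions = collections.defaultdict(set)
for _run, _tag_to_desc in _mapping.items():
    for _tag, _description in _tag_to_desc.items():
        if _description:
            _tag_to_descriptions[_tag].add(_description)

assert sorted(_tag_to_descriptions["loss"]) == [
    "The test loss.", "The training loss."]  # would be combined
assert sorted(_tag_to_descriptions["loss2"]) == ["The lossy details"]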
def _audio_response_for_run(self, tensor_events, run, tag, sample):
    """Builds a JSON-serializable object with information about audio.

    Args:
      tensor_events: A list of audio event_accumulator.TensorEvent objects.
      run: The name of the run.
      tag: The name of the tag the audio entries all belong to.
      sample: The zero-indexed sample of the audio clip for which to retrieve
        information. For instance, setting `sample` to `2` will fetch
        information about only the third audio clip of each batch, and steps
        with fewer than three audio clips will be omitted from the results.

    Returns:
      A list of dictionaries containing the wall time, step, label, content
      type, and query string for each audio entry.
    """
    response = []
    filtered_events = self._filter_by_sample(tensor_events, sample)
    content_type = self._get_mime_type(run, tag)
    for (index, tensor_event) in enumerate(filtered_events):
        data = tf.make_ndarray(tensor_event.tensor_proto)
        label = data[sample, 1]
        response.append({
            'wall_time': tensor_event.wall_time,
            'step': tensor_event.step,
            'label': plugin_util.markdown_to_safe_html(label),
            'contentType': content_type,
            'query': self._query_for_individual_audio(run, tag, sample, index)
        })
    return response
def test_text_array_to_html(self):
    convert = text_plugin.text_array_to_html
    scalar = np.array('foo')
    scalar_expected = '<p>foo</p>'
    self.assertEqual(convert(scalar), scalar_expected)

    vector = np.array(['foo', 'bar'])
    vector_expected = textwrap.dedent("""\
        <table>
        <tbody>
        <tr>
        <td><p>foo</p></td>
        </tr>
        <tr>
        <td><p>bar</p></td>
        </tr>
        </tbody>
        </table>""")
    self.assertEqual(convert(vector), vector_expected)

    d2 = np.array([['foo', 'bar'], ['zoink', 'zod']])
    d2_expected = textwrap.dedent("""\
        <table>
        <tbody>
        <tr>
        <td><p>foo</p></td>
        <td><p>bar</p></td>
        </tr>
        <tr>
        <td><p>zoink</p></td>
        <td><p>zod</p></td>
        </tr>
        </tbody>
        </table>""")
    self.assertEqual(convert(d2), d2_expected)

    d3 = np.array([[['foo', 'bar'], ['zoink', 'zod']],
                   [['FOO', 'BAR'], ['ZOINK', 'ZOD']]])
    warning = plugin_util.markdown_to_safe_html(
        text_plugin.WARNING_TEMPLATE % 3)
    d3_expected = warning + textwrap.dedent("""\
        <table>
        <tbody>
        <tr>
        <td><p>foo</p></td>
        <td><p>bar</p></td>
        </tr>
        <tr>
        <td><p>zoink</p></td>
        <td><p>zod</p></td>
        </tr>
        </tbody>
        </table>""")
    self.assertEqual(convert(d3), d3_expected)
def _index_impl(self, experiment):
    if self._data_provider:
        mapping = self._data_provider.list_blob_sequences(
            experiment_id=experiment,
            plugin_name=metadata.PLUGIN_NAME,
        )
        result = {run: {} for run in mapping}
        for (run, tag_to_content) in six.iteritems(mapping):
            for (tag, metadatum) in six.iteritems(tag_to_content):
                description = plugin_util.markdown_to_safe_html(
                    metadatum.description
                )
                result[run][tag] = {
                    "displayName": metadatum.display_name,
                    "description": description,
                    "samples": metadatum.max_length - 2,  # width, height
                }
        return result
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(
        metadata.PLUGIN_NAME
    )
    for (run, tag_to_content) in six.iteritems(mapping):
        for tag in tag_to_content:
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            tensor_events = self._multiplexer.Tensors(run, tag)
            samples = max(
                [
                    len(event.tensor_proto.string_val[2:])  # width, height
                    for event in tensor_events
                ]
                + [0]
            )
            result[run][tag] = {
                "displayName": summary_metadata.display_name,
                "description": plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description
                ),
                "samples": samples,
            }
    return result
def tags_impl(self):
    """Creates the JSON object for the tags route response.

    Returns:
      The JSON object for the tags route response.
    """
    if self._db_connection_provider:
        # Read tags from the database.
        db = self._db_connection_provider()
        cursor = db.execute(
            """
            SELECT
              Tags.tag_name,
              Tags.display_name,
              Runs.run_name
            FROM Tags
            JOIN Runs
              ON Tags.run_id = Runs.run_id
            WHERE
              Tags.plugin_name = ?
            """,
            (metadata.PLUGIN_NAME,),
        )
        result = {}
        for (tag_name, display_name, run_name) in cursor:
            if run_name not in result:
                result[run_name] = {}
            result[run_name][tag_name] = {
                "displayName": display_name,
                # TODO(chihuahua): Populate the description. Currently, the tags
                # table does not link with the description table.
                "description": "",
            }
    else:
        # Read tags from events files.
        runs = self._multiplexer.Runs()
        result = {run: {} for run in runs}
        mapping = self._multiplexer.PluginRunToTagToContent(
            metadata.PLUGIN_NAME
        )
        for (run, tag_to_content) in six.iteritems(mapping):
            for (tag, _) in six.iteritems(tag_to_content):
                summary_metadata = self._multiplexer.SummaryMetadata(
                    run, tag
                )
                result[run][tag] = {
                    "displayName": summary_metadata.display_name,
                    "description": plugin_util.markdown_to_safe_html(
                        summary_metadata.summary_description
                    ),
                }
    return result
def text_array_to_html(text_arr, enable_markdown):
    """Take a numpy.ndarray containing strings, and convert it into html.

    If the ndarray contains a single scalar string, that string is converted
    to html via our sanitized markdown parser. If it contains an array of
    strings, the strings are individually converted to html and then composed
    into a table using make_table. If the array contains dimensionality
    greater than 2, all but two of the dimensions are removed, and a warning
    message is prefixed to the table.

    Args:
      text_arr: A numpy.ndarray containing strings.
      enable_markdown: boolean, whether to enable Markdown

    Returns:
      The array converted to html.
    """
    if not text_arr.shape:
        # It is a scalar. No need to put it in a table.
        if enable_markdown:
            return plugin_util.markdown_to_safe_html(text_arr.item())
        else:
            return plugin_util.safe_html(text_arr.item())
    warning = ""
    if len(text_arr.shape) > 2:
        warning = plugin_util.markdown_to_safe_html(
            WARNING_TEMPLATE % len(text_arr.shape))
        text_arr = reduce_to_2d(text_arr)
    if enable_markdown:
        table = plugin_util.markdowns_to_safe_html(
            text_arr.reshape(-1),
            lambda xs: make_table(np.array(xs).reshape(text_arr.shape)),
        )
    else:
        # Convert utf-8 bytes to str. The built-in np.char.decode doesn't work
        # on object arrays, and converting to a numpy chararray is lossy.
        decode = lambda bs: bs.decode("utf-8") if isinstance(bs, bytes) else bs
        text_arr_str = np.array(
            [decode(bs) for bs in text_arr.reshape(-1)]
        ).reshape(text_arr.shape)
        table = plugin_util.safe_html(make_table(text_arr_str))
    return warning + table
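# Standalone sketch of the element-wise decode used in the non-markdown
# branch above: as the comment there notes, np.char.decode doesn't work on
# object arrays, so cells are decoded one by one. Illustrative only; the
# data below is made up.
import numpy as np

_text_arr = np.array([b"foo", "bar"], dtype=object)  # mixed bytes/str cells
_decode = lambda bs: bs.decode("utf-8") if isinstance(bs, bytes) else bs
_decoded = np.array(
    [_decode(bs) for bs in _text_arr.reshape(-1)]
).reshape(_text_arr.shape)
assert _decoded.tolist() == ["foo", "bar"]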
def _index_impl(self):
    """Return information about the tags in each run.

    Result is a dictionary of the form
      {
        "runName1": {
          "tagName1": {
            "displayName": "The first tag",
            "description": "<p>Long ago there was just one tag...</p>",
            "samples": 3
          },
          "tagName2": ...,
          ...
        },
        "runName2": ...,
        ...
      }

    For each tag, `samples` is the greatest number of audio clips that
    appear at any particular step. (It's not related to "samples of a
    waveform.") For example, if for tag `minibatch_input` there are five
    audio clips at step 0 and ten audio clips at step 1, then the
    dictionary for `"minibatch_input"` will contain `"samples": 10`.
    """
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(
        metadata.PLUGIN_NAME
    )
    for (run, tag_to_content) in six.iteritems(mapping):
        for tag in tag_to_content:
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            tensor_events = self._multiplexer.Tensors(run, tag)
            samples = max(
                [
                    self._number_of_samples(event.tensor_proto)
                    for event in tensor_events
                ]
                + [0]
            )
            result[run][tag] = {
                "displayName": summary_metadata.display_name,
                "description": plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description
                ),
                "samples": samples,
            }
    return result
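# Tiny illustration (made-up numbers) of the `samples` value documented in
# the docstring above: it is the maximum clip count over all steps, and the
# trailing `+ [0]` guards against tags with no tensor events.
_clips_per_step = [5, 10, 7]  # e.g. five clips at step 0, ten at step 1, ...
assert max(_clips_per_step + [0]) == 10
assert max([] + [0]) == 0  # a tag with no events reports zero samples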
def index_impl(self):
    """Return {runName: {tagName: {displayName: ..., description: ...}}}."""
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for (tag, content) in six.iteritems(tag_to_content):
            content = metadata.parse_plugin_metadata(content)
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            result[run][tag] = {
                'displayName': summary_metadata.display_name,
                'description': plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description)}
    return result
def index_impl(self):
    """Return {runName: {tagName: {displayName: ..., description: ...}}}."""
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for (tag, content) in six.iteritems(tag_to_content):
            content = metadata.parse_summary_metadata(content)
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            result[run][tag] = {
                'displayName': summary_metadata.display_name,
                'description': plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description)}
    return result
def _index_impl(self):
    """Return information about the tags in each run.

    Result is a dictionary of the form
      {
        "runName1": {
          "tagName1": {
            "displayName": "The first tag",
            "description": "<p>Long ago there was just one tag...</p>",
            "samples": 3
          },
          "tagName2": ...,
        },
        "runName2": ...,
      }

    For each tag, `samples` is the greatest number of images that appear at
    any particular step. For example, if for tag `input_reshaped` there are 5
    samples at step 0 and 10 samples at step 1, then the dictionary for
    `"input_reshaped"` will contain `"samples": 10`.
    """
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(
        metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for tag in tag_to_content:
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            tensor_events = self._multiplexer.Tensors(run, tag)
            samples = max([
                len(event.tensor_proto.string_val[2:])  # width, height
                for event in tensor_events
            ] + [0])
            result[run][tag] = {
                'displayName': summary_metadata.display_name,
                'description': plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description),
                'samples': samples
            }
    return result
def _index_impl(self):
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for tag in tag_to_content:
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            tensor_events = self._multiplexer.Tensors(run, tag)
            samples = max([len(event.tensor_proto.string_val[2:])  # width, height
                           for event in tensor_events] + [0])
            result[run][tag] = {'displayName': summary_metadata.display_name,
                                'description': plugin_util.markdown_to_safe_html(
                                    summary_metadata.summary_description),
                                'samples': samples}
    return result
def _index_impl(self, ctx, experiment):
    mapping = self._data_provider.list_blob_sequences(
        ctx,
        experiment_id=experiment,
        plugin_name=metadata.PLUGIN_NAME,
    )
    result = {run: {} for run in mapping}
    for (run, tag_to_content) in mapping.items():
        for (tag, metadatum) in tag_to_content.items():
            description = plugin_util.markdown_to_safe_html(
                metadatum.description)
            result[run][tag] = {
                "displayName": metadatum.display_name,
                "description": description,
                "samples": metadatum.max_length - 2,  # width, height
            }
    return result
def _index_impl(self, ctx, experiment):
    """Return information about the tags in each run.

    Result is a dictionary of the form
      {
        "runName1": {
          "tagName1": {
            "displayName": "The first tag",
            "description": "<p>Long ago there was just one tag...</p>",
            "samples": 3
          },
          "tagName2": ...,
          ...
        },
        "runName2": ...,
        ...
      }

    For each tag, `samples` is the greatest number of audio clips that
    appear at any particular step. (It's not related to "samples of a
    waveform.") For example, if for tag `minibatch_input` there are five
    audio clips at step 0 and ten audio clips at step 1, then the
    dictionary for `"minibatch_input"` will contain `"samples": 10`.
    """
    mapping = self._data_provider.list_blob_sequences(
        ctx,
        experiment_id=experiment,
        plugin_name=metadata.PLUGIN_NAME,
    )
    result = {run: {} for run in mapping}
    for (run, tag_to_time_series) in mapping.items():
        for (tag, time_series) in tag_to_time_series.items():
            md = metadata.parse_plugin_metadata(time_series.plugin_content)
            if not self._version_checker.ok(md.version, run, tag):
                continue
            description = plugin_util.markdown_to_safe_html(
                time_series.description
            )
            result[run][tag] = {
                "displayName": time_series.display_name,
                "description": description,
                "samples": time_series.max_length,
            }
    return result
def tags_impl(self):
    """Creates the JSON object for the tags route response.

    Returns:
      The JSON object for the tags route response.
    """
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for (tag, _) in six.iteritems(tag_to_content):
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            result[run][tag] = {
                'displayName': summary_metadata.display_name,
                'description': plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description)}
    return result
def tags_impl(self, ctx, experiment):
    """Creates the JSON object for the tags route response.

    Returns:
      The JSON object for the tags route response.
    """
    mapping = self._data_provider.list_tensors(
        ctx, experiment_id=experiment, plugin_name=metadata.PLUGIN_NAME)
    result = {run: {} for run in mapping}
    for (run, tag_to_time_series) in six.iteritems(mapping):
        for (tag, time_series) in tag_to_time_series.items():
            result[run][tag] = {
                "displayName": time_series.display_name,
                "description": plugin_util.markdown_to_safe_html(
                    time_series.description),
            }
    return result
def index_impl(self, ctx, experiment=None):
    """Return {runName: {tagName: {displayName: ..., description: ...}}}."""
    mapping = self._data_provider.list_scalars(
        ctx,
        experiment_id=experiment,
        plugin_name=metadata.PLUGIN_NAME,
    )
    result = {run: {} for run in mapping}
    for (run, tag_to_content) in six.iteritems(mapping):
        for (tag, metadatum) in six.iteritems(tag_to_content):
            description = plugin_util.markdown_to_safe_html(
                metadatum.description)
            result[run][tag] = {
                "displayName": metadatum.display_name,
                "description": description,
            }
    return result
def _index_impl(self):
    """Return information about the tags in each run.

    Result is a dictionary of the form
      {
        "runName1": {
          "tagName1": {
            "displayName": "The first tag",
            "description": "<p>Long ago there was just one tag...</p>",
            "samples": 3
          },
          "tagName2": ...,
          ...
        },
        "runName2": ...,
        ...
      }

    For each tag, `samples` is the greatest number of audio clips that
    appear at any particular step. (It's not related to "samples of a
    waveform.") For example, if for tag `minibatch_input` there are five
    audio clips at step 0 and ten audio clips at step 1, then the
    dictionary for `"minibatch_input"` will contain `"samples": 10`.
    """
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for tag in tag_to_content:
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            tensor_events = self._multiplexer.Tensors(run, tag)
            samples = max([self._number_of_samples(event.tensor_proto)
                           for event in tensor_events] + [0])
            result[run][tag] = {
                'displayName': summary_metadata.display_name,
                'description': plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description),
                'samples': samples}
    return result
def index_impl(self, ctx, experiment=None):
    """Return {runName: {tagName: {displayName: ..., description: ...}}}."""
    mapping = self._data_provider.list_scalars(
        ctx,
        experiment_id=experiment,
        plugin_name=metadata.PLUGIN_NAME,
    )
    result = {run: {} for run in mapping}
    for (run, tag_to_content) in mapping.items():
        for (tag, metadatum) in tag_to_content.items():
            md = metadata.parse_plugin_metadata(metadatum.plugin_content)
            if not self._version_checker.ok(md.version, run, tag):
                continue
            description = plugin_util.markdown_to_safe_html(
                metadatum.description)
            result[run][tag] = {
                "displayName": metadatum.display_name,
                "description": description,
            }
    return result
def index_impl(self):
    """Return {runName: {tagName: {displayName: ..., description: ...}}}."""
    if self._db_connection_provider:
        # Read tags from the database.
        db = self._db_connection_provider()
        cursor = db.execute('''
            SELECT
              Tags.tag_name,
              Tags.display_name,
              Runs.run_name
            FROM Tags
            JOIN Runs
              ON Tags.run_id = Runs.run_id
            WHERE
              Tags.plugin_name = ?
        ''', (metadata.PLUGIN_NAME,))
        result = collections.defaultdict(dict)
        for row in cursor:
            tag_name, display_name, run_name = row
            result[run_name][tag_name] = {
                'displayName': display_name,
                # TODO(chihuahua): Populate the description. Currently, the tags
                # table does not link with the description table.
                'description': '',
            }
        return result
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for (tag, content) in six.iteritems(tag_to_content):
            content = metadata.parse_plugin_metadata(content)
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            result[run][tag] = {
                'displayName': summary_metadata.display_name,
                'description': plugin_util.markdown_to_safe_html(
                    summary_metadata.summary_description)}
    return result
def tags_impl(self, ctx, experiment):
    """Creates the JSON object for the tags route response.

    Returns:
      The JSON object for the tags route response.
    """
    mapping = self._data_provider.list_tensors(
        ctx, experiment_id=experiment, plugin_name=metadata.PLUGIN_NAME)
    result = {run: {} for run in mapping}
    for (run, tag_to_time_series) in mapping.items():
        for (tag, time_series) in tag_to_time_series.items():
            md = metadata.parse_plugin_metadata(time_series.plugin_content)
            if md.version != 0:
                self._maybe_warn_about_new_metadata(run, tag, md.version)
                continue
            result[run][tag] = {
                "displayName": time_series.display_name,
                "description": plugin_util.markdown_to_safe_html(
                    time_series.description),
            }
    return result
def index_impl(self, ctx, experiment):
    """Return {runName: {tagName: {displayName: ..., description: ...}}}."""
    mapping = self._data_provider.list_tensors(
        ctx,
        experiment_id=experiment,
        plugin_name=metadata.PLUGIN_NAME,
    )
    result = {run: {} for run in mapping}
    for (run, tag_to_content) in mapping.items():
        for (tag, metadatum) in tag_to_content.items():
            description = plugin_util.markdown_to_safe_html(
                metadatum.description)
            md = metadata.parse_plugin_metadata(metadatum.plugin_content)
            if md.version != 0:
                self._maybe_warn_about_new_metadata(run, tag, md.version)
                continue
            result[run][tag] = {
                "displayName": metadatum.display_name,
                "description": description,
            }
    return result
def test_text_array_to_html(self):
    convert = text_plugin.text_array_to_html
    scalar = np.array("foo")
    scalar_expected = "<p>foo</p>"
    self.assertEqual(convert(scalar), scalar_expected)

    # Check that underscores are preserved correctly; this detects erroneous
    # use of UTF-16 or UTF-32 encoding when calling markdown_to_safe_html(),
    # which would introduce spurious null bytes and cause undesired <em> tags
    # around the underscores.
    scalar_underscores = np.array("word_with_underscores")
    scalar_underscores_expected = "<p>word_with_underscores</p>"
    self.assertEqual(
        convert(scalar_underscores), scalar_underscores_expected)

    vector = np.array(["foo", "bar"])
    vector_expected = textwrap.dedent("""\
        <table>
        <tbody>
        <tr>
        <td><p>foo</p></td>
        </tr>
        <tr>
        <td><p>bar</p></td>
        </tr>
        </tbody>
        </table>
        """.rstrip())
    self.assertEqual(convert(vector), vector_expected)

    d2 = np.array([["foo", "bar"], ["zoink", "zod"]])
    d2_expected = textwrap.dedent("""\
        <table>
        <tbody>
        <tr>
        <td><p>foo</p></td>
        <td><p>bar</p></td>
        </tr>
        <tr>
        <td><p>zoink</p></td>
        <td><p>zod</p></td>
        </tr>
        </tbody>
        </table>
        """.rstrip())
    self.assertEqual(convert(d2), d2_expected)

    d3 = np.array([
        [["foo", "bar"], ["zoink", "zod"]],
        [["FOO", "BAR"], ["ZOINK", "ZOD"]],
    ])
    warning = plugin_util.markdown_to_safe_html(
        text_plugin.WARNING_TEMPLATE % 3)
    d3_expected = warning + textwrap.dedent("""\
        <table>
        <tbody>
        <tr>
        <td><p>foo</p></td>
        <td><p>bar</p></td>
        </tr>
        <tr>
        <td><p>zoink</p></td>
        <td><p>zod</p></td>
        </tr>
        </tbody>
        </table>
        """.rstrip())
    self.assertEqual(convert(d3), d3_expected)
def test_text_array_to_html(self):
    convert = text_plugin.text_array_to_html
    scalar = np.array('foo')
    scalar_expected = '<p>foo</p>'
    self.assertEqual(convert(scalar), scalar_expected)

    # Check that underscores are preserved correctly; this detects erroneous
    # use of UTF-16 or UTF-32 encoding when calling markdown_to_safe_html(),
    # which would introduce spurious null bytes and cause undesired <em> tags
    # around the underscores.
    scalar_underscores = np.array('word_with_underscores')
    scalar_underscores_expected = '<p>word_with_underscores</p>'
    self.assertEqual(
        convert(scalar_underscores), scalar_underscores_expected)

    vector = np.array(['foo', 'bar'])
    vector_expected = textwrap.dedent("""\
        <table>
        <tbody>
        <tr>
        <td><p>foo</p></td>
        </tr>
        <tr>
        <td><p>bar</p></td>
        </tr>
        </tbody>
        </table>""")
    self.assertEqual(convert(vector), vector_expected)

    d2 = np.array([['foo', 'bar'], ['zoink', 'zod']])
    d2_expected = textwrap.dedent("""\
        <table>
        <tbody>
        <tr>
        <td><p>foo</p></td>
        <td><p>bar</p></td>
        </tr>
        <tr>
        <td><p>zoink</p></td>
        <td><p>zod</p></td>
        </tr>
        </tbody>
        </table>""")
    self.assertEqual(convert(d2), d2_expected)

    d3 = np.array([[['foo', 'bar'], ['zoink', 'zod']],
                   [['FOO', 'BAR'], ['ZOINK', 'ZOD']]])
    warning = plugin_util.markdown_to_safe_html(
        text_plugin.WARNING_TEMPLATE % 3)
    d3_expected = warning + textwrap.dedent("""\
        <table>
        <tbody>
        <tr>
        <td><p>foo</p></td>
        <td><p>bar</p></td>
        </tr>
        <tr>
        <td><p>zoink</p></td>
        <td><p>zod</p></td>
        </tr>
        </tbody>
        </table>""")
    self.assertEqual(convert(d3), d3_expected)
def _test(self, markdown_string, expected):
    actual = plugin_util.markdown_to_safe_html(markdown_string)
    self.assertEqual(expected, actual)