def test_audio(self):
    """A legacy audio summary migrates to a tensor-backed audio value."""
    with tf.compat.v1.Graph().as_default():
        waveform = tf.reshape(
            tf.linspace(0.0, 100.0, 4 * 10 * 2), (4, 10, 2)
        )
        legacy_op = tf.compat.v1.summary.audio("k488", waveform, 44100)
        legacy_value = self._value_from_op(legacy_op)
    assert legacy_value.HasField("audio"), legacy_value
    migrated = data_compat.migrate_value(legacy_value)
    self.assertEqual("k488/audio/0", migrated.tag)
    expected_metadata = audio_metadata.create_summary_metadata(
        display_name="k488/audio/0",
        description="",
        encoding=audio_metadata.Encoding.Value("WAV"),
        converted_to_tensor=True,
    )
    # Compare the plugin payloads as parsed protos first...
    actual_content = audio_metadata.parse_plugin_metadata(
        migrated.metadata.plugin_data.content
    )
    expected_content = audio_metadata.parse_plugin_metadata(
        expected_metadata.plugin_data.content
    )
    self.assertEqual(actual_content, expected_content)
    # ...then normalize the serialized content bytes (serialized forms
    # need not be identical) and compare the remaining metadata wholesale.
    migrated.metadata.plugin_data.content = (
        expected_metadata.plugin_data.content
    )
    self.assertEqual(expected_metadata, migrated.metadata)
    self.assertTrue(migrated.HasField("tensor"))
    tensor_data = tensor_util.make_ndarray(migrated.tensor)
    self.assertEqual((1, 2), tensor_data.shape)
    self.assertEqual(
        tf.compat.as_bytes(legacy_value.audio.encoded_audio_string),
        tensor_data[0][0],
    )
    self.assertEqual(b"", tensor_data[0][1])  # empty label
def test_metadata(self):
    """The emitted summary carries the description and WAV plugin data."""
    samples = np.array(1, np.float32, ndmin=3)
    note = "Piano Concerto No. 23 (K488), in **A major.**"
    pb = self.audio("k488", samples, 44100, description=note)
    self.assertEqual(1, len(pb.value))
    md = pb.value[0].metadata
    self.assertEqual(note, md.summary_description)
    self.assertEqual(metadata.PLUGIN_NAME, md.plugin_data.plugin_name)
    parsed = metadata.parse_plugin_metadata(md.plugin_data.content)
    self.assertEqual(metadata.Encoding.Value("WAV"), parsed.encoding)
def test_metadata(self):
    """Default metadata: display name from tag, empty description, WAV."""
    pb = self.compute_and_check_summary_pb('k488', self.stereo)
    self.assertEqual(1, len(pb.value))
    value = pb.value[0]
    self.assertEqual('k488/audio_summary', value.tag)
    md = value.metadata
    self.assertEqual('k488', md.display_name)
    self.assertEqual('', md.summary_description)
    self.assertEqual(metadata.PLUGIN_NAME, md.plugin_data.plugin_name)
    parsed = metadata.parse_plugin_metadata(md.plugin_data.content)
    self.assertEqual(metadata.Encoding.Value('WAV'), parsed.encoding)
def _get_mime_type(self, ctx, experiment, run, tag):
    """Resolve the MIME type for one audio time series.

    Raises:
      errors.NotFoundError: if the data provider has no audio blob
        sequence for the given (run, tag).
    """
    # TODO(@wchargin): Move this call from `/audio` (called many
    # times) to `/tags` (called few times) to reduce data provider
    # calls.
    blob_sequences = self._data_provider.list_blob_sequences(
        ctx,
        experiment_id=experiment,
        plugin_name=metadata.PLUGIN_NAME,
    )
    series = blob_sequences.get(run, {}).get(tag, None)
    if series is None:
        raise errors.NotFoundError(
            "No audio data for run=%r, tag=%r" % (run, tag)
        )
    parsed = metadata.parse_plugin_metadata(series.plugin_content)
    # Fall back to a generic MIME type for unrecognized encodings.
    return _MIME_TYPES.get(parsed.encoding, _DEFAULT_MIME_TYPE)
def test_metadata_with_explicit_name_and_description(self):
    """Explicit display name and description are threaded into metadata."""
    name = 'Piano Concerto No. 23 (K488)'
    note = 'In **A major.**'
    pb = self.compute_and_check_summary_pb(
        'k488', self.stereo, display_name=name, description=note)
    self.assertEqual(1, len(pb.value))
    value = pb.value[0]
    self.assertEqual('k488/audio_summary', value.tag)
    md = value.metadata
    self.assertEqual(name, md.display_name)
    self.assertEqual(note, md.summary_description)
    self.assertEqual(metadata.PLUGIN_NAME, md.plugin_data.plugin_name)
    parsed = metadata.parse_plugin_metadata(md.plugin_data.content)
    self.assertEqual(metadata.Encoding.Value('WAV'), parsed.encoding)
def _index_impl(self, ctx, experiment):
    """Return information about the tags in each run.

    Result is a dictionary of the form

        {
          "runName1": {
            "tagName1": {
              "displayName": "The first tag",
              "description": "<p>Long ago there was just one tag...</p>",
              "samples": 3
            },
            "tagName2": ...,
            ...
          },
          "runName2": ...,
          ...
        }

    For each tag, `samples` is the greatest number of audio clips that
    appear at any particular step. (It's not related to "samples of a
    waveform.") For example, if for tag `minibatch_input` there are
    five audio clips at step 0 and ten audio clips at step 1, then the
    dictionary for `"minibatch_input"` will contain `"samples": 10`.
    """
    blob_sequences = self._data_provider.list_blob_sequences(
        ctx,
        experiment_id=experiment,
        plugin_name=metadata.PLUGIN_NAME,
    )
    # Seed every run so that runs whose tags are all filtered out
    # still appear with an empty tag map.
    index = {run: {} for run in blob_sequences}
    for run, tag_to_series in blob_sequences.items():
        for tag, series in tag_to_series.items():
            md = metadata.parse_plugin_metadata(series.plugin_content)
            # Skip time series written by an incompatible plugin version.
            if not self._version_checker.ok(md.version, run, tag):
                continue
            index[run][tag] = {
                "displayName": series.display_name,
                "description": plugin_util.markdown_to_safe_html(
                    series.description
                ),
                "samples": series.max_length,
            }
    return index
def _get_mime_type(self, run, tag):
    """Map the encoding recorded in the tag's metadata to a MIME type."""
    summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
    parsed = metadata.parse_plugin_metadata(
        summary_metadata.plugin_data.content
    )
    # Unknown encodings fall back to the default MIME type.
    return _MIME_TYPES.get(parsed.encoding, _DEFAULT_MIME_TYPE)
def _get_mime_type(self, run, tag):
    """Return the MIME type for the audio stored under (run, tag)."""
    plugin_bytes = self._multiplexer.SummaryMetadata(
        run, tag
    ).plugin_data.content
    encoding = metadata.parse_plugin_metadata(plugin_bytes).encoding
    return _MIME_TYPES.get(encoding, _DEFAULT_MIME_TYPE)