Example #1
 def initialize_graph(self):
     self._audio_placeholder = tf.placeholder(
         dtype=tf.float32, name='audio_to_encode')
     self._samples_per_second_placeholder = tf.placeholder(
         dtype=tf.int32, name='samples_per_second')
     self._encode_op = tf.contrib.ffmpeg.encode_audio(
         self._audio_placeholder,
         file_format='wav',
         samples_per_second=self._samples_per_second_placeholder)
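For context, a minimal self-contained sketch of driving such an encoding graph (TensorFlow 1.x with tf.contrib.ffmpeg available; the variable names and the 440 Hz test tone are illustrative, not part of the snippet above):

import numpy as np
import tensorflow as tf

audio_placeholder = tf.placeholder(dtype=tf.float32, name='audio_to_encode')
rate_placeholder = tf.placeholder(dtype=tf.int32, name='samples_per_second')
encode_op = tf.contrib.ffmpeg.encode_audio(
    audio_placeholder, file_format='wav',
    samples_per_second=rate_placeholder)

with tf.Session() as sess:
    # One second of a 440 Hz sine at 16 kHz, shaped [samples, channels].
    t = np.arange(16000) / 16000.0
    mono = np.sin(2 * np.pi * 440.0 * t).astype(np.float32).reshape(-1, 1)
    wav_bytes = sess.run(encode_op, feed_dict={audio_placeholder: mono,
                                               rate_placeholder: 16000})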
Example #2
  def setUp(self):
    self.log_dir = tempfile.mkdtemp()

    # We use numpy.random to generate images. We seed to avoid non-determinism
    # in this test.
    numpy.random.seed(42)

    # Create old-style image summaries for run "foo".
    tf.reset_default_graph()
    sess = tf.Session()
    placeholder = tf.placeholder(tf.uint8)
    tf.summary.image(name="baz", tensor=placeholder)
    merged_summary_op = tf.summary.merge_all()
    foo_directory = os.path.join(self.log_dir, "foo")
    writer = tf.summary.FileWriter(foo_directory)
    writer.add_graph(sess.graph)
    for step in xrange(2):
      writer.add_summary(sess.run(merged_summary_op, feed_dict={
          placeholder: (numpy.random.rand(1, 16, 42, 3) * 255).astype(
              numpy.uint8)
      }), global_step=step)
    writer.close()

    # Create new-style image summaries for run "bar".
    tf.reset_default_graph()
    sess = tf.Session()
    placeholder = tf.placeholder(tf.uint8)
    summary.op(name="quux", images=placeholder,
               description="how do you pronounce that, anyway?")
    merged_summary_op = tf.summary.merge_all()
    bar_directory = os.path.join(self.log_dir, "bar")
    writer = tf.summary.FileWriter(bar_directory)
    writer.add_graph(sess.graph)
    for step in xrange(2):
      writer.add_summary(sess.run(merged_summary_op, feed_dict={
          placeholder: (numpy.random.rand(1, 8, 6, 3) * 255).astype(
              numpy.uint8)
      }), global_step=step)
    writer.close()

    # Start a server with the plugin.
    multiplexer = event_multiplexer.EventMultiplexer({
        "foo": foo_directory,
        "bar": bar_directory,
    })
    context = base_plugin.TBContext(
        logdir=self.log_dir, multiplexer=multiplexer)
    plugin = images_plugin.ImagesPlugin(context)
    # Setting a reload interval of -1 disables reloading. We disable reloading
    # because we want tests to block until a reload has finished, so this
    # setUp method reloads the multiplexer manually. TensorBoard would
    # otherwise reload in a non-blocking thread.
    wsgi_app = application.TensorBoardWSGIApp(
        self.log_dir, [plugin], multiplexer, reload_interval=-1, path_prefix='')
    self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
    multiplexer.Reload()
    self.routes = plugin.get_plugin_apps()
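A follow-up test built on this setup would typically exercise one of the registered routes through the Werkzeug test client. The sketch below is illustrative only: the route path and response shape are assumptions based on TensorBoard's usual /data/plugin/<plugin_name>/ convention (and it assumes `json` has been imported), not guarantees of the snippet above.

  def test_tags_route(self):
    # Hypothetical follow-up test; assumes the images plugin serves a JSON
    # "tags" route under the conventional /data/plugin/images/ prefix.
    response = self.server.get('/data/plugin/images/tags')
    self.assertEqual(200, response.status_code)
    run_to_tags = json.loads(response.get_data().decode('utf-8'))
    self.assertEqual(sorted(run_to_tags.keys()), ['bar', 'foo'])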
Example #3
  def testNewStyleScalarSummary(self):
    """Verify processing of tensorboard.plugins.scalar.summary."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = tf.summary.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      step = tf.placeholder(tf.float32, shape=[])
      scalar_summary.op('accuracy', 1.0 - 1.0 / (step + tf.constant(1.0)))
      scalar_summary.op('xent', 1.0 / (step + tf.constant(1.0)))
      merged = tf.summary.merge_all()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged, feed_dict={step: float(i)})
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'accuracy/scalar_summary',
        u'xent/scalar_summary',
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: tags,
        ea.GRAPH: True,
        ea.META_GRAPH: False,
    })
Example #4
    def generate_testdata(self, include_text=True, logdir=None):
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.string)
        summary_tensor = tf.summary.text('message', placeholder)
        vector_summary = tf.summary.text('vector', placeholder)
        scalar_summary = tf.summary.scalar('twelve', tf.constant(12))

        run_names = ['fry', 'leela']
        for run_name in run_names:
            subdir = os.path.join(logdir or self.logdir, run_name)
            writer = tf.summary.FileWriter(subdir)
            writer.add_graph(sess.graph)

            step = 0
            for gem in GEMS:
                message = run_name + ' *loves* ' + gem
                feed_dict = {
                    placeholder: message,
                }
                if include_text:
                    summ = sess.run(summary_tensor, feed_dict=feed_dict)
                    writer.add_summary(summ, global_step=step)
                step += 1

            vector_message = ['one', 'two', 'three', 'four']
            if include_text:
                summ = sess.run(vector_summary,
                                feed_dict={placeholder: vector_message})
                writer.add_summary(summ)

            summ = sess.run(scalar_summary, feed_dict={placeholder: []})
            writer.add_summary(summ)

            writer.close()
Example #5
    def generate_run(self, run_name):
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.float32, shape=[3])

        if run_name == self._RUN_WITH_LEGACY_HISTOGRAM:
            tf.summary.histogram(self._LEGACY_HISTOGRAM_TAG, placeholder)
        elif run_name == self._RUN_WITH_HISTOGRAM:
            summary.op(self._HISTOGRAM_TAG,
                       placeholder,
                       display_name=self._DISPLAY_NAME,
                       description=self._DESCRIPTION)
        elif run_name == self._RUN_WITH_SCALARS:
            tf.summary.scalar(self._SCALAR_TAG, tf.reduce_mean(placeholder))
        else:
            assert False, 'Invalid run name: %r' % run_name
        summ = tf.summary.merge_all()

        subdir = os.path.join(self.logdir, run_name)
        writer = tf.summary.FileWriter(subdir)
        writer.add_graph(sess.graph)
        for step in xrange(self._STEPS):
            feed_dict = {placeholder: [1 + step, 2 + step, 3 + step]}
            s = sess.run(summ, feed_dict=feed_dict)
            writer.add_summary(s, global_step=step)
        writer.close()
Example #6
def WriteAudioSeries(writer, tag, n_audio=1):
    """Write a few dummy audio clips to writer."""
    step = 0
    session = tf.Session()

    min_frequency_hz = 440
    max_frequency_hz = 880
    sample_rate = 4000
    duration_frames = sample_rate // 2  # 0.5 seconds.
    frequencies_per_run = 1
    num_channels = 2

    p = tf.placeholder("float32",
                       (frequencies_per_run, duration_frames, num_channels))
    s = tf.summary.audio(tag, p, sample_rate)

    for _ in xrange(n_audio):
        # Generate a different frequency for each channel to show stereo works.
        frequencies = np.random.random_integers(min_frequency_hz,
                                                max_frequency_hz,
                                                size=(frequencies_per_run,
                                                      num_channels))
        tiled_frequencies = np.tile(frequencies, (1, duration_frames))
        tiled_increments = np.tile(np.arange(0, duration_frames),
                                   (num_channels, 1)).T.reshape(
                                       1, duration_frames * num_channels)
        tones = np.sin(2.0 * np.pi * tiled_frequencies * tiled_increments /
                       sample_rate)
        tones = tones.reshape(frequencies_per_run, duration_frames,
                              num_channels)

        summ = session.run(s, feed_dict={p: tones})
        writer.add_summary(summ, step)
        step += 20
    session.close()
Example #7
 def test_when_bucket_count_not_statically_known(self):
   placeholder = tf.placeholder(tf.int32, shape=())
   bucket_count = 44
   pb = self.compute_and_check_summary_pb(
       bucket_count=bucket_count,
       bucket_count_tensor=placeholder,
       feed_dict={placeholder: bucket_count})
   buckets = tf.make_ndarray(pb.value[0].tensor)
   self.assertEqual(buckets.shape, (bucket_count, 3))
Example #8
def WriteImageSeries(writer, tag, n_images=1):
    """Write a few dummy images to writer."""
    step = 0
    session = tf.Session()
    p = tf.placeholder("uint8", (1, 4, 4, 3))
    s = tf.summary.image(tag, p)
    for _ in xrange(n_images):
        im = np.random.random_integers(0, 255, (1, 4, 4, 3))
        summ = session.run(s, feed_dict={p: im})
        writer.add_summary(summ, step)
        step += 20
    session.close()
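A minimal usage sketch for this helper (the log directory and tag below are illustrative):

writer = tf.summary.FileWriter('/tmp/image_series_demo')
WriteImageSeries(writer, 'noise_thumbnails', n_images=3)
writer.close()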
Example #9
    def _test_dimensions(self, alpha=False, static_dimensions=True):
        if not alpha:
            images = self.images
            channel_count = 3
        else:
            images = self.images_with_alpha
            channel_count = 4

        if static_dimensions:
            images_tensor = tf.constant(images, dtype=tf.uint8)
            feed_dict = {}
        else:
            images_tensor = tf.placeholder(tf.uint8)
            feed_dict = {images_tensor: images}

        pb = self.compute_and_check_summary_pb('mona_lisa',
                                               images,
                                               images_tensor=images_tensor,
                                               feed_dict=feed_dict)
        self.assertEqual(1, len(pb.value))
        result = pb.value[0].tensor.string_val

        # Check annotated dimensions.
        self.assertEqual(tf.compat.as_bytes(str(self.image_width)), result[0])
        self.assertEqual(tf.compat.as_bytes(str(self.image_height)), result[1])

        # Check actual image dimensions.
        images = result[2:]
        with tf.Session() as sess:
            placeholder = tf.placeholder(tf.string)
            decoder = tf.image.decode_png(placeholder)
            for image in images:
                decoded = sess.run(decoder, feed_dict={placeholder: image})
                self.assertEqual(
                    (self.image_height, self.image_width, channel_count),
                    decoded.shape)
Example #10
def run_all(logdir):
    tf.reset_default_graph()
    step_placeholder = tf.placeholder(tf.int32)

    with tf.name_scope('simple_example'):
        simple_example(step_placeholder)
    with tf.name_scope('markdown_table'):
        markdown_table(step_placeholder)
    with tf.name_scope('higher_order_tensors'):
        higher_order_tensors(step_placeholder)
    all_summaries = tf.summary.merge_all()

    with tf.Session() as sess:
        writer = tf.summary.FileWriter(logdir)
        writer.add_graph(sess.graph)
        for step in xrange(STEPS):
            s = sess.run(all_summaries, feed_dict={step_placeholder: step})
            writer.add_summary(s, global_step=step)
        writer.close()
Example #11
    def _get_writer_fn(self, event_batch):
        key = (event_batch.experiment_name, event_batch.run_name)
        if key in self._writer_fn_cache:
            return self._writer_fn_cache[key]
        with tf.Graph().as_default():
            placeholder = tf.placeholder(shape=[], dtype=tf.string)
            writer = tf.contrib.summary.create_db_writer(
                self._db_path,
                experiment_name=event_batch.experiment_name,
                run_name=event_batch.run_name)
            with writer.as_default():
                # TODO(nickfelt): running import_event() one record at a time is very
                #   slow; we should add an op that accepts a vector of records.
                import_op = tf.contrib.summary.import_event(placeholder)
            session = tf.Session()
            session.run(writer.init())

            def writer_fn(event_proto):
                session.run(import_op, feed_dict={placeholder: event_proto})

        self._writer_fn_cache[key] = writer_fn
        return writer_fn
Example #12
    def generate_run_to_db(self, experiment_name, run_name):
        tf.reset_default_graph()

        global_step = tf.placeholder(tf.int64)
        db_writer = tf.contrib.summary.create_db_writer(
            db_uri=self.db_path,
            experiment_name=experiment_name,
            run_name=run_name,
            user_name='user')

        scalar_ops = None
        with db_writer.as_default(
        ), tf.contrib.summary.always_record_summaries():
            tf.contrib.summary.scalar(self._SCALAR_TAG, 42, step=global_step)
            flush_op = tf.contrib.summary.flush(db_writer._resource)

        with tf.Session() as sess:
            sess.run(tf.contrib.summary.summary_writer_initializer_op())
            for step in xrange(self._STEPS):
                feed_dict = {global_step: step}
                sess.run(tf.contrib.summary.all_summary_ops(),
                         feed_dict=feed_dict)
            sess.run(flush_op)
Example #13
    def testTFSummaryScalar(self):
        """Verify processing of tf.summary.scalar."""
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        writer = tf.summary.FileWriter(self.get_temp_dir())
        writer.event_writer = event_sink
        with self.test_session() as sess:
            ipt = tf.placeholder(tf.float32)
            tf.summary.scalar('scalar1', ipt)
            tf.summary.scalar('scalar2', ipt * ipt)
            merged = tf.summary.merge_all()
            writer.add_graph(sess.graph)
            for i in xrange(10):
                summ = sess.run(merged, feed_dict={ipt: i})
                writer.add_summary(summ, global_step=i)

        accumulator = ea.EventAccumulator(event_sink)
        accumulator.Reload()

        seq1 = [
            ea.ScalarEvent(wall_time=0, step=i, value=i) for i in xrange(10)
        ]
        seq2 = [
            ea.ScalarEvent(wall_time=0, step=i, value=i * i)
            for i in xrange(10)
        ]

        self.assertTagsEqual(
            accumulator.Tags(), {
                ea.SCALARS: ['scalar1', 'scalar2'],
                ea.GRAPH: True,
                ea.META_GRAPH: False,
            })

        self.assertEqual(accumulator.Scalars('scalar1'), seq1)
        self.assertEqual(accumulator.Scalars('scalar2'), seq2)
        first_value = accumulator.Scalars('scalar1')[0].value
        self.assertTrue(isinstance(first_value, float))
Example #14
def run(logdir, run_name, wave_name, wave_constructor):
    """Generate wave data of the given form.

  The provided function `wave_constructor` should accept a scalar tensor
  of type float32, representing the frequency (in Hz) at which to
  construct a wave, and return a tensor of shape [1, _samples(), `n`]
  representing audio data (for some number of channels `n`).

  Waves will be generated at frequencies ranging from A4 to A5.

  Arguments:
    logdir: the top-level directory into which to write summary data
    run_name: the name of this run; will be created as a subdirectory
      under logdir
    wave_name: the name of the wave being generated
    wave_constructor: see above
  """
    tf.reset_default_graph()
    tf.set_random_seed(0)

    # On each step `i`, we'll set this placeholder to `i`. This allows us
    # to know "what time it is" at each step.
    step_placeholder = tf.placeholder(tf.float32, shape=[])

    # We want to linearly interpolate a frequency between A4 (440 Hz) and
    # A5 (880 Hz).
    with tf.name_scope('compute_frequency'):
        f_min = 440.0
        f_max = 880.0
        t = step_placeholder / (FLAGS.steps - 1)
        frequency = f_min * (1.0 - t) + f_max * t

    # Let's log this frequency, just so that we can make sure that it's as
    # expected.
    tf.summary.scalar('frequency', frequency)

    # Now, we pass this to the wave constructor to get our waveform. Doing
    # so within a name scope means that any summaries that the wave
    # constructor produces will be namespaced.
    with tf.name_scope(wave_name):
        waveform = wave_constructor(frequency)

    # We also have the opportunity to annotate each audio clip with a
    # label. This is a good place to include the frequency, because it'll
    # be visible immediately next to the audio clip.
    with tf.name_scope('compute_labels'):
        samples = tf.shape(waveform)[0]
        wave_types = tf.tile(["*Wave type:* `%s`." % wave_name], [samples])
        frequencies = tf.string_join([
            "*Frequency:* ",
            tf.tile([tf.as_string(frequency, precision=2)], [samples]),
            " Hz.",
        ])
        samples = tf.string_join([
            "*Sample:* ",
            tf.as_string(tf.range(samples) + 1),
            " of ",
            tf.as_string(samples),
            ".",
        ])
        labels = tf.string_join([wave_types, frequencies, samples],
                                separator=" ")

    # We can place a description next to the summary in TensorBoard. This
    # is a good place to explain what the summary represents, methodology
    # for creating it, etc. Let's include the source code of the function
    # that generated the wave.
    source = '\n'.join('    %s' % line.rstrip()
                       for line in inspect.getsourcelines(wave_constructor)[0])
    description = ("A wave of type `%r`, generated via:\n\n%s" %
                   (wave_name, source))

    # Here's the crucial piece: we interpret this result as audio.
    summary.op('waveform',
               waveform,
               FLAGS.sample_rate,
               labels=labels,
               display_name=wave_name,
               description=description)

    # Now, we can collect up all the summaries and begin the run.
    summ = tf.summary.merge_all()

    sess = tf.Session()
    writer = tf.summary.FileWriter(os.path.join(logdir, run_name))
    writer.add_graph(sess.graph)
    sess.run(tf.global_variables_initializer())
    for step in xrange(FLAGS.steps):
        s = sess.run(summ, feed_dict={step_placeholder: float(step)})
        writer.add_summary(s, global_step=step)
    writer.close()
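For reference, a sketch of a conforming `wave_constructor` as described in the docstring above. It assumes the module-level `_samples()` helper and `FLAGS.sample_rate` referenced by `run`, plus numpy imported as `np`; it is not part of the original snippet:

def sine_wave(frequency):
    """Hypothetical constructor: a mono sine wave at `frequency` Hz."""
    ts = tf.range(_samples(), dtype=tf.float32) / float(FLAGS.sample_rate)
    wave = tf.sin(2.0 * np.pi * frequency * ts)
    # Shape [1, _samples(), 1]: one clip, one (mono) channel.
    return tf.reshape(wave, [1, _samples(), 1])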
Example #15
def run_sobel(logdir, verbose=False):
  """Run a Sobel edge detection demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
  if verbose:
    tf.logging.info('--- Starting run: sobel')

  tf.reset_default_graph()
  tf.set_random_seed(0)

  image = get_image(verbose=verbose)
  kernel_radius = tf.placeholder(shape=(), dtype=tf.int32)

  with tf.name_scope('horizontal_kernel'):
    kernel_side_length = kernel_radius * 2 + 1
    # Drop off influence for pixels further away from the center.
    weighting_kernel = (
        1.0 - tf.abs(tf.linspace(-1.0, 1.0, num=kernel_side_length)))
    differentiation_kernel = tf.linspace(-1.0, 1.0, num=kernel_side_length)
    horizontal_kernel = tf.matmul(tf.expand_dims(weighting_kernel, 1),
                                  tf.expand_dims(differentiation_kernel, 0))

  with tf.name_scope('vertical_kernel'):
    vertical_kernel = tf.transpose(horizontal_kernel)

  float_image = tf.cast(image, tf.float32)
  dx = convolve(float_image, horizontal_kernel, name='convolve_dx')
  dy = convolve(float_image, vertical_kernel, name='convolve_dy')
  gradient_magnitude = tf.norm([dx, dy], axis=0, name='gradient_magnitude')
  with tf.name_scope('normalized_gradient'):
    normalized_gradient = gradient_magnitude / tf.reduce_max(gradient_magnitude)
  with tf.name_scope('output_image'):
    output_image = tf.cast(255 * normalized_gradient, tf.uint8)

  summ = image_summary.op(
      'sobel', tf.stack([output_image]),
      display_name='Sobel edge detection',
      description=(u'Demonstration of [Sobel edge detection]. The step '
                   'parameter adjusts the radius of the kernel. '
                   'The kernel can be of arbitrary size, and considers '
                   u'nearby pixels with \u2113\u2082-linear falloff.\n\n'
                   # (that says ``$\ell_2$-linear falloff'')
                   'Edge detection is done on a per-channel basis, so '
                   'you can observe which edges are “mostly red '
                   'edges,” for instance.\n\n'
                   'For practical edge detection, a small kernel '
                   '(usually not more than *r*=2) is best.\n\n'
                   '[Sobel edge detection]: %s\n\n'
                   "%s"
                   % ('https://en.wikipedia.org/wiki/Sobel_operator',
                      IMAGE_CREDIT)))

  with tf.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'sobel'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        tf.logging.info("--- sobel: step: %s" % step)
      feed_dict = {kernel_radius: step}
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
Example #16
def run_box_to_gaussian(logdir, verbose=False):
  """Run a box-blur-to-Gaussian-blur demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
  if verbose:
    tf.logging.info('--- Starting run: box_to_gaussian')

  tf.reset_default_graph()
  tf.set_random_seed(0)

  image = get_image(verbose=verbose)
  blur_radius = tf.placeholder(shape=(), dtype=tf.int32)
  with tf.name_scope('filter'):
    blur_side_length = blur_radius * 2 + 1
    pixel_filter = tf.ones((blur_side_length, blur_side_length))
    pixel_filter = (pixel_filter
                    / tf.cast(tf.size(pixel_filter), tf.float32))  # normalize

  iterations = 4
  images = [tf.cast(image, tf.float32) / 255.0]
  for _ in xrange(iterations):
    images.append(convolve(images[-1], pixel_filter))
  with tf.name_scope('convert_to_uint8'):
    images = tf.stack(
        [tf.cast(255 * tf.clip_by_value(image_, 0.0, 1.0), tf.uint8)
         for image_ in images])

  summ = image_summary.op(
      'box_to_gaussian', images, max_outputs=iterations,
      display_name='Gaussian blur as a limit process of box blurs',
      description=('Demonstration of forming a Gaussian blur by '
                   'composing box blurs, each of which can be expressed '
                   'as a 2D convolution.\n\n'
                   'A Gaussian blur is formed by convolving a Gaussian '
                   'kernel over an image. But a Gaussian kernel is '
                   'itself the limit of convolving a constant kernel '
                   'with itself many times. Thus, while applying '
                   'a box-filter convolution just once produces '
                   'results that are noticeably different from those '
                   'of a Gaussian blur, repeating the same convolution '
                   'just a few times causes the result to rapidly '
                   'converge to an actual Gaussian blur.\n\n'
                   'Here, the step value controls the blur radius, '
                   'and the image sample controls the number of times '
                   'that the convolution is applied (plus one). '
                   'So, when *sample*=1, the original image is shown; '
                   '*sample*=2 shows a box blur; and a hypothetical '
                   '*sample*=∞ would show a true Gaussian blur.\n\n'
                   'This is one ingredient in a recipe to compute very '
                   'fast Gaussian blurs. The other pieces require '
                   'special treatment for the box blurs themselves '
                   '(decomposition to dual one-dimensional box blurs, '
                   'each of which is computed with a sliding window); '
                   'we don’t perform those optimizations here.\n\n'
                   '[Here are some slides describing the full process.]'
                   '(%s)\n\n'
                   '%s'
                   % ('http://elynxsdk.free.fr/ext-docs/Blur/Fast_box_blur.pdf',
                      IMAGE_CREDIT)))

  with tf.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'box_to_gaussian'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        tf.logging.info('--- box_to_gaussian: step: %s' % step)
      feed_dict = {blur_radius: step}
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
Example #17
def run():
    """Run custom scalar demo and generate event files."""
    step = tf.placeholder(tf.float32, shape=[])

    with tf.name_scope('loss'):
        # Specify 2 different loss values, each tagged differently.
        summary_lib.scalar('foo', tf.pow(0.9, step))
        summary_lib.scalar('bar', tf.pow(0.85, step + 2))

        # Log metric baz as well as upper and lower bounds for a margin chart.
        middle_baz_value = step + 4 * tf.random_uniform([]) - 2
        summary_lib.scalar('baz', middle_baz_value)
        summary_lib.scalar('baz_lower',
                           middle_baz_value - 6.42 - tf.random_uniform([]))
        summary_lib.scalar('baz_upper',
                           middle_baz_value + 6.42 + tf.random_uniform([]))

    with tf.name_scope('trigFunctions'):
        summary_lib.scalar('cosine', tf.cos(step))
        summary_lib.scalar('sine', tf.sin(step))
        summary_lib.scalar('tangent', tf.tan(step))

    merged_summary = tf.summary.merge_all()

    with tf.Session() as sess, tf.summary.FileWriter(LOGDIR) as writer:
        # We only need to specify the layout once (instead of per step).
        layout_summary = summary_lib.custom_scalar_pb(
            layout_pb2.Layout(category=[
                layout_pb2.Category(
                    title='losses',
                    chart=[
                        layout_pb2.Chart(
                            title='losses',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'loss(?!.*margin.*)'], )),
                        layout_pb2.Chart(
                            title='baz',
                            margin=layout_pb2.MarginChartContent(series=[
                                layout_pb2.MarginChartContent.Series(
                                    value='loss/baz/scalar_summary',
                                    lower='loss/baz_lower/scalar_summary',
                                    upper='loss/baz_upper/scalar_summary'),
                            ], )),
                    ]),
                layout_pb2.Category(
                    title='trig functions',
                    chart=[
                        layout_pb2.Chart(
                            title='wave trig functions',
                            multiline=layout_pb2.MultilineChartContent(tag=[
                                r'trigFunctions/cosine', r'trigFunctions/sine'
                            ], )),
                        # The range of tangent is different. Give it its own chart.
                        layout_pb2.Chart(
                            title='tan',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'trigFunctions/tangent'], )),
                    ],
                    # This category we care less about. Make it initially closed.
                    closed=True),
            ]))
        writer.add_summary(layout_summary)

        for i in xrange(42):
            summary = sess.run(merged_summary, feed_dict={step: i})
            writer.add_summary(summary, global_step=i)
Example #18
def run_all(logdir, verbose=False):
  """Generate a bunch of histogram data, and write it to logdir."""
  del verbose

  tf.set_random_seed(0)

  k = tf.placeholder(tf.float32)

  # Make a normal distribution, with a shifting mean
  mean_moving_normal = tf.random_normal(shape=[1000], mean=(5*k), stddev=1)
  # Record that distribution into a histogram summary
  histogram_summary.op("normal/moving_mean",
                       mean_moving_normal,
                       description="A normal distribution whose mean changes "
                                   "over time.")

  # Make a normal distribution with shrinking variance
  shrinking_normal = tf.random_normal(shape=[1000], mean=0, stddev=1-(k))
  # Record that distribution too
  histogram_summary.op("normal/shrinking_variance", shrinking_normal,
                       description="A normal distribution whose variance "
                                   "shrinks over time.")

  # Let's combine both of those distributions into one dataset
  normal_combined = tf.concat([mean_moving_normal, shrinking_normal], 0)
  # We add another histogram summary to record the combined distribution
  histogram_summary.op("normal/bimodal", normal_combined,
                       description="A combination of two normal distributions, "
                                   "one with a moving mean and one with  "
                                   "shrinking variance. The result is a "
                                   "distribution that starts as unimodal and "
                                   "becomes more and more bimodal over time.")

  # Add a gamma distribution
  gamma = tf.random_gamma(shape=[1000], alpha=k)
  histogram_summary.op("gamma", gamma,
                       description="A gamma distribution whose shape "
                                   "parameter, α, changes over time.")

  # And a poisson distribution
  poisson = tf.random_poisson(shape=[1000], lam=k)
  histogram_summary.op("poisson", poisson,
                       description="A Poisson distribution, which only "
                                   "takes on integer values.")

  # And a uniform distribution
  uniform = tf.random_uniform(shape=[1000], maxval=k*10)
  histogram_summary.op("uniform", uniform,
                       description="A simple uniform distribution.")

  # Finally, combine everything together!
  all_distributions = [mean_moving_normal, shrinking_normal,
                       gamma, poisson, uniform]
  all_combined = tf.concat(all_distributions, 0)
  histogram_summary.op("all_combined", all_combined,
                       description="An amalgamation of five distributions: a "
                                   "uniform distribution, a gamma "
                                   "distribution, a Poisson distribution, and "
                                   "two normal distributions.")

  summaries = tf.summary.merge_all()

  # Set up a session and summary writer
  sess = tf.Session()
  writer = tf.summary.FileWriter(logdir)

  # Set up a loop and write the summaries to disk
  N = 400
  for step in xrange(N):
    k_val = step / float(N)
    summ = sess.run(summaries, feed_dict={k: k_val})
    writer.add_summary(summ, global_step=step)
  writer.close()
Example #19
def start_runs(logdir,
               steps,
               run_name,
               thresholds,
               mask_every_other_prediction=False):
    """Generate a PR curve with precision and recall evenly weighted.

  Arguments:
    logdir: The directory into which to store all the runs' data.
    steps: The number of steps to run for.
    run_name: The name of the run.
    thresholds: The number of thresholds to use for PR curves.
    mask_every_other_prediction: Whether to mask every other prediction by
      alternating weights between 0 and 1.
  """
    tf.reset_default_graph()
    tf.set_random_seed(42)

    # Create a normal distribution layer used to generate true color labels.
    distribution = tf.distributions.Normal(loc=0., scale=142.)

    # Sample the distribution to generate colors. Let's generate different numbers
    # of each color. The first dimension is the count of examples.

    # The calls to sample() are given fixed random seed values that are "magic"
    # in that they correspond to the default seeds for those ops when the PR
    # curve test (which depends on this code) was written. We've pinned these
    # instead of continuing to use the defaults since the defaults are based on
    # node IDs from the sequence of nodes added to the graph, which can silently
    # change when this code or any TF op implementations it uses are modified.

    # TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds.

    # Generate reds.
    number_of_reds = 100
    true_reds = tf.clip_by_value(
        tf.concat([
            255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)),
            tf.abs(distribution.sample([number_of_reds, 2], seed=34))
        ],
                  axis=1), 0, 255)

    # Generate greens.
    number_of_greens = 200
    true_greens = tf.clip_by_value(
        tf.concat([
            tf.abs(distribution.sample([number_of_greens, 1], seed=61)),
            255 - tf.abs(distribution.sample([number_of_greens, 1], seed=82)),
            tf.abs(distribution.sample([number_of_greens, 1], seed=105))
        ],
                  axis=1), 0, 255)

    # Generate blues.
    number_of_blues = 150
    true_blues = tf.clip_by_value(
        tf.concat([
            tf.abs(distribution.sample([number_of_blues, 2], seed=132)),
            255 - tf.abs(distribution.sample([number_of_blues, 1], seed=153))
        ],
                  axis=1), 0, 255)

    # Assign each color a vector of 3 booleans based on its true label.
    labels = tf.concat([
        tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)),
        tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)),
        tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)),
    ],
                       axis=0)

    # We introduce 3 normal distributions. They are used to predict whether a
    # color falls under a certain class (based on distances from corners of the
    # color triangle). The distributions vary per color. We have the distributions
    # narrow over time.
    initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)]
    iteration = tf.placeholder(tf.int32, shape=[])
    red_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[0] - iteration,
                      dtype=tf.float32))
    green_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[1] - iteration,
                      dtype=tf.float32))
    blue_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[2] - iteration,
                      dtype=tf.float32))

    # Make predictions (assign 3 probabilities to each color based on each color's
    # distance to each of the 3 corners). We seek double the area in the right
    # tail of the normal distribution.
    examples = tf.concat([true_reds, true_greens, true_blues], axis=0)
    probabilities_colors_are_red = (1 - red_predictor.cdf(
        tf.norm(examples - tf.constant([255., 0, 0]), axis=1))) * 2
    probabilities_colors_are_green = (1 - green_predictor.cdf(
        tf.norm(examples - tf.constant([0, 255., 0]), axis=1))) * 2
    probabilities_colors_are_blue = (1 - blue_predictor.cdf(
        tf.norm(examples - tf.constant([0, 0, 255.]), axis=1))) * 2

    predictions = (probabilities_colors_are_red,
                   probabilities_colors_are_green,
                   probabilities_colors_are_blue)

    # This is the crucial piece. We write data required for generating PR curves.
    # We create 1 summary per class because we create 1 PR curve per class.
    for i, color in enumerate(('red', 'green', 'blue')):
        description = (
            'The probabilities used to create this PR curve are '
            'generated from a normal distribution. Its standard '
            'deviation is initially %0.0f and decreases over time.' %
            initial_standard_deviations[i])

        weights = None
        if mask_every_other_prediction:
            # Assign a weight of 0 to every even-indexed prediction. Odd-indexed
            # predictions are assigned a default weight of 1.
            consecutive_indices = tf.reshape(tf.range(tf.size(predictions[i])),
                                             tf.shape(predictions[i]))
            weights = tf.cast(consecutive_indices % 2, dtype=tf.float32)

        summary.op(name=color,
                   labels=labels[:, i],
                   predictions=predictions[i],
                   num_thresholds=thresholds,
                   weights=weights,
                   display_name='classifying %s' % color,
                   description=description)
    merged_summary_op = tf.summary.merge_all()
    events_directory = os.path.join(logdir, run_name)
    sess = tf.Session()
    writer = tf.summary.FileWriter(events_directory, sess.graph)

    for step in xrange(steps):
        feed_dict = {
            iteration: step,
        }
        merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict)
        writer.add_summary(merged_summary, step)

    writer.close()
Example #20
    def setUp(self):
        self.log_dir = tempfile.mkdtemp()

        # We use numpy.random to generate audio. We seed to avoid non-determinism
        # in this test.
        numpy.random.seed(42)

        # Create old-style audio summaries for run "foo".
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.float32)
        tf.summary.audio(name="baz", tensor=placeholder, sample_rate=44100)
        merged_summary_op = tf.summary.merge_all()
        foo_directory = os.path.join(self.log_dir, "foo")
        writer = tf.summary.FileWriter(foo_directory)
        writer.add_graph(sess.graph)
        for step in xrange(2):
            # The floats (sample data) range from -1 to 1.
            writer.add_summary(sess.run(
                merged_summary_op,
                feed_dict={placeholder: numpy.random.rand(42, 22050) * 2 - 1}),
                               global_step=step)
        writer.close()

        # Create new-style audio summaries for run "bar".
        tf.reset_default_graph()
        sess = tf.Session()
        audio_placeholder = tf.placeholder(tf.float32)
        labels_placeholder = tf.placeholder(tf.string)
        summary.op("quux",
                   audio_placeholder,
                   sample_rate=44100,
                   labels=labels_placeholder,
                   description="how do you pronounce that, anyway?")
        merged_summary_op = tf.summary.merge_all()
        bar_directory = os.path.join(self.log_dir, "bar")
        writer = tf.summary.FileWriter(bar_directory)
        writer.add_graph(sess.graph)
        for step in xrange(2):
            # The floats (sample data) range from -1 to 1.
            writer.add_summary(sess.run(
                merged_summary_op,
                feed_dict={
                    audio_placeholder:
                    numpy.random.rand(42, 11025, 1) * 2 - 1,
                    labels_placeholder: [
                        tf.compat.as_bytes('step **%s**, sample %s' %
                                           (step, sample))
                        for sample in xrange(42)
                    ],
                }),
                               global_step=step)
        writer.close()

        # Start a server with the plugin.
        multiplexer = event_multiplexer.EventMultiplexer({
            "foo": foo_directory,
            "bar": bar_directory,
        })
        context = base_plugin.TBContext(logdir=self.log_dir,
                                        multiplexer=multiplexer)
        self.plugin = audio_plugin.AudioPlugin(context)
        # Setting a reload interval of -1 disables reloading. We disable reloading
        # because we want tests to block until a reload has finished, so this
        # setUp method reloads the multiplexer manually. TensorBoard would
        # otherwise reload in a non-blocking thread.
        wsgi_app = application.TensorBoardWSGIApp(self.log_dir, [self.plugin],
                                                  multiplexer,
                                                  reload_interval=-1,
                                                  path_prefix='')
        self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
        multiplexer.Reload()
Example #21
 def test_when_shape_not_statically_known(self):
   placeholder = tf.placeholder(tf.float64, shape=None)
   reshaped = self.gaussian.reshape((25, -1))
   self.compute_and_check_summary_pb(data=reshaped,
                                     data_tensor=placeholder,
                                     feed_dict={placeholder: reshaped})
Example #22
 def initialize_graph(self):
     self._image_placeholder = tf.placeholder(dtype=tf.uint8,
                                              name='image_to_encode')
     self._encode_op = tf.image.encode_png(self._image_placeholder)
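A brief usage sketch for the PNG-encoding graph above. It assumes, hypothetically, that the owning class also keeps a tf.Session in `self._session`; that attribute is not shown in the snippet:

 def encode(self, image):
     # Hypothetical helper; `image` is a uint8 array of shape
     # [height, width, channels].
     return self._session.run(self._encode_op,
                              feed_dict={self._image_placeholder: image})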
Example #23
 def initialize_graph(self):
     self._input = tf.placeholder(tf.int32)
     self._squarer = tf.square(self._input)
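A standalone sketch of the same feed-and-run pattern outside the class (the variable names are illustrative):

input_placeholder = tf.placeholder(tf.int32)
squarer = tf.square(input_placeholder)
with tf.Session() as sess:
    # Feed a value into the placeholder and evaluate the square op.
    print(sess.run(squarer, feed_dict={input_placeholder: 12}))  # prints 144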