Example #1
  def setUp(self):
    self.log_dir = tempfile.mkdtemp()

    # We use numpy.random to generate images. We seed to avoid non-determinism
    # in this test.
    numpy.random.seed(42)

    # Create old-style image summaries for run "foo".
    tf.reset_default_graph()
    sess = tf.Session()
    placeholder = tf.placeholder(tf.uint8)
    tf.summary.image(name="baz", tensor=placeholder)
    merged_summary_op = tf.summary.merge_all()
    foo_directory = os.path.join(self.log_dir, "foo")
    writer = tf.summary.FileWriter(foo_directory)
    writer.add_graph(sess.graph)
    for step in xrange(2):
      writer.add_summary(sess.run(merged_summary_op, feed_dict={
          placeholder: (numpy.random.rand(1, 16, 42, 3) * 255).astype(
              numpy.uint8)
      }), global_step=step)
    writer.close()

    # Create new-style image summaries for run "bar".
    tf.reset_default_graph()
    sess = tf.Session()
    placeholder = tf.placeholder(tf.uint8)
    summary.op(name="quux", images=placeholder,
               description="how do you pronounce that, anyway?")
    merged_summary_op = tf.summary.merge_all()
    bar_directory = os.path.join(self.log_dir, "bar")
    writer = tf.summary.FileWriter(bar_directory)
    writer.add_graph(sess.graph)
    for step in xrange(2):
      writer.add_summary(sess.run(merged_summary_op, feed_dict={
          placeholder: (numpy.random.rand(1, 8, 6, 3) * 255).astype(
              numpy.uint8)
      }), global_step=step)
    writer.close()

    # Start a server with the plugin.
    multiplexer = event_multiplexer.EventMultiplexer({
        "foo": foo_directory,
        "bar": bar_directory,
    })
    context = base_plugin.TBContext(
        logdir=self.log_dir, multiplexer=multiplexer)
    plugin = images_plugin.ImagesPlugin(context)
    # Setting a reload interval of -1 disables reloading. We disable reloading
    # because we want to block tests from running until a reload has finished,
    # so this setUp method reloads the multiplexer manually; TensorBoard would
    # otherwise reload in a non-blocking thread.
    wsgi_app = application.TensorBoardWSGIApp(
        self.log_dir, [plugin], multiplexer, reload_interval=-1, path_prefix='')
    self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
    multiplexer.Reload()
    self.routes = plugin.get_plugin_apps()
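A hedged follow-up sketch: with the test client built in setUp, a test can
exercise the plugin's routes directly. The /data/plugin/images/tags route and
the json import are assumptions based on the standard images-plugin URL
layout, not part of the original snippet.

  def _fetch_tags(self):
    # Hypothetical helper: fetch the tag index served by the images plugin
    # and decode the JSON response body.
    response = self.server.get('/data/plugin/images/tags')
    self.assertEqual(200, response.status_code)
    return json.loads(response.get_data().decode('utf-8'))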
Example #2
    def generate_run(self, run_name, include_graph):
        """Create a run with a text summary, metadata, and optionally a graph."""
        tf.reset_default_graph()
        k1 = tf.constant(math.pi, name='k1')
        k2 = tf.constant(math.e, name='k2')
        result = (k1**k2) - k1
        expected = tf.constant(20.0, name='expected')
        error = tf.abs(result - expected, name='error')
        message_prefix_value = 'error ' * 1000
        true_length = len(message_prefix_value)
        assert true_length > self._MESSAGE_PREFIX_LENGTH_LOWER_BOUND, true_length
        message_prefix = tf.constant(message_prefix_value,
                                     name='message_prefix')
        error_message = tf.string_join(
            [message_prefix,
             tf.as_string(error, name='error_string')],
            name='error_message')
        summary_message = tf.summary.text('summary_message', error_message)

        sess = tf.Session()
        writer = tf.summary.FileWriter(os.path.join(self.logdir, run_name))
        if include_graph:
            writer.add_graph(sess.graph)
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        s = sess.run(summary_message,
                     options=options,
                     run_metadata=run_metadata)
        writer.add_summary(s)
        writer.add_run_metadata(run_metadata, self._METADATA_TAG)
        writer.close()
Example #3
    def generate_testdata(self):
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.constant('I am deprecated.')

        # Previously, text summaries were created via plugin assets (JSON files
        # listing runs and tags). The plugin must continue to be able to load
        # summaries in that format, so we create a summary using the old
        # plugin-asset-based method here.
        plugin_asset_summary = tf.summary.tensor_summary(
            'old_plugin_asset_summary', placeholder)
        assets_directory = os.path.join(self.logdir, 'fry', 'plugins',
                                        'tensorboard_text')
        # Make the directory of assets if it does not exist.
        if not os.path.isdir(assets_directory):
            try:
                os.makedirs(assets_directory)
            except OSError as err:
                self.fail('Could not make assets directory %r: %r' %
                          (assets_directory, err))
        json_path = os.path.join(assets_directory, 'tensors.json')
        with open(json_path, 'w+') as tensors_json_file:
            # Write the op name to a JSON file that the text plugin later uses to
            # determine the tag names of tensors to fetch.
            tensors_json_file.write(json.dumps([plugin_asset_summary.op.name]))

        run_name = 'fry'
        subdir = os.path.join(self.logdir, run_name)
        writer = tf.summary.FileWriter(subdir)
        writer.add_graph(sess.graph)

        summ = sess.run(plugin_asset_summary)
        writer.add_summary(summ)
        writer.close()
Example #4
    def generate_run(self, run_name):
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.float32, shape=[3])

        if run_name == self._RUN_WITH_LEGACY_HISTOGRAM:
            tf.summary.histogram(self._LEGACY_HISTOGRAM_TAG, placeholder)
        elif run_name == self._RUN_WITH_HISTOGRAM:
            summary.op(self._HISTOGRAM_TAG,
                       placeholder,
                       display_name=self._DISPLAY_NAME,
                       description=self._DESCRIPTION)
        elif run_name == self._RUN_WITH_SCALARS:
            tf.summary.scalar(self._SCALAR_TAG, tf.reduce_mean(placeholder))
        else:
            assert False, 'Invalid run name: %r' % run_name
        summ = tf.summary.merge_all()

        subdir = os.path.join(self.logdir, run_name)
        writer = tf.summary.FileWriter(subdir)
        writer.add_graph(sess.graph)
        for step in xrange(self._STEPS):
            feed_dict = {placeholder: [1 + step, 2 + step, 3 + step]}
            s = sess.run(summ, feed_dict=feed_dict)
            writer.add_summary(s, global_step=step)
        writer.close()
Example #5
    def generate_testdata(self, include_text=True, logdir=None):
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.string)
        summary_tensor = tf.summary.text('message', placeholder)
        vector_summary = tf.summary.text('vector', placeholder)
        scalar_summary = tf.summary.scalar('twelve', tf.constant(12))

        run_names = ['fry', 'leela']
        for run_name in run_names:
            subdir = os.path.join(logdir or self.logdir, run_name)
            writer = tf.summary.FileWriter(subdir)
            writer.add_graph(sess.graph)

            step = 0
            for gem in GEMS:
                message = run_name + ' *loves* ' + gem
                feed_dict = {
                    placeholder: message,
                }
                if include_text:
                    summ = sess.run(summary_tensor, feed_dict=feed_dict)
                    writer.add_summary(summ, global_step=step)
                step += 1

            vector_message = ['one', 'two', 'three', 'four']
            if include_text:
                summ = sess.run(vector_summary,
                                feed_dict={placeholder: vector_message})
                writer.add_summary(summ)

            summ = sess.run(scalar_summary, feed_dict={placeholder: []})
            writer.add_summary(summ)

            writer.close()
Example #6
def WriteAudioSeries(writer, tag, n_audio=1):
    """Write a few dummy audio clips to writer."""
    step = 0
    session = tf.Session()

    min_frequency_hz = 440
    max_frequency_hz = 880
    sample_rate = 4000
    duration_frames = sample_rate // 2  # 0.5 seconds.
    frequencies_per_run = 1
    num_channels = 2

    p = tf.placeholder("float32",
                       (frequencies_per_run, duration_frames, num_channels))
    s = tf.summary.audio(tag, p, sample_rate)

    for _ in xrange(n_audio):
        # Generate a different frequency for each channel to show stereo works.
        frequencies = np.random.random_integers(min_frequency_hz,
                                                max_frequency_hz,
                                                size=(frequencies_per_run,
                                                      num_channels))
        tiled_frequencies = np.tile(frequencies, (1, duration_frames))
        tiled_increments = np.tile(np.arange(0, duration_frames),
                                   (num_channels, 1)).T.reshape(
                                       1, duration_frames * num_channels)
        tones = np.sin(2.0 * np.pi * tiled_frequencies * tiled_increments /
                       sample_rate)
        tones = tones.reshape(frequencies_per_run, duration_frames,
                              num_channels)

        summ = session.run(s, feed_dict={p: tones})
        writer.add_summary(summ, step)
        step += 20
    session.close()
Example #7
    def _GenerateProjectorTestData(self):
        config_path = os.path.join(self.log_dir, 'projector_config.pbtxt')
        config = projector_config_pb2.ProjectorConfig()
        embedding = config.embeddings.add()
        # Add an embedding by its canonical tensor name.
        embedding.tensor_name = 'var1:0'

        with tf.gfile.GFile(os.path.join(self.log_dir, 'bookmarks.json'),
                            'w') as f:
            f.write('{"a": "b"}')
        embedding.bookmarks_path = 'bookmarks.json'

        config_pbtxt = text_format.MessageToString(config)
        with tf.gfile.GFile(config_path, 'w') as f:
            f.write(config_pbtxt)

        # Write a checkpoint with some dummy variables.
        with tf.Graph().as_default():
            sess = tf.Session()
            checkpoint_path = os.path.join(self.log_dir, 'model')
            tf.get_variable('var1', [1, 2],
                            initializer=tf.constant_initializer(6.0))
            tf.get_variable('var2', [10, 10])
            tf.get_variable('var3', [100, 100])
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
            saver.save(sess, checkpoint_path)
Example #8
 def _value_from_op(self, op):
   with tf.Session() as sess:
     summary_pbtxt = sess.run(op)
   summary = tf.Summary()
   summary.ParseFromString(summary_pbtxt)
   # There may be multiple values (e.g., for an image summary that emits
   # multiple images in one batch). That's fine; we'll choose any
   # representative value, assuming that they're homogeneous.
   assert summary.value
   return summary.value[0]
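An illustrative use of the helper above (hypothetical, not from the original
tests): run any summary op through _value_from_op and inspect the first value.

 def test_scalar_value(self):
   value = self._value_from_op(tf.summary.scalar('loss', tf.constant(0.25)))
   self.assertEqual('loss', value.tag)
   self.assertEqual(0.25, value.simple_value)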
Example #9
 def _lazily_initialize(self):
     """Initialize the graph and session, if this has not yet been done."""
     with self._initialization_lock:
         if self._session:
             return
         graph = tf.Graph()
         with graph.as_default():
             self.initialize_graph()
         # Don't reserve GPU because libpng can't run on GPU.
         config = tf.ConfigProto(device_count={'GPU': 0})
         self._session = tf.Session(graph=graph, config=config)
Example #10
    def test_preserves_existing_session(self):
        with tf.Session() as sess:
            op = tf.reduce_sum([2, 2])
            self.assertIs(sess, tf.get_default_session())

            result = self._square(123)
            self.assertEqual(123 * 123, result)

            self.assertIs(sess, tf.get_default_session())
            number_of_lights = sess.run(op)
            self.assertEqual(number_of_lights, 4)
Example #11
def WriteImageSeries(writer, tag, n_images=1):
    """Write a few dummy images to writer."""
    step = 0
    session = tf.Session()
    p = tf.placeholder("uint8", (1, 4, 4, 3))
    s = tf.summary.image(tag, p)
    for _ in xrange(n_images):
        im = np.random.random_integers(0, 255, (1, 4, 4, 3))
        summ = session.run(s, feed_dict={p: im})
        writer.add_summary(summ, step)
        step += 20
    session.close()
Example #12
  def _generate_test_data(self, run_name, experiment_name):
    """Generates the test data directory.

    The test data has a single run of the given name, containing:
      - a graph definition and metagraph definition

    Arguments:
      run_name: The directory under self.logdir into which to write
          events.
      experiment_name: The name of the experiment under which the run's
          summary data is recorded in the database.
    """
    run_path = os.path.join(self.logdir, run_name)
    writer = tf.summary.FileWriter(run_path)

    # Add a simple graph event.
    graph_def = tf.GraphDef()
    node1 = graph_def.node.add()
    node1.name = 'a'
    node2 = graph_def.node.add()
    node2.name = 'b'
    node2.attr['very_large_attr'].s = b'a' * 2048  # 2 KB attribute

    meta_graph_def = tf.MetaGraphDef(graph_def=graph_def)

    if self._only_use_meta_graph:
      writer.add_meta_graph(meta_graph_def)
    else:
      writer.add_graph(graph_def)

    writer.flush()
    writer.close()

    # Write data for the run to the database.
    # TODO(nickfelt): Figure out why resetting the graph is necessary.
    tf.reset_default_graph()
    db_writer = tf.contrib.summary.create_db_writer(
        db_uri=self.db_path,
        experiment_name=experiment_name,
        run_name=run_name,
        user_name='user')
    with db_writer.as_default(), tf.contrib.summary.always_record_summaries():
      tf.contrib.summary.scalar('mytag', 1)

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.contrib.summary.summary_writer_initializer_op())
      sess.run(tf.contrib.summary.all_summary_ops())
Example #13
def run_all(logdir):
    tf.reset_default_graph()
    step_placeholder = tf.placeholder(tf.int32)

    with tf.name_scope('simple_example'):
        simple_example(step_placeholder)
    with tf.name_scope('markdown_table'):
        markdown_table(step_placeholder)
    with tf.name_scope('higher_order_tensors'):
        higher_order_tensors(step_placeholder)
    all_summaries = tf.summary.merge_all()

    with tf.Session() as sess:
        writer = tf.summary.FileWriter(logdir)
        writer.add_graph(sess.graph)
        for step in xrange(STEPS):
            s = sess.run(all_summaries, feed_dict={step_placeholder: step})
            writer.add_summary(s, global_step=step)
        writer.close()
Example #14
def _make_sprite_image(thumbnails, thumbnail_dim):
    """Constructs a sprite image from thumbnails and returns the png bytes."""
    if len(thumbnails) < 1:
        raise ValueError('The length of "thumbnails" must be >= 1')

    if isinstance(thumbnails, np.ndarray) and thumbnails.ndim != 4:
        raise ValueError('"thumbnails" should be of rank 4, '
                         'but is of rank %d' % thumbnails.ndim)
    if isinstance(thumbnails, list):
        if not isinstance(thumbnails[0],
                          np.ndarray) or thumbnails[0].ndim != 3:
            raise ValueError(
                'Each element of "thumbnails" must be a 3D `ndarray`')
        thumbnails = np.array(thumbnails)

    with tf.Graph().as_default():
        s = tf.Session()
        resized_images = tf.image.resize_images(thumbnails,
                                                thumbnail_dim).eval(session=s)
        images_per_row = int(math.ceil(math.sqrt(len(thumbnails))))
        thumb_height = thumbnail_dim[0]
        thumb_width = thumbnail_dim[1]
        master_height = images_per_row * thumb_height
        master_width = images_per_row * thumb_width
        num_channels = thumbnails.shape[3]
        master = np.zeros([master_height, master_width, num_channels])
        for idx, image in enumerate(resized_images):
            left_idx = idx % images_per_row
            top_idx = int(math.floor(idx / images_per_row))
            left_start = left_idx * thumb_width
            left_end = left_start + thumb_width
            top_start = top_idx * thumb_height
            top_end = top_start + thumb_height
            master[top_start:top_end, left_start:left_end, :] = image

        if USING_TF:
            # encode_png requires an integer image; cast the float sprite first.
            return tf.image.encode_png(master.astype(np.uint8)).eval(session=s)
        else:
            return master.tobytes()
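A short usage sketch for the function above; the shapes and names below are
illustrative, not from the original module.

import numpy as np

# Sixteen random 32x32 RGB thumbnails; the sprite grid then has
# ceil(sqrt(16)) = 4 thumbnails per row, each resized down to 8x8.
thumbnails = np.random.randint(0, 256, size=(16, 32, 32, 3), dtype=np.uint8)
png_bytes = _make_sprite_image(thumbnails, thumbnail_dim=(8, 8))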
Example #15
    def _get_writer_fn(self, event_batch):
        key = (event_batch.experiment_name, event_batch.run_name)
        if key in self._writer_fn_cache:
            return self._writer_fn_cache[key]
        with tf.Graph().as_default():
            placeholder = tf.placeholder(shape=[], dtype=tf.string)
            writer = tf.contrib.summary.create_db_writer(
                self._db_path,
                experiment_name=event_batch.experiment_name,
                run_name=event_batch.run_name)
            with writer.as_default():
                # TODO(nickfelt): running import_event() one record at a time is very
                #   slow; we should add an op that accepts a vector of records.
                import_op = tf.contrib.summary.import_event(placeholder)
            session = tf.Session()
            session.run(writer.init())

            def writer_fn(event_proto):
                session.run(import_op, feed_dict={placeholder: event_proto})

        self._writer_fn_cache[key] = writer_fn
        return writer_fn
Example #16
    def generate_run_to_db(self, experiment_name, run_name):
        tf.reset_default_graph()

        global_step = tf.placeholder(tf.int64)
        db_writer = tf.contrib.summary.create_db_writer(
            db_uri=self.db_path,
            experiment_name=experiment_name,
            run_name=run_name,
            user_name='user')

        with db_writer.as_default(), \
                tf.contrib.summary.always_record_summaries():
            tf.contrib.summary.scalar(self._SCALAR_TAG, 42, step=global_step)
            flush_op = tf.contrib.summary.flush(db_writer._resource)

        with tf.Session() as sess:
            sess.run(tf.contrib.summary.summary_writer_initializer_op())
            for step in xrange(self._STEPS):
                feed_dict = {global_step: step}
                sess.run(tf.contrib.summary.all_summary_ops(),
                         feed_dict=feed_dict)
            sess.run(flush_op)
Example #17
    def _test_dimensions(self, alpha=False, static_dimensions=True):
        if not alpha:
            images = self.images
            channel_count = 3
        else:
            images = self.images_with_alpha
            channel_count = 4

        if static_dimensions:
            images_tensor = tf.constant(images, dtype=tf.uint8)
            feed_dict = {}
        else:
            images_tensor = tf.placeholder(tf.uint8)
            feed_dict = {images_tensor: images}

        pb = self.compute_and_check_summary_pb('mona_lisa',
                                               images,
                                               images_tensor=images_tensor,
                                               feed_dict=feed_dict)
        self.assertEqual(1, len(pb.value))
        result = pb.value[0].tensor.string_val

        # Check annotated dimensions.
        self.assertEqual(tf.compat.as_bytes(str(self.image_width)), result[0])
        self.assertEqual(tf.compat.as_bytes(str(self.image_height)), result[1])

        # Check actual image dimensions.
        images = result[2:]
        with tf.Session() as sess:
            placeholder = tf.placeholder(tf.string)
            decoder = tf.image.decode_png(placeholder)
            for image in images:
                decoded = sess.run(decoder, feed_dict={placeholder: image})
                self.assertEqual(
                    (self.image_height, self.image_width, channel_count),
                    decoded.shape)
Example #18
def run_sobel(logdir, verbose=False):
  """Run a Sobel edge detection demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
  if verbose:
    tf.logging.info('--- Starting run: sobel')

  tf.reset_default_graph()
  tf.set_random_seed(0)

  image = get_image(verbose=verbose)
  kernel_radius = tf.placeholder(shape=(), dtype=tf.int32)

  with tf.name_scope('horizontal_kernel'):
    kernel_side_length = kernel_radius * 2 + 1
    # Drop off influence for pixels further away from the center.
    weighting_kernel = (
        1.0 - tf.abs(tf.linspace(-1.0, 1.0, num=kernel_side_length)))
    differentiation_kernel = tf.linspace(-1.0, 1.0, num=kernel_side_length)
    horizontal_kernel = tf.matmul(tf.expand_dims(weighting_kernel, 1),
                                  tf.expand_dims(differentiation_kernel, 0))

  with tf.name_scope('vertical_kernel'):
    vertical_kernel = tf.transpose(horizontal_kernel)

  float_image = tf.cast(image, tf.float32)
  dx = convolve(float_image, horizontal_kernel, name='convolve_dx')
  dy = convolve(float_image, vertical_kernel, name='convolve_dy')
  gradient_magnitude = tf.norm([dx, dy], axis=0, name='gradient_magnitude')
  with tf.name_scope('normalized_gradient'):
    normalized_gradient = gradient_magnitude / tf.reduce_max(gradient_magnitude)
  with tf.name_scope('output_image'):
    output_image = tf.cast(255 * normalized_gradient, tf.uint8)

  summ = image_summary.op(
      'sobel', tf.stack([output_image]),
      display_name='Sobel edge detection',
      description=(u'Demonstration of [Sobel edge detection]. The step '
                   'parameter adjusts the radius of the kernel. '
                   'The kernel can be of arbitrary size, and considers '
                   u'nearby pixels with \u2113\u2082-linear falloff.\n\n'
                   # (that says ``$\ell_2$-linear falloff'')
                   'Edge detection is done on a per-channel basis, so '
                   'you can observe which edges are &ldquo;mostly red '
                   'edges,&rdquo; for instance.\n\n'
                   'For practical edge detection, a small kernel '
                   '(usually not more than *r*=2) is best.\n\n'
                   '[Sobel edge detection]: %s\n\n'
                   "%s"
                   % ('https://en.wikipedia.org/wiki/Sobel_operator',
                      IMAGE_CREDIT)))

  with tf.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'sobel'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        tf.logging.info("--- sobel: step: %s" % step)
      feed_dict = {kernel_radius: step}
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
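As a concrete check of the kernel construction above, here is a numpy-only
rendering of the horizontal kernel at radius 2 (a standalone sketch, not part
of the demo):

import numpy as np

radius = 2
side = radius * 2 + 1
weighting = 1.0 - np.abs(np.linspace(-1.0, 1.0, num=side))  # [0., .5, 1., .5, 0.]
differentiation = np.linspace(-1.0, 1.0, num=side)          # [-1., -.5, 0., .5, 1.]
# Outer product: each row differentiates left-to-right, and rows nearer the
# vertical center carry more weight, mirroring the tf.matmul above.
horizontal_kernel = np.outer(weighting, differentiation)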
Example #19
def run_box_to_gaussian(logdir, verbose=False):
  """Run a box-blur-to-Gaussian-blur demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
  if verbose:
    tf.logging.info('--- Starting run: box_to_gaussian')

  tf.reset_default_graph()
  tf.set_random_seed(0)

  image = get_image(verbose=verbose)
  blur_radius = tf.placeholder(shape=(), dtype=tf.int32)
  with tf.name_scope('filter'):
    blur_side_length = blur_radius * 2 + 1
    pixel_filter = tf.ones((blur_side_length, blur_side_length))
    pixel_filter = (pixel_filter
                    / tf.cast(tf.size(pixel_filter), tf.float32))  # normalize

  iterations = 4
  images = [tf.cast(image, tf.float32) / 255.0]
  for _ in xrange(iterations):
    images.append(convolve(images[-1], pixel_filter))
  with tf.name_scope('convert_to_uint8'):
    images = tf.stack(
        [tf.cast(255 * tf.clip_by_value(image_, 0.0, 1.0), tf.uint8)
         for image_ in images])

  summ = image_summary.op(
      'box_to_gaussian', images, max_outputs=iterations,
      display_name='Gaussian blur as a limit process of box blurs',
      description=('Demonstration of forming a Gaussian blur by '
                   'composing box blurs, each of which can be expressed '
                   'as a 2D convolution.\n\n'
                   'A Gaussian blur is formed by convolving a Gaussian '
                   'kernel over an image. But a Gaussian kernel is '
                   'itself the limit of convolving a constant kernel '
                   'with itself many times. Thus, while applying '
                   'a box-filter convolution just once produces '
                   'results that are noticeably different from those '
                   'of a Gaussian blur, repeating the same convolution '
                   'just a few times causes the result to rapidly '
                   'converge to an actual Gaussian blur.\n\n'
                   'Here, the step value controls the blur radius, '
                   'and the image sample controls the number of times '
                   'that the convolution is applied (plus one). '
                   'So, when *sample*=1, the original image is shown; '
                   '*sample*=2 shows a box blur; and a hypothetical '
                   '*sample*=&infin; would show a true Gaussian blur.\n\n'
                   'This is one ingredient in a recipe to compute very '
                   'fast Gaussian blurs. The other pieces require '
                   'special treatment for the box blurs themselves '
                   '(decomposition to dual one-dimensional box blurs, '
                   'each of which is computed with a sliding window); '
                   'we don&rsquo;t perform those optimizations here.\n\n'
                   '[Here are some slides describing the full process.]'
                   '(%s)\n\n'
                   '%s'
                   % ('http://elynxsdk.free.fr/ext-docs/Blur/Fast_box_blur.pdf',
                      IMAGE_CREDIT)))

  with tf.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'box_to_gaussian'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        tf.logging.info('--- box_to_gaussian: step: %s' % step)
      feed_dict = {blur_radius: step}
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
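The limit process described in the summary text can be checked in a few lines
of numpy (a standalone sketch, not part of the demo): by the central limit
theorem, repeatedly convolving a box kernel with itself converges to a
Gaussian shape.

import numpy as np

box = np.ones(9) / 9.0  # a one-dimensional box kernel
kernel = box.copy()
for _ in range(3):
    kernel = np.convolve(kernel, box)
# After a few self-convolutions the kernel is already close to a Gaussian
# bell; compare its shape against np.exp(-x**2 / (2. * sigma**2)) for a
# variance-matched sigma.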
Example #20
def run():
    """Run custom scalar demo and generate event files."""
    step = tf.placeholder(tf.float32, shape=[])

    with tf.name_scope('loss'):
        # Specify 2 different loss values, each tagged differently.
        summary_lib.scalar('foo', tf.pow(0.9, step))
        summary_lib.scalar('bar', tf.pow(0.85, step + 2))

        # Log metric baz as well as upper and lower bounds for a margin chart.
        middle_baz_value = step + 4 * tf.random_uniform([]) - 2
        summary_lib.scalar('baz', middle_baz_value)
        summary_lib.scalar('baz_lower',
                           middle_baz_value - 6.42 - tf.random_uniform([]))
        summary_lib.scalar('baz_upper',
                           middle_baz_value + 6.42 + tf.random_uniform([]))

    with tf.name_scope('trigFunctions'):
        summary_lib.scalar('cosine', tf.cos(step))
        summary_lib.scalar('sine', tf.sin(step))
        summary_lib.scalar('tangent', tf.tan(step))

    merged_summary = tf.summary.merge_all()

    with tf.Session() as sess, tf.summary.FileWriter(LOGDIR) as writer:
        # We only need to specify the layout once (instead of per step).
        layout_summary = summary_lib.custom_scalar_pb(
            layout_pb2.Layout(category=[
                layout_pb2.Category(
                    title='losses',
                    chart=[
                        layout_pb2.Chart(
                            title='losses',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'loss(?!.*margin.*)'], )),
                        layout_pb2.Chart(
                            title='baz',
                            margin=layout_pb2.MarginChartContent(series=[
                                layout_pb2.MarginChartContent.Series(
                                    value='loss/baz/scalar_summary',
                                    lower='loss/baz_lower/scalar_summary',
                                    upper='loss/baz_upper/scalar_summary'),
                            ], )),
                    ]),
                layout_pb2.Category(
                    title='trig functions',
                    chart=[
                        layout_pb2.Chart(
                            title='wave trig functions',
                            multiline=layout_pb2.MultilineChartContent(tag=[
                                r'trigFunctions/cosine', r'trigFunctions/sine'
                            ], )),
                        # The range of tangent is different. Give it its own chart.
                        layout_pb2.Chart(
                            title='tan',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'trigFunctions/tangent'], )),
                    ],
                    # This category we care less about. Make it initially closed.
                    closed=True),
            ]))
        writer.add_summary(layout_summary)

        for i in xrange(42):
            summary = sess.run(merged_summary, feed_dict={step: i})
            writer.add_summary(summary, global_step=i)
Example #21
def run_all(logdir, verbose=False):
  """Generate a bunch of histogram data, and write it to logdir."""
  del verbose

  tf.set_random_seed(0)

  k = tf.placeholder(tf.float32)

  # Make a normal distribution, with a shifting mean
  mean_moving_normal = tf.random_normal(shape=[1000], mean=(5*k), stddev=1)
  # Record that distribution into a histogram summary
  histogram_summary.op("normal/moving_mean",
                       mean_moving_normal,
                       description="A normal distribution whose mean changes "
                                   "over time.")

  # Make a normal distribution with shrinking variance
  shrinking_normal = tf.random_normal(shape=[1000], mean=0, stddev=1-(k))
  # Record that distribution too
  histogram_summary.op("normal/shrinking_variance", shrinking_normal,
                       description="A normal distribution whose variance "
                                   "shrinks over time.")

  # Let's combine both of those distributions into one dataset
  normal_combined = tf.concat([mean_moving_normal, shrinking_normal], 0)
  # We add another histogram summary to record the combined distribution
  histogram_summary.op("normal/bimodal", normal_combined,
                       description="A combination of two normal distributions, "
                                   "one with a moving mean and one with  "
                                   "shrinking variance. The result is a "
                                   "distribution that starts as unimodal and "
                                   "becomes more and more bimodal over time.")

  # Add a gamma distribution
  gamma = tf.random_gamma(shape=[1000], alpha=k)
  histogram_summary.op("gamma", gamma,
                       description="A gamma distribution whose shape "
                                   "parameter, α, changes over time.")

  # And a poisson distribution
  poisson = tf.random_poisson(shape=[1000], lam=k)
  histogram_summary.op("poisson", poisson,
                       description="A Poisson distribution, which only "
                                   "takes on integer values.")

  # And a uniform distribution
  uniform = tf.random_uniform(shape=[1000], maxval=k*10)
  histogram_summary.op("uniform", uniform,
                       description="A simple uniform distribution.")

  # Finally, combine everything together!
  all_distributions = [mean_moving_normal, shrinking_normal,
                       gamma, poisson, uniform]
  all_combined = tf.concat(all_distributions, 0)
  histogram_summary.op("all_combined", all_combined,
                       description="An amalgamation of five distributions: a "
                                   "uniform distribution, a gamma "
                                   "distribution, a Poisson distribution, and "
                                   "two normal distributions.")

  summaries = tf.summary.merge_all()

  # Setup a session and summary writer
  sess = tf.Session()
  writer = tf.summary.FileWriter(logdir)

  # Setup a loop and write the summaries to disk
  N = 400
  for step in xrange(N):
    k_val = step/float(N)
    summ = sess.run(summaries, feed_dict={k: k_val})
    writer.add_summary(summ, global_step=step)
  writer.close()
Example #22
def start_runs(logdir,
               steps,
               run_name,
               thresholds,
               mask_every_other_prediction=False):
    """Generate a PR curve with precision and recall evenly weighted.

  Arguments:
    logdir: The directory into which to store all the runs' data.
    steps: The number of steps to run for.
    run_name: The name of the run.
    thresholds: The number of thresholds to use for PR curves.
    mask_every_other_prediction: Whether to mask every other prediction by
      alternating weights between 0 and 1.
  """
    tf.reset_default_graph()
    tf.set_random_seed(42)

    # Create a normal distribution layer used to generate true color labels.
    distribution = tf.distributions.Normal(loc=0., scale=142.)

    # Sample the distribution to generate colors. Let's generate different
    # numbers of each color. The first dimension is the count of examples.

    # The calls to sample() are given fixed random seed values that are "magic"
    # in that they correspond to the default seeds for those ops when the PR
    # curve test (which depends on this code) was written. We've pinned these
    # instead of continuing to use the defaults since the defaults are based on
    # node IDs from the sequence of nodes added to the graph, which can silently
    # change when this code or any TF op implementations it uses are modified.

    # TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds.

    # Generate reds.
    number_of_reds = 100
    true_reds = tf.clip_by_value(
        tf.concat([
            255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)),
            tf.abs(distribution.sample([number_of_reds, 2], seed=34))
        ],
                  axis=1), 0, 255)

    # Generate greens.
    number_of_greens = 200
    true_greens = tf.clip_by_value(
        tf.concat([
            tf.abs(distribution.sample([number_of_greens, 1], seed=61)),
            255 - tf.abs(distribution.sample([number_of_greens, 1], seed=82)),
            tf.abs(distribution.sample([number_of_greens, 1], seed=105))
        ],
                  axis=1), 0, 255)

    # Generate blues.
    number_of_blues = 150
    true_blues = tf.clip_by_value(
        tf.concat([
            tf.abs(distribution.sample([number_of_blues, 2], seed=132)),
            255 - tf.abs(distribution.sample([number_of_blues, 1], seed=153))
        ],
                  axis=1), 0, 255)

    # Assign each color a vector of 3 booleans based on its true label.
    labels = tf.concat([
        tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)),
        tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)),
        tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)),
    ],
                       axis=0)

    # We introduce 3 normal distributions. They are used to predict whether a
    # color falls under a certain class (based on distances from corners of the
    # color triangle). The distributions vary per color. We have the distributions
    # narrow over time.
    initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)]
    iteration = tf.placeholder(tf.int32, shape=[])
    red_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[0] - iteration,
                      dtype=tf.float32))
    green_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[1] - iteration,
                      dtype=tf.float32))
    blue_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[2] - iteration,
                      dtype=tf.float32))

    # Make predictions (assign 3 probabilities to each color based on each color's
    # distance to each of the 3 corners). We seek double the area in the right
    # tail of the normal distribution.
    examples = tf.concat([true_reds, true_greens, true_blues], axis=0)
    probabilities_colors_are_red = (1 - red_predictor.cdf(
        tf.norm(examples - tf.constant([255., 0, 0]), axis=1))) * 2
    probabilities_colors_are_green = (1 - green_predictor.cdf(
        tf.norm(examples - tf.constant([0, 255., 0]), axis=1))) * 2
    probabilities_colors_are_blue = (1 - blue_predictor.cdf(
        tf.norm(examples - tf.constant([0, 0, 255.]), axis=1))) * 2

    predictions = (probabilities_colors_are_red,
                   probabilities_colors_are_green,
                   probabilities_colors_are_blue)

    # This is the crucial piece. We write data required for generating PR curves.
    # We create 1 summary per class because we create 1 PR curve per class.
    for i, color in enumerate(('red', 'green', 'blue')):
        description = (
            'The probabilities used to create this PR curve are '
            'generated from a normal distribution. Its standard '
            'deviation is initially %0.0f and decreases over time.' %
            initial_standard_deviations[i])

        weights = None
        if mask_every_other_prediction:
            # Assign a weight of 0 to every even-indexed prediction. Odd-indexed
            # predictions are assigned a default weight of 1.
            consecutive_indices = tf.reshape(tf.range(tf.size(predictions[i])),
                                             tf.shape(predictions[i]))
            weights = tf.cast(consecutive_indices % 2, dtype=tf.float32)

        summary.op(name=color,
                   labels=labels[:, i],
                   predictions=predictions[i],
                   num_thresholds=thresholds,
                   weights=weights,
                   display_name='classifying %s' % color,
                   description=description)
    merged_summary_op = tf.summary.merge_all()
    events_directory = os.path.join(logdir, run_name)
    sess = tf.Session()
    writer = tf.summary.FileWriter(events_directory, sess.graph)

    for step in xrange(steps):
        feed_dict = {
            iteration: step,
        }
        merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict)
        writer.add_summary(merged_summary, step)

    writer.close()
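A note on the prediction step above: (1 - cdf(distance)) * 2 folds the
two-sided normal into a score in [0, 1]. A color sitting exactly on its corner
(distance 0) scores (1 - 0.5) * 2 = 1, and the score decays toward 0 as the
distance grows. A numpy-free sanity check (illustrative, not part of the
demo):

import math

def two_sided_score(distance, scale):
    # Doubled right-tail mass of a zero-mean normal with the given scale.
    cdf = 0.5 * (1.0 + math.erf(distance / (scale * math.sqrt(2.0))))
    return (1.0 - cdf) * 2.0

assert two_sided_score(0.0, scale=158.0) == 1.0  # on the corner: score of 1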
Example #23
def run(logdir, session_id, hparams, group_name):
    """Runs a temperature simulation.

  This will simulate an object at temperature `initial_temperature`
  sitting at rest in a large room at temperature `ambient_temperature`.
  The object has some intrinsic `heat_coefficient`, which indicates
  how much thermal conductivity it has: for instance, metals have high
  thermal conductivity, while the thermal conductivity of water is low.

  Over time, the object's temperature will adjust to match the
  temperature of its environment. We'll track the object's temperature,
  how far it is from the room's temperature, and how much it changes at
  each time step.

  Arguments:
    logdir: the top-level directory into which to write summary data
    session_id: an id for the session.
    hparams: A dictionary mapping a hyperparameter name to its value.
    group_name: an id for the session group this session belongs to.
  """
    tf.reset_default_graph()
    tf.set_random_seed(0)

    initial_temperature = hparams['initial_temperature']
    ambient_temperature = hparams['ambient_temperature']
    heat_coefficient = hparams['heat_coefficient']
    session_dir = os.path.join(logdir, session_id)
    writer = tf.summary.FileWriter(session_dir)
    writer.add_summary(
        summary.session_start_pb(hparams=hparams, group_name=group_name))
    writer.flush()
    with tf.name_scope('temperature'):
        # Create a mutable variable to hold the object's temperature, and
        # create a scalar summary to track its value over time. The name of
        # the summary will appear as "temperature/current" due to the
        # name-scope above.
        temperature = tf.Variable(tf.constant(initial_temperature),
                                  name='temperature')
        scalar_summary.op('current',
                          temperature,
                          display_name='Temperature',
                          description='The temperature of the object under '
                          'simulation, in Kelvins.')

        # Compute how much the object's temperature differs from that of its
        # environment, and track this, too: likewise, as
        # "temperature/difference_to_ambient".
        ambient_difference = temperature - ambient_temperature
        scalar_summary.op(
            'difference_to_ambient',
            ambient_difference,
            display_name='Difference to ambient temperature',
            description=('The difference between the ambient '
                         'temperature and the temperature of the '
                         'object under simulation, in Kelvins.'))

    # Newton suggested that the rate of change of the temperature of an
    # object is directly proportional to this `ambient_difference` above,
    # where the proportionality constant is what we called the heat
    # coefficient. But in real life, not everything is quite so clean, so
    # we'll add in some noise. (The value of 50 is arbitrary, chosen to
    # make the data look somewhat interesting. :-) )
    noise = 50 * tf.random_normal([])
    delta = -heat_coefficient * (ambient_difference + noise)
    scalar_summary.op(
        'delta',
        delta,
        description='The change in temperature from the previous '
        'step, in Kelvins.')

    # Collect all the scalars that we want to keep track of.
    summ = tf.summary.merge_all()

    # Now, augment the current temperature by this delta that we computed,
    # blocking the assignment on summary collection to avoid race conditions
    # and ensure that the summary always reports the pre-update value.
    with tf.control_dependencies([summ]):
        update_step = temperature.assign_add(delta)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for step in xrange(STEPS):
        # By asking TensorFlow to compute the update step, we force it to
        # change the value of the temperature variable. We don't actually
        # care about this value, so we discard it; instead, we grab the
        # summary data computed along the way.
        (s, _) = sess.run([summ, update_step])
        writer.add_summary(s, global_step=step)
    writer.add_summary(summary.session_end_pb(api_pb2.STATUS_SUCCESS))
    writer.close()
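Setting the injected noise aside, the simulation above is a discretization of
Newton's law of cooling, so the expected trajectory has a closed form. This
small helper (a sketch for intuition, not part of the demo) computes the value
that the temperature/current chart should hover around:

def expected_temperature(step, initial, ambient, heat_coefficient):
    # The noiseless update T <- T - k * (T - ambient) solves to
    # T(t) = ambient + (initial - ambient) * (1 - k)**t.
    return ambient + (initial - ambient) * (1.0 - heat_coefficient) ** step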
Example #24
    def setUp(self):
        self.log_dir = tempfile.mkdtemp()

        # We use numpy.random to generate audio. We seed to avoid non-determinism
        # in this test.
        numpy.random.seed(42)

        # Create old-style audio summaries for run "foo".
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.float32)
        tf.summary.audio(name="baz", tensor=placeholder, sample_rate=44100)
        merged_summary_op = tf.summary.merge_all()
        foo_directory = os.path.join(self.log_dir, "foo")
        writer = tf.summary.FileWriter(foo_directory)
        writer.add_graph(sess.graph)
        for step in xrange(2):
            # The floats (sample data) range from -1 to 1.
            writer.add_summary(sess.run(
                merged_summary_op,
                feed_dict={placeholder: numpy.random.rand(42, 22050) * 2 - 1}),
                               global_step=step)
        writer.close()

        # Create new-style audio summaries for run "bar".
        tf.reset_default_graph()
        sess = tf.Session()
        audio_placeholder = tf.placeholder(tf.float32)
        labels_placeholder = tf.placeholder(tf.string)
        summary.op("quux",
                   audio_placeholder,
                   sample_rate=44100,
                   labels=labels_placeholder,
                   description="how do you pronounce that, anyway?")
        merged_summary_op = tf.summary.merge_all()
        bar_directory = os.path.join(self.log_dir, "bar")
        writer = tf.summary.FileWriter(bar_directory)
        writer.add_graph(sess.graph)
        for step in xrange(2):
            # The floats (sample data) range from -1 to 1.
            writer.add_summary(sess.run(
                merged_summary_op,
                feed_dict={
                    audio_placeholder:
                    numpy.random.rand(42, 11025, 1) * 2 - 1,
                    labels_placeholder: [
                        tf.compat.as_bytes('step **%s**, sample %s' %
                                           (step, sample))
                        for sample in xrange(42)
                    ],
                }),
                               global_step=step)
        writer.close()

        # Start a server with the plugin.
        multiplexer = event_multiplexer.EventMultiplexer({
            "foo": foo_directory,
            "bar": bar_directory,
        })
        context = base_plugin.TBContext(logdir=self.log_dir,
                                        multiplexer=multiplexer)
        self.plugin = audio_plugin.AudioPlugin(context)
        # Setting a reload interval of -1 disables reloading. We disable
        # reloading because we want to block tests from running until a reload
        # has finished, so this setUp method reloads the multiplexer manually;
        # TensorBoard would otherwise reload in a non-blocking thread.
        wsgi_app = application.TensorBoardWSGIApp(self.log_dir, [self.plugin],
                                                  multiplexer,
                                                  reload_interval=-1,
                                                  path_prefix='')
        self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
        multiplexer.Reload()
Example #25
 def pb_via_op(self, summary_op, feed_dict=None):
     with tf.Session() as sess:
         actual_pbtxt = sess.run(summary_op, feed_dict=feed_dict or {})
     actual_proto = tf.Summary()
     actual_proto.ParseFromString(actual_pbtxt)
     return actual_proto
Example #26
def run(logdir, run_name, wave_name, wave_constructor):
    """Generate wave data of the given form.

  The provided function `wave_constructor` should accept a scalar tensor
  of type float32, representing the frequency (in Hz) at which to
  construct a wave, and return a tensor of shape [1, _samples(), `n`]
  representing audio data (for some number of channels `n`).

  Waves will be generated at frequencies ranging from A4 to A5.

  Arguments:
    logdir: the top-level directory into which to write summary data
    run_name: the name of this run; will be created as a subdirectory
      under logdir
    wave_name: the name of the wave being generated
    wave_constructor: see above
  """
    tf.reset_default_graph()
    tf.set_random_seed(0)

    # On each step `i`, we'll set this placeholder to `i`. This allows us
    # to know "what time it is" at each step.
    step_placeholder = tf.placeholder(tf.float32, shape=[])

    # We want to linearly interpolate a frequency between A4 (440 Hz) and
    # A5 (880 Hz).
    with tf.name_scope('compute_frequency'):
        f_min = 440.0
        f_max = 880.0
        t = step_placeholder / (FLAGS.steps - 1)
        frequency = f_min * (1.0 - t) + f_max * t

    # Let's log this frequency, just so that we can make sure that it's as
    # expected.
    tf.summary.scalar('frequency', frequency)

    # Now, we pass this to the wave constructor to get our waveform. Doing
    # so within a name scope means that any summaries that the wave
    # constructor produces will be namespaced.
    with tf.name_scope(wave_name):
        waveform = wave_constructor(frequency)

    # We also have the opportunity to annotate each audio clip with a
    # label. This is a good place to include the frequency, because it'll
    # be visible immediately next to the audio clip.
    with tf.name_scope('compute_labels'):
        samples = tf.shape(waveform)[0]
        wave_types = tf.tile(["*Wave type:* `%s`." % wave_name], [samples])
        frequencies = tf.string_join([
            "*Frequency:* ",
            tf.tile([tf.as_string(frequency, precision=2)], [samples]),
            " Hz.",
        ])
        samples = tf.string_join([
            "*Sample:* ",
            tf.as_string(tf.range(samples) + 1),
            " of ",
            tf.as_string(samples),
            ".",
        ])
        labels = tf.string_join([wave_types, frequencies, samples],
                                separator=" ")

    # We can place a description next to the summary in TensorBoard. This
    # is a good place to explain what the summary represents, methodology
    # for creating it, etc. Let's include the source code of the function
    # that generated the wave.
    source = '\n'.join('    %s' % line.rstrip()
                       for line in inspect.getsourcelines(wave_constructor)[0])
    description = ("A wave of type `%r`, generated via:\n\n%s" %
                   (wave_name, source))

    # Here's the crucial piece: we interpret this result as audio.
    summary.op('waveform',
               waveform,
               FLAGS.sample_rate,
               labels=labels,
               display_name=wave_name,
               description=description)

    # Now, we can collect up all the summaries and begin the run.
    summ = tf.summary.merge_all()

    sess = tf.Session()
    writer = tf.summary.FileWriter(os.path.join(logdir, run_name))
    writer.add_graph(sess.graph)
    sess.run(tf.global_variables_initializer())
    for step in xrange(FLAGS.steps):
        s = sess.run(summ, feed_dict={step_placeholder: float(step)})
        writer.add_summary(s, global_step=step)
    writer.close()
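A minimal wave constructor satisfying the contract in the docstring above.
This is a sketch that assumes the demo module's _samples() helper,
FLAGS.sample_rate, and a math import:

def sine_wave(frequency):
    # One channel of a pure sine tone at `frequency` Hz, shaped
    # [1, _samples(), 1] as `run` expects.
    xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
    ts = xs / FLAGS.sample_rate
    return tf.sin(2.0 * math.pi * frequency * ts)

run(logdir, 'sine_wave', 'sine', sine_wave)  # hypothetical invocation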
Example #27
 def test_non_string_value_in_op(self):
     with six.assertRaisesRegex(self, Exception,
                                r'must be of type <dtype: \'string\'>'):
         with tf.Session() as sess:
             sess.run(summary.op('so', tf.constant(5)))