Code Example #1
    def setUp(self):
        self.log_dir = tempfile.mkdtemp()

        # We use numpy.random to generate images. We seed to avoid non-determinism
        # in this test.
        numpy.random.seed(42)

        # Create old-style image summaries for run "foo".
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.uint8)
        tf.summary.image(name="baz", tensor=placeholder)
        merged_summary_op = tf.summary.merge_all()
        foo_directory = os.path.join(self.log_dir, "foo")
        writer = tf.summary.FileWriter(foo_directory)
        writer.add_graph(sess.graph)
        for step in xrange(2):
            writer.add_summary(sess.run(merged_summary_op,
                                        feed_dict={
                                            placeholder:
                                            (numpy.random.rand(1, 16, 42, 3) *
                                             255).astype(numpy.uint8)
                                        }),
                               global_step=step)
        writer.close()

        # Create new-style image summaries for run bar.
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.uint8)
        summary.op(name="quux",
                   images=placeholder,
                   description="how do you pronounce that, anyway?")
        merged_summary_op = tf.summary.merge_all()
        bar_directory = os.path.join(self.log_dir, "bar")
        writer = tf.summary.FileWriter(bar_directory)
        writer.add_graph(sess.graph)
        for step in xrange(2):
            writer.add_summary(sess.run(merged_summary_op,
                                        feed_dict={
                                            placeholder:
                                            (numpy.random.rand(1, 6, 8, 3) *
                                             255).astype(numpy.uint8)
                                        }),
                               global_step=step)
        writer.close()

        # Start a server with the plugin.
        multiplexer = event_multiplexer.EventMultiplexer({
            "foo": foo_directory,
            "bar": bar_directory,
        })
        context = base_plugin.TBContext(logdir=self.log_dir,
                                        multiplexer=multiplexer)
        plugin = images_plugin.ImagesPlugin(context)
        wsgi_app = application.TensorBoardWSGIApp(self.log_dir, [plugin],
                                                  multiplexer,
                                                  reload_interval=0)
        self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
        self.routes = plugin.get_plugin_apps()
Code Example #2
  def setUp(self):
    self.log_dir = tempfile.mkdtemp()

    # We use numpy.random to generate images. We seed to avoid non-determinism
    # in this test.
    numpy.random.seed(42)

    # Create old-style image summaries for run "foo".
    tf.reset_default_graph()
    sess = tf.Session()
    placeholder = tf.placeholder(tf.uint8)
    tf.summary.image(name="baz", tensor=placeholder)
    merged_summary_op = tf.summary.merge_all()
    foo_directory = os.path.join(self.log_dir, "foo")
    writer = tf.summary.FileWriter(foo_directory)
    writer.add_graph(sess.graph)
    for step in xrange(2):
      writer.add_summary(sess.run(merged_summary_op, feed_dict={
          placeholder: (numpy.random.rand(1, 16, 42, 3) * 255).astype(
              numpy.uint8)
      }), global_step=step)
    writer.close()

    # Create new-style image summaries for run bar.
    tf.reset_default_graph()
    sess = tf.Session()
    placeholder = tf.placeholder(tf.uint8)
    summary.op(name="quux", images=placeholder,
               description="how do you pronounce that, anyway?")
    merged_summary_op = tf.summary.merge_all()
    bar_directory = os.path.join(self.log_dir, "bar")
    writer = tf.summary.FileWriter(bar_directory)
    writer.add_graph(sess.graph)
    for step in xrange(2):
      writer.add_summary(sess.run(merged_summary_op, feed_dict={
          placeholder: (numpy.random.rand(1, 6, 8, 3) * 255).astype(
              numpy.uint8)
      }), global_step=step)
    writer.close()

    # Start a server with the plugin.
    multiplexer = event_multiplexer.EventMultiplexer({
        "foo": foo_directory,
        "bar": bar_directory,
    })
    context = base_plugin.TBContext(
        logdir=self.log_dir, multiplexer=multiplexer)
    plugin = images_plugin.ImagesPlugin(context)
    # Setting a reload interval of -1 disables reloading. We disable reloading
    # because we seek to block tests from running til after one reload finishes.
    # This setUp method thus manually reloads the multiplexer. TensorBoard would
    # otherwise reload in a non-blocking thread.
    wsgi_app = application.TensorBoardWSGIApp(
        self.log_dir, [plugin], multiplexer, reload_interval=-1, path_prefix='')
    self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
    multiplexer.Reload()
    self.routes = plugin.get_plugin_apps()
Code Example #3
  def setUp(self):
    self.log_dir = tempfile.mkdtemp()

    # We use numpy.random to generate images. We seed to avoid non-determinism
    # in this test.
    numpy.random.seed(42)

    # Create old-style image summaries for run "foo".
    tf.compat.v1.reset_default_graph()
    sess = tf.compat.v1.Session()
    placeholder = tf.compat.v1.placeholder(tf.uint8)
    tf.compat.v1.summary.image(name="baz", tensor=placeholder)
    merged_summary_op = tf.compat.v1.summary.merge_all()
    foo_directory = os.path.join(self.log_dir, "foo")
    with test_util.FileWriterCache.get(foo_directory) as writer:
      writer.add_graph(sess.graph)
      for step in xrange(2):
        writer.add_summary(sess.run(merged_summary_op, feed_dict={
            placeholder: (numpy.random.rand(1, 16, 42, 3) * 255).astype(
                numpy.uint8)
        }), global_step=step)

    # Create new-style image summaries for run bar.
    tf.compat.v1.reset_default_graph()
    sess = tf.compat.v1.Session()
    placeholder = tf.compat.v1.placeholder(tf.uint8)
    summary.op(name="quux", images=placeholder,
               description="how do you pronounce that, anyway?")
    merged_summary_op = tf.compat.v1.summary.merge_all()
    bar_directory = os.path.join(self.log_dir, "bar")
    with test_util.FileWriterCache.get(bar_directory) as writer:
      writer.add_graph(sess.graph)
      for step in xrange(2):
        writer.add_summary(sess.run(merged_summary_op, feed_dict={
            placeholder: (numpy.random.rand(1, 8, 6, 3) * 255).astype(
                numpy.uint8)
        }), global_step=step)

    # Start a server with the plugin.
    multiplexer = event_multiplexer.EventMultiplexer({
        "foo": foo_directory,
        "bar": bar_directory,
    })
    context = base_plugin.TBContext(
        logdir=self.log_dir, multiplexer=multiplexer)
    plugin = images_plugin.ImagesPlugin(context)
    # Setting a reload interval of -1 disables reloading. We disable reloading
    # because we seek to block tests from running til after one reload finishes.
    # This setUp method thus manually reloads the multiplexer. TensorBoard would
    # otherwise reload in a non-blocking thread.
    wsgi_app = application.TensorBoardWSGIApp(
        self.log_dir, [plugin], multiplexer, reload_interval=-1, path_prefix='')
    self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
    multiplexer.Reload()
    self.routes = plugin.get_plugin_apps()
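A minimal sketch, for illustration only, of how a test built on a setUp like the ones above might query the plugin through the werkzeug test client. The method name is made up, `json` is assumed to be imported, and the route path is an assumption based on TensorBoard's usual /data/plugin/<plugin_name>/<route> layout:

  def _fetch_tags_sketch(self):
    # Assumed route: ImagesPlugin routes are mounted under /data/plugin/images/.
    response = self.server.get("/data/plugin/images/tags")
    self.assertEqual(200, response.status_code)
    # The body is JSON mapping each run ("foo", "bar") to its image tags.
    return json.loads(response.get_data().decode("utf-8"))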
Code Example #4
File: summary_test.py Project: kokimame/tensoba
 def image(self, *args, **kwargs):
     args = list(args)
     # Force first argument to tf.uint8 since the V1 version requires this.
     args[1] = tf.cast(tf.constant(args[1]), tf.uint8)
     return summary_pb2.Summary.FromString(
         summary.op(*args, **kwargs).numpy()
     )
Code Example #5
 def test_new_style_image(self):
     op = image_summary.op('mona_lisa',
                           tf.cast(tf.random_normal(shape=[1, 400, 200, 3]),
                                   tf.uint8),
                           display_name='The Mona Lisa',
                           description='A renowned portrait by da Vinci.')
     value = self._value_from_op(op)
     assert value.HasField('tensor'), value
     self._assert_noop(value)
Code Example #6
File: data_compat_test.py Project: jlewi/tensorboard
 def test_new_style_image(self):
   op = image_summary.op('mona_lisa',
                         tf.cast(tf.random_normal(shape=[1, 400, 200, 3]),
                                 tf.uint8),
                         display_name='The Mona Lisa',
                         description='A renowned portrait by da Vinci.')
   value = self._value_from_op(op)
   assert value.HasField('tensor'), value
   self._assert_noop(value)
Code Example #7
 def test_new_style_image(self):
     with tf.compat.v1.Graph().as_default():
         op = image_summary.op(
             'mona_lisa',
             tf.image.convert_image_dtype(
                 tf.random.normal(shape=[1, 400, 200, 3]),
                 tf.uint8,
                 saturate=True),
             display_name='The Mona Lisa',
             description='A renowned portrait by da Vinci.')
         value = self._value_from_op(op)
     assert value.HasField('tensor'), value
     self._assert_noop(value)
Code Example #8
    def compute_and_check_summary_pb(self,
                                     name,
                                     images,
                                     max_outputs=3,
                                     images_tensor=None,
                                     feed_dict=None):
        """Use both `op` and `pb` to get a summary, asserting equality.

    Returns:
      a `Summary` protocol buffer
    """
        if images_tensor is None:
            images_tensor = tf.cast(tf.constant(images), tf.uint8)
        op = summary.op(name, images_tensor, max_outputs=max_outputs)
        pb = summary.pb(name, images, max_outputs=max_outputs)
        pb_via_op = self.pb_via_op(op, feed_dict=feed_dict)
        self.assertProtoEquals(pb, pb_via_op)
        return pb
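A hypothetical call showing how the helper above might be used; the tag name and image batch below are made up for illustration:

        # Illustrative only: a random uint8 batch of two 8x8 RGB images.
        images = (numpy.random.rand(2, 8, 8, 3) * 255).astype(numpy.uint8)
        pb = self.compute_and_check_summary_pb("random_images", images,
                                               max_outputs=2)
        # `pb` is the Summary protocol buffer that both `summary.op` and
        # `summary.pb` agree on.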
Code Example #9
    def testNewStyleImageSummary(self):
        """Verify processing of tensorboard.plugins.image.summary."""
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        writer = test_util.FileWriter(self.get_temp_dir())
        writer.event_writer = event_sink
        with tf.compat.v1.Graph().as_default():
            with self.test_session() as sess:
                ipt = tf.ones([10, 4, 4, 3], tf.uint8)
                # This is an interesting example, because the old tf.image_summary op
                # would throw an error here, because it would be tag reuse.
                # Using the tf node name instead allows argument re-use to the image
                # summary.
                with tf.name_scope("1"):
                    image_summary.op("images", ipt, max_outputs=1)
                with tf.name_scope("2"):
                    image_summary.op("images", ipt, max_outputs=2)
                with tf.name_scope("3"):
                    image_summary.op("images", ipt, max_outputs=3)
                merged = tf.compat.v1.summary.merge_all()
                writer.add_graph(sess.graph)
                for i in range(10):
                    summ = sess.run(merged)
                    writer.add_summary(summ, global_step=i)

        accumulator = ea.EventAccumulator(event_sink)
        accumulator.Reload()

        tags = [
            graph_metadata.RUN_GRAPH_NAME,
            "1/images/image_summary",
            "2/images/image_summary",
            "3/images/image_summary",
        ]
        self.assertTagsEqual(
            accumulator.Tags(),
            {
                ea.TENSORS: tags,
                ea.GRAPH: True,
                ea.META_GRAPH: False,
            },
        )

        self.assertItemsEqual(
            accumulator.ActivePlugins(),
            [image_metadata.PLUGIN_NAME, graph_metadata.PLUGIN_NAME],
        )
Code Example #10
  def testNewStyleImageSummary(self):
    """Verify processing of tensorboard.plugins.image.summary."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = test_util.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      ipt = tf.ones([10, 4, 4, 3], tf.uint8)
      # This is an interesting example, because the old tf.image_summary op
      # would throw an error here, because it would be tag reuse.
      # Using the tf node name instead allows argument re-use to the image
      # summary.
      with tf.name_scope('1'):
        image_summary.op('images', ipt, max_outputs=1)
      with tf.name_scope('2'):
        image_summary.op('images', ipt, max_outputs=2)
      with tf.name_scope('3'):
        image_summary.op('images', ipt, max_outputs=3)
      merged = tf.summary.merge_all()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged)
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'1/images/image_summary',
        u'2/images/image_summary',
        u'3/images/image_summary',
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: tags,
        ea.GRAPH: True,
        ea.META_GRAPH: False,
    })
Code Example #11
  def testNewStyleImageSummary(self):
    """Verify processing of tensorboard.plugins.image.summary."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = tf.summary.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      ipt = tf.ones([10, 4, 4, 3], tf.uint8)
      # This is an interesting example, because the old tf.image_summary op
      # would throw an error here, because it would be tag reuse.
      # Using the tf node name instead allows argument re-use to the image
      # summary.
      with tf.name_scope('1'):
        image_summary.op('images', ipt, max_outputs=1)
      with tf.name_scope('2'):
        image_summary.op('images', ipt, max_outputs=2)
      with tf.name_scope('3'):
        image_summary.op('images', ipt, max_outputs=3)
      merged = tf.summary.merge_all()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged)
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'1/images/image_summary',
        u'2/images/image_summary',
        u'3/images/image_summary',
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: tags,
        ea.GRAPH: True,
        ea.META_GRAPH: False,
    })
Code Example #12
File: images_demo.py Project: taehoonlee/tensorboard
def run_sobel(logdir, verbose=False):
    """Run a Sobel edge detection demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
    if verbose:
        logger.info('--- Starting run: sobel')

    tf.compat.v1.reset_default_graph()
    tf.compat.v1.set_random_seed(0)

    image = get_image(verbose=verbose)
    kernel_radius = tf.compat.v1.placeholder(shape=(), dtype=tf.int32)

    with tf.name_scope('horizontal_kernel'):
        kernel_side_length = kernel_radius * 2 + 1
        # Drop off influence for pixels further away from the center.
        weighting_kernel = (
            1.0 - tf.abs(tf.linspace(-1.0, 1.0, num=kernel_side_length)))
        differentiation_kernel = tf.linspace(-1.0, 1.0, num=kernel_side_length)
        horizontal_kernel = tf.matmul(
            tf.expand_dims(weighting_kernel, 1),
            tf.expand_dims(differentiation_kernel, 0))

    with tf.name_scope('vertical_kernel'):
        vertical_kernel = tf.transpose(a=horizontal_kernel)

    float_image = tf.cast(image, tf.float32)
    dx = convolve(float_image, horizontal_kernel, name='convolve_dx')
    dy = convolve(float_image, vertical_kernel, name='convolve_dy')
    gradient_magnitude = tf.norm(tensor=[dx, dy],
                                 axis=0,
                                 name='gradient_magnitude')
    with tf.name_scope('normalized_gradient'):
        normalized_gradient = gradient_magnitude / tf.reduce_max(
            input_tensor=gradient_magnitude)
    with tf.name_scope('output_image'):
        output_image = tf.cast(255 * normalized_gradient, tf.uint8)

    summ = image_summary.op(
        'sobel',
        tf.stack([output_image]),
        display_name='Sobel edge detection',
        description=(
            u'Demonstration of [Sobel edge detection]. The step '
            'parameter adjusts the radius of the kernel. '
            'The kernel can be of arbitrary size, and considers '
            u'nearby pixels with \u2113\u2082-linear falloff.\n\n'
            # (that says ``$\ell_2$-linear falloff'')
            'Edge detection is done on a per-channel basis, so '
            'you can observe which edges are “mostly red '
            'edges,” for instance.\n\n'
            'For practical edge detection, a small kernel '
            '(usually not more than *r*=2) is best.\n\n'
            '[Sobel edge detection]: %s\n\n'
            "%s" %
            ('https://en.wikipedia.org/wiki/Sobel_operator', IMAGE_CREDIT)))

    with tf.compat.v1.Session() as sess:
        sess.run(image.initializer)
        writer = tf.summary.FileWriter(os.path.join(logdir, 'sobel'))
        writer.add_graph(sess.graph)
        for step in xrange(8):
            if verbose:
                logger.info("--- sobel: step: %s" % step)
            feed_dict = {kernel_radius: step}
            run_options = tf.compat.v1.RunOptions(
                trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
            run_metadata = tf.compat.v1.RunMetadata()
            s = sess.run(summ,
                         feed_dict=feed_dict,
                         options=run_options,
                         run_metadata=run_metadata)
            writer.add_summary(s, global_step=step)
            writer.add_run_metadata(run_metadata, 'step_%04d' % step)
        writer.close()
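As an aside, the horizontal_kernel above is just the outer product of a linear falloff with a signed ramp. A NumPy-only sketch of the same construction, with the radius fixed at 2 for illustration (not part of the demo):

import numpy as np

radius = 2
side = 2 * radius + 1
weighting = 1.0 - np.abs(np.linspace(-1.0, 1.0, num=side))   # peak of 1.0 at the center
differentiation = np.linspace(-1.0, 1.0, num=side)           # signed ramp from -1 to 1
horizontal_kernel = np.outer(weighting, differentiation)     # shape (side, side)
vertical_kernel = horizontal_kernel.T
# The center row is [-1, -0.5, 0, 0.5, 1]; rows above and below shrink linearly
# toward zero, matching the falloff applied by weighting_kernel in the graph.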
Code Example #13
File: images_demo.py Project: taehoonlee/tensorboard
def run_box_to_gaussian(logdir, verbose=False):
    """Run a box-blur-to-Gaussian-blur demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
    if verbose:
        logger.info('--- Starting run: box_to_gaussian')

    tf.compat.v1.reset_default_graph()
    tf.compat.v1.set_random_seed(0)

    image = get_image(verbose=verbose)
    blur_radius = tf.compat.v1.placeholder(shape=(), dtype=tf.int32)
    with tf.name_scope('filter'):
        blur_side_length = blur_radius * 2 + 1
        pixel_filter = tf.ones((blur_side_length, blur_side_length))
        pixel_filter = (pixel_filter /
                        tf.cast(tf.size(input=pixel_filter), tf.float32)
                        )  # normalize

    iterations = 4
    images = [tf.cast(image, tf.float32) / 255.0]
    for _ in xrange(iterations):
        images.append(convolve(images[-1], pixel_filter))
    with tf.name_scope('convert_to_uint8'):
        images = tf.stack([
            tf.cast(255 * tf.clip_by_value(image_, 0.0, 1.0), tf.uint8)
            for image_ in images
        ])

    summ = image_summary.op(
        'box_to_gaussian',
        images,
        max_outputs=iterations,
        display_name='Gaussian blur as a limit process of box blurs',
        description=(
            'Demonstration of forming a Gaussian blur by '
            'composing box blurs, each of which can be expressed '
            'as a 2D convolution.\n\n'
            'A Gaussian blur is formed by convolving a Gaussian '
            'kernel over an image. But a Gaussian kernel is '
            'itself the limit of convolving a constant kernel '
            'with itself many times. Thus, while applying '
            'a box-filter convolution just once produces '
            'results that are noticeably different from those '
            'of a Gaussian blur, repeating the same convolution '
            'just a few times causes the result to rapidly '
            'converge to an actual Gaussian blur.\n\n'
            'Here, the step value controls the blur radius, '
            'and the image sample controls the number of times '
            'that the convolution is applied (plus one). '
            'So, when *sample*=1, the original image is shown; '
            '*sample*=2 shows a box blur; and a hypothetical '
            '*sample*=∞ would show a true Gaussian blur.\n\n'
            'This is one ingredient in a recipe to compute very '
            'fast Gaussian blurs. The other pieces require '
            'special treatment for the box blurs themselves '
            '(decomposition to dual one-dimensional box blurs, '
            'each of which is computed with a sliding window); '
            'we don’t perform those optimizations here.\n\n'
            '[Here are some slides describing the full process.]'
            '(%s)\n\n'
            '%s' % ('http://elynxsdk.free.fr/ext-docs/Blur/Fast_box_blur.pdf',
                    IMAGE_CREDIT)))

    with tf.compat.v1.Session() as sess:
        sess.run(image.initializer)
        writer = tf.summary.FileWriter(os.path.join(logdir, 'box_to_gaussian'))
        writer.add_graph(sess.graph)
        for step in xrange(8):
            if verbose:
                logger.info('--- box_to_gaussian: step: %s' % step)
            feed_dict = {blur_radius: step}
            run_options = tf.compat.v1.RunOptions(
                trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
            run_metadata = tf.compat.v1.RunMetadata()
            s = sess.run(summ,
                         feed_dict=feed_dict,
                         options=run_options,
                         run_metadata=run_metadata)
            writer.add_summary(s, global_step=step)
            writer.add_run_metadata(run_metadata, 'step_%04d' % step)
        writer.close()
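The description's claim that repeated box blurs converge to a Gaussian blur can be checked directly. A NumPy-only 1-D sketch, independent of the demo code:

import numpy as np

box = np.ones(5) / 5.0             # a 1-D box filter
kernel = box.copy()
for _ in range(3):                 # compose the box blur with itself a few times
    kernel = np.convolve(kernel, box)
# By the central limit theorem, repeated self-convolution of a box filter tends
# toward a Gaussian; after only a few rounds the kernel is already bell-shaped.
print(np.round(kernel, 4))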
Code Example #14
    def _create_data(self):
        """Write test data to disk, returning `(logdir, multiplexer)`."""
        self.log_dir = tempfile.mkdtemp()

        # We use numpy.random to generate images. We seed to avoid non-determinism
        # in this test.
        numpy.random.seed(42)

        # Create old-style image summaries for run "foo".
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        placeholder = tf.compat.v1.placeholder(tf.uint8)
        tf.compat.v1.summary.image(name="baz", tensor=placeholder)
        merged_summary_op = tf.compat.v1.summary.merge_all()
        foo_directory = os.path.join(self.log_dir, "foo")
        with test_util.FileWriterCache.get(foo_directory) as writer:
            writer.add_graph(sess.graph)
            for step in xrange(2):
                writer.add_summary(
                    sess.run(
                        merged_summary_op,
                        feed_dict={
                            placeholder: (numpy.random.rand(1, 16, 42, 3) *
                                          255).astype(numpy.uint8)
                        },
                    ),
                    global_step=step,
                )

        # Create new-style image summaries for run bar.
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        placeholder = tf.compat.v1.placeholder(tf.uint8)
        summary.op(
            name="quux",
            images=placeholder,
            description="how do you pronounce that, anyway?",
        )
        merged_summary_op = tf.compat.v1.summary.merge_all()
        bar_directory = os.path.join(self.log_dir, "bar")
        with test_util.FileWriterCache.get(bar_directory) as writer:
            writer.add_graph(sess.graph)
            for step in xrange(2):
                writer.add_summary(
                    sess.run(
                        merged_summary_op,
                        feed_dict={
                            placeholder: (numpy.random.rand(1, 8, 6, 3) *
                                          255).astype(numpy.uint8)
                        },
                    ),
                    global_step=step,
                )

        # Start a server with the plugin.
        multiplexer = event_multiplexer.EventMultiplexer({
            "foo": foo_directory,
            "bar": bar_directory,
        })
        multiplexer.Reload()
        return (self.log_dir, multiplexer)
Code Example #15
 def test_requires_rank_4_in_op(self):
     with six.assertRaisesRegex(self, ValueError, 'must have rank 4'):
         summary.op('mona_lisa', tf.constant([[1, 2, 3], [4, 5, 6]]))
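For contrast, a hypothetical companion sketch (name and shape made up): a rank-4 batch shaped [k, h, w, c] satisfies the check this test exercises:

 def test_accepts_rank_4_in_op_sketch(self):
     # Hypothetical companion check: a [1, 4, 4, 3] uint8 batch has rank 4, so
     # constructing the summary op should not raise the rank error above.
     summary.op('mona_lisa', tf.zeros([1, 4, 4, 3], dtype=tf.uint8))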