Example #1
    def testExtractGatedGrpcTensorsFoundGatedGrpcOps(self):
        with tf.compat.v1.Session() as sess:
            z, run_options = self._createTestGraphAndRunOptions(
                sess, gated_grpc=True)

            sess.run(tf.compat.v1.global_variables_initializer())
            run_metadata = config_pb2.RunMetadata()
            self.assertAllClose(
                [10.0],
                sess.run(z, options=run_options, run_metadata=run_metadata),
            )

            graph_wrapper = debug_graphs_helper.DebugGraphWrapper(
                run_metadata.partition_graphs[0])
            gated_debug_ops = graph_wrapper.get_gated_grpc_tensors()
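            # Each entry appears to be a (node_name, op_type, output_slot,
            # debug_op) tuple, judging from the indexing below.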

            # Verify that the op types are available.
            for item in gated_debug_ops:
                self.assertTrue(item[1])

            # Strip out the op types before further checks, because op type names can
            # change in the future (e.g., 'VariableV2' --> 'VariableV3').
            gated_debug_ops = [(item[0], item[2], item[3])
                               for item in gated_debug_ops]

            self.assertIn(("a", 0, "DebugIdentity"), gated_debug_ops)
            self.assertIn(("b", 0, "DebugIdentity"), gated_debug_ops)
            self.assertIn(("c", 0, "DebugIdentity"), gated_debug_ops)
            self.assertIn(("d", 0, "DebugIdentity"), gated_debug_ops)

            self.assertIn(("x", 0, "DebugIdentity"), gated_debug_ops)
            self.assertIn(("y", 0, "DebugIdentity"), gated_debug_ops)
            self.assertIn(("z", 0, "DebugIdentity"), gated_debug_ops)
Example #2
  def generate_run(self, run_name, include_graph):
    """Create a run with a text summary, metadata, and optionally a graph."""
    tf.compat.v1.reset_default_graph()
    k1 = tf.constant(math.pi, name='k1')
    k2 = tf.constant(math.e, name='k2')
    result = (k1 ** k2) - k1
    expected = tf.constant(20.0, name='expected')
    error = tf.abs(result - expected, name='error')
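    # Repeat a short token so the message length comfortably exceeds the
    # lower bound asserted below.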
    message_prefix_value = 'error ' * 1000
    true_length = len(message_prefix_value)
    assert true_length > self._MESSAGE_PREFIX_LENGTH_LOWER_BOUND, true_length
    message_prefix = tf.constant(message_prefix_value, name='message_prefix')
    error_message = tf.strings.join(
        [message_prefix, tf.as_string(error, name='error_string')],
        name='error_message')
    summary_message = tf.compat.v1.summary.text('summary_message', error_message)

    sess = tf.compat.v1.Session()
    writer = test_util.FileWriter(os.path.join(self.logdir, run_name))
    if include_graph:
      writer.add_graph(sess.graph)
    options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()
    s = sess.run(summary_message, options=options, run_metadata=run_metadata)
    writer.add_summary(s)
    writer.add_run_metadata(run_metadata, self._METADATA_TAG)
    writer.close()
Example #3
 def test_run_metadata(self):
     self.set_up_with_runs()
     (metadata_pbtxt,
      mime_type) = self.plugin.run_metadata_impl(self._RUN_WITH_GRAPH,
                                                 self._METADATA_TAG)
     self.assertEqual(mime_type, 'text/x-protobuf')
     text_format.Parse(metadata_pbtxt, config_pb2.RunMetadata())
Example #4
 def test_run_metadata(self, plugin):
     ctx = context.RequestContext()
     result = plugin.run_metadata_impl(ctx, "123",
                                       _RUN_WITH_GRAPH_WITH_METADATA[0],
                                       self._METADATA_TAG)
     (metadata_pbtxt, mime_type) = result
     self.assertEqual(mime_type, "text/x-protobuf")
     text_format.Parse(metadata_pbtxt, config_pb2.RunMetadata())
Example #5
 def test_run_metadata(self, plugin):
     result = plugin.run_metadata_impl(_RUN_WITH_GRAPH_WITH_METADATA[0],
                                       self._METADATA_TAG)
     if plugin._data_provider:
         # Hack, for now
         self.assertEqual(result, None)
     else:
         (metadata_pbtxt, mime_type) = result
         self.assertEqual(mime_type, "text/x-protobuf")
         text_format.Parse(metadata_pbtxt, config_pb2.RunMetadata())
Example #6
  def testGraphDefProperty(self):
    with tf.compat.v1.Session() as sess:
      z, run_options = self._createTestGraphAndRunOptions(sess, gated_grpc=True)

      sess.run(tf.compat.v1.global_variables_initializer())
      run_metadata = config_pb2.RunMetadata()
      self.assertAllClose(
          [10.0], sess.run(z, options=run_options, run_metadata=run_metadata))

      graph_wrapper = debug_graphs_helper.DebugGraphWrapper(
          run_metadata.partition_graphs[0])
      self.assertProtoEquals(
          run_metadata.partition_graphs[0], graph_wrapper.graph_def)
Example #7
  def testExtractGatedGrpcTensorsFoundNoGatedGrpcOps(self):
    with tf.compat.v1.Session() as sess:
      z, run_options = self._createTestGraphAndRunOptions(sess,
                                                          gated_grpc=False)

      sess.run(tf.compat.v1.global_variables_initializer())
      run_metadata = config_pb2.RunMetadata()
      self.assertAllClose(
          [10.0], sess.run(z, options=run_options, run_metadata=run_metadata))

      graph_wrapper = debug_graphs_helper.DebugGraphWrapper(
          run_metadata.partition_graphs[0])
      gated_debug_ops = graph_wrapper.get_gated_grpc_tensors()
      self.assertEqual([], gated_debug_ops)
Example #8
    def RunMetadata(self, tag):
        """Given a tag, return the associated session.run() metadata.

        Args:
          tag: A string tag associated with the event.

        Raises:
          ValueError: If the tag is not found.

        Returns:
          The metadata in form of `RunMetadata` proto.
        """
        if tag not in self._tagged_metadata:
            raise ValueError("There is no run metadata with this tag name")

        run_metadata = config_pb2.RunMetadata()
        run_metadata.ParseFromString(self._tagged_metadata[tag])
        return run_metadata
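
A minimal usage sketch of the method above (hypothetical: accumulator stands in for whatever object defines RunMetadata, and _tagged_metadata is assumed to map tag names to serialized RunMetadata bytes, as the method implies):

# Hypothetical sketch, not part of the original example.
proto = config_pb2.RunMetadata()
accumulator._tagged_metadata["step_0001"] = proto.SerializeToString()
roundtrip = accumulator.RunMetadata("step_0001")  # returns a parsed proto
try:
    accumulator.RunMetadata("no_such_tag")
except ValueError:
    pass  # unknown tags raise ValueError, as documented above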
Example #9
def run_sobel(logdir, verbose=False):
    """Run a Sobel edge detection demonstration.

    See the summary description for more details.

    Arguments:
      logdir: Directory into which to write event logs.
      verbose: Boolean; whether to log any output.
    """
    if verbose:
        logger.info("--- Starting run: sobel")

    tf.reset_default_graph()
    tf.set_random_seed(0)

    image = get_image(verbose=verbose)
    kernel_radius = tf.placeholder(shape=(), dtype=tf.int32)

    with tf.name_scope("horizontal_kernel"):
        kernel_side_length = kernel_radius * 2 + 1
        # Drop off influence for pixels further away from the center.
        weighting_kernel = 1.0 - tf.abs(
            tf.linspace(-1.0, 1.0, num=kernel_side_length))
        differentiation_kernel = tf.linspace(-1.0, 1.0, num=kernel_side_length)
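        # The outer product of the weighting vector (n x 1) and the
        # differentiation vector (1 x n) yields the 2-D horizontal kernel.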
        horizontal_kernel = tf.matmul(
            tf.expand_dims(weighting_kernel, 1),
            tf.expand_dims(differentiation_kernel, 0),
        )

    with tf.name_scope("vertical_kernel"):
        vertical_kernel = tf.transpose(a=horizontal_kernel)

    float_image = tf.cast(image, tf.float32)
    dx = convolve(float_image, horizontal_kernel, name="convolve_dx")
    dy = convolve(float_image, vertical_kernel, name="convolve_dy")
    gradient_magnitude = tf.norm(tensor=[dx, dy],
                                 axis=0,
                                 name="gradient_magnitude")
    with tf.name_scope("normalized_gradient"):
        normalized_gradient = gradient_magnitude / tf.reduce_max(
            input_tensor=gradient_magnitude)
    with tf.name_scope("output_image"):
        output_image = tf.cast(255 * normalized_gradient, tf.uint8)

    summ = image_summary.op(
        "sobel",
        tf.stack([output_image]),
        display_name="Sobel edge detection",
        description=(
            "Demonstration of [Sobel edge detection]. The step "
            "parameter adjusts the radius of the kernel. "
            "The kernel can be of arbitrary size, and considers "
            "nearby pixels with \u2113\u2082-linear falloff.\n\n"
            # (that says ``$\ell_2$-linear falloff'')
            "Edge detection is done on a per-channel basis, so "
            "you can observe which edges are “mostly red "
            "edges,” for instance.\n\n"
            "For practical edge detection, a small kernel "
            "(usually not more than more than *r*=2) is best.\n\n"
            "[Sobel edge detection]: %s\n\n"
            "%s" %
            ("https://en.wikipedia.org/wiki/Sobel_operator", IMAGE_CREDIT)),
    )

    with tf.Session() as sess:
        sess.run(image.initializer)
        writer = tf.summary.FileWriter(os.path.join(logdir, "sobel"))
        writer.add_graph(sess.graph)
        for step in xrange(8):
            if verbose:
                logger.info("--- sobel: step: %s" % step)
            feed_dict = {kernel_radius: step}
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = config_pb2.RunMetadata()
            s = sess.run(
                summ,
                feed_dict=feed_dict,
                options=run_options,
                run_metadata=run_metadata,
            )
            writer.add_summary(s, global_step=step)
            writer.add_run_metadata(run_metadata, "step_%04d" % step)
        writer.close()
Example #10
def run_box_to_gaussian(logdir, verbose=False):
    """Run a box-blur-to-Gaussian-blur demonstration.

    See the summary description for more details.

    Arguments:
      logdir: Directory into which to write event logs.
      verbose: Boolean; whether to log any output.
    """
    if verbose:
        logger.info("--- Starting run: box_to_gaussian")

    tf.reset_default_graph()
    tf.set_random_seed(0)

    image = get_image(verbose=verbose)
    blur_radius = tf.placeholder(shape=(), dtype=tf.int32)
    with tf.name_scope("filter"):
        blur_side_length = blur_radius * 2 + 1
        pixel_filter = tf.ones((blur_side_length, blur_side_length))
        pixel_filter = pixel_filter / tf.cast(tf.size(input=pixel_filter),
                                              tf.float32)  # normalize

    iterations = 4
    images = [tf.cast(image, tf.float32) / 255.0]
    for _ in xrange(iterations):
        images.append(convolve(images[-1], pixel_filter))
    with tf.name_scope("convert_to_uint8"):
        images = tf.stack([
            tf.cast(255 * tf.clip_by_value(image_, 0.0, 1.0), tf.uint8)
            for image_ in images
        ])

    summ = image_summary.op(
        "box_to_gaussian",
        images,
        max_outputs=iterations,
        display_name="Gaussian blur as a limit process of box blurs",
        description=(
            "Demonstration of forming a Gaussian blur by "
            "composing box blurs, each of which can be expressed "
            "as a 2D convolution.\n\n"
            "A Gaussian blur is formed by convolving a Gaussian "
            "kernel over an image. But a Gaussian kernel is "
            "itself the limit of convolving a constant kernel "
            "with itself many times. Thus, while applying "
            "a box-filter convolution just once produces "
            "results that are noticeably different from those "
            "of a Gaussian blur, repeating the same convolution "
            "just a few times causes the result to rapidly "
            "converge to an actual Gaussian blur.\n\n"
            "Here, the step value controls the blur radius, "
            "and the image sample controls the number of times "
            "that the convolution is applied (plus one). "
            "So, when *sample*=1, the original image is shown; "
            "*sample*=2 shows a box blur; and a hypothetical "
            "*sample*=∞ would show a true Gaussian blur.\n\n"
            "This is one ingredient in a recipe to compute very "
            "fast Gaussian blurs. The other pieces require "
            "special treatment for the box blurs themselves "
            "(decomposition to dual one-dimensional box blurs, "
            "each of which is computed with a sliding window); "
            "we don’t perform those optimizations here.\n\n"
            "[Here are some slides describing the full process.]"
            "(%s)\n\n"
            "%s" % (
                "http://elynxsdk.free.fr/ext-docs/Blur/Fast_box_blur.pdf",
                IMAGE_CREDIT,
            )),
    )

    with tf.Session() as sess:
        sess.run(image.initializer)
        writer = tf.summary.FileWriter(os.path.join(logdir, "box_to_gaussian"))
        writer.add_graph(sess.graph)
        for step in xrange(8):
            if verbose:
                logger.info("--- box_to_gaussian: step: %s" % step)
            feed_dict = {blur_radius: step}
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = config_pb2.RunMetadata()
            s = sess.run(
                summ,
                feed_dict=feed_dict,
                options=run_options,
                run_metadata=run_metadata,
            )
            writer.add_summary(s, global_step=step)
            writer.add_run_metadata(run_metadata, "step_%04d" % step)
        writer.close()
Example #11
    def testTensorsRealistically(self):
        """Test accumulator by writing values and then reading them."""

        def FakeScalarSummary(tag, value):
            value = summary_pb2.Summary.Value(tag=tag, simple_value=value)
            summary = summary_pb2.Summary(value=[value])
            return summary

        directory = os.path.join(self.get_temp_dir(), "values_dir")
        if tf.io.gfile.isdir(directory):
            tf.io.gfile.rmtree(directory)
        tf.io.gfile.mkdir(directory)

        writer = test_util.FileWriter(directory, max_queue=100)

        with tf.Graph().as_default() as graph:
            _ = tf.constant([2.0, 1.0])
            # Add a graph to the summary writer.
            writer.add_graph(graph)
            graph_def = graph.as_graph_def(add_shapes=True)
            meta_graph_def = tf.compat.v1.train.export_meta_graph(
                graph_def=graph_def
            )
            writer.add_meta_graph(meta_graph_def)

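        # A single device entry in step_stats is enough for the accumulator
        # to register the "test run" run-metadata tag checked below.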
        run_metadata = config_pb2.RunMetadata()
        device_stats = run_metadata.step_stats.dev_stats.add()
        device_stats.device = "test device"
        writer.add_run_metadata(run_metadata, "test run")

        # Write a bunch of events using the writer.
        for i in xrange(30):
            summ_id = FakeScalarSummary("id", i)
            summ_sq = FakeScalarSummary("sq", i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(
            acc.Tags(),
            {
                ea.TENSORS: ["id", "sq"],
                ea.GRAPH: True,
                ea.META_GRAPH: True,
                ea.RUN_METADATA: ["test run"],
            },
        )
        id_events = acc.Tensors("id")
        sq_events = acc.Tensors("sq")
        self.assertEqual(30, len(id_events))
        self.assertEqual(30, len(sq_events))
        for i in xrange(30):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(
                i, tensor_util.make_ndarray(id_events[i].tensor_proto).item()
            )
            self.assertEqual(
                i * i,
                tensor_util.make_ndarray(sq_events[i].tensor_proto).item(),
            )

        # Write a few more events to test incremental reloading
        for i in xrange(30, 40):
            summ_id = FakeScalarSummary("id", i)
            summ_sq = FakeScalarSummary("sq", i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify we can now see all of the data
        acc.Reload()
        id_events = acc.Tensors("id")
        sq_events = acc.Tensors("sq")
        self.assertEqual(40, len(id_events))
        self.assertEqual(40, len(sq_events))
        for i in xrange(40):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(
                i, tensor_util.make_ndarray(id_events[i].tensor_proto).item()
            )
            self.assertEqual(
                i * i,
                tensor_util.make_ndarray(sq_events[i].tensor_proto).item(),
            )

        expected_graph_def = graph_pb2.GraphDef.FromString(
            graph.as_graph_def(add_shapes=True).SerializeToString()
        )
        self.assertProtoEquals(expected_graph_def, acc.Graph())
        self.assertProtoEquals(
            expected_graph_def,
            graph_pb2.GraphDef.FromString(acc.SerializedGraph()),
        )

        expected_meta_graph = meta_graph_pb2.MetaGraphDef.FromString(
            meta_graph_def.SerializeToString()
        )
        self.assertProtoEquals(expected_meta_graph, acc.MetaGraph())
Example #12
    def testScalarsRealistically(self):
        """Test accumulator by writing values and then reading them."""
        def FakeScalarSummary(tag, value):
            value = summary_pb2.Summary.Value(tag=tag, simple_value=value)
            summary = summary_pb2.Summary(value=[value])
            return summary

        directory = os.path.join(self.get_temp_dir(), 'values_dir')
        if tf.io.gfile.isdir(directory):
            tf.io.gfile.rmtree(directory)
        tf.io.gfile.mkdir(directory)

        writer = test_util.FileWriter(directory, max_queue=100)

        with tf.Graph().as_default() as graph:
            _ = tf.constant([2.0, 1.0])
            # Add a graph to the summary writer.
            writer.add_graph(graph)
            meta_graph_def = tf.compat.v1.train.export_meta_graph(
                graph_def=graph.as_graph_def(add_shapes=True))
            writer.add_meta_graph(meta_graph_def)

        run_metadata = config_pb2.RunMetadata()
        device_stats = run_metadata.step_stats.dev_stats.add()
        device_stats.device = 'test device'
        writer.add_run_metadata(run_metadata, 'test run')

        # Write a bunch of events using the writer.
        for i in xrange(30):
            summ_id = FakeScalarSummary('id', i)
            summ_sq = FakeScalarSummary('sq', i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(
            acc.Tags(), {
                ea.SCALARS: ['id', 'sq'],
                ea.GRAPH: True,
                ea.META_GRAPH: True,
                ea.RUN_METADATA: ['test run'],
            })
        id_events = acc.Scalars('id')
        sq_events = acc.Scalars('sq')
        self.assertEqual(30, len(id_events))
        self.assertEqual(30, len(sq_events))
        for i in xrange(30):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(i, id_events[i].value)
            self.assertEqual(i * i, sq_events[i].value)

        # Write a few more events to test incremental reloading
        for i in xrange(30, 40):
            summ_id = FakeScalarSummary('id', i)
            summ_sq = FakeScalarSummary('sq', i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify we can now see all of the data
        acc.Reload()
        id_events = acc.Scalars('id')
        sq_events = acc.Scalars('sq')
        self.assertEqual(40, len(id_events))
        self.assertEqual(40, len(sq_events))
        for i in xrange(40):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(i, id_events[i].value)
            self.assertEqual(i * i, sq_events[i].value)
        self.assertProtoEquals(graph.as_graph_def(add_shapes=True),
                               acc.Graph())
        self.assertProtoEquals(meta_graph_def, acc.MetaGraph())