Example #1
    def generate_run(self, run_name):
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.float32, shape=[3])

        if run_name == self._RUN_WITH_LEGACY_HISTOGRAM:
            tf.summary.histogram(self._LEGACY_HISTOGRAM_TAG, placeholder)
        elif run_name == self._RUN_WITH_HISTOGRAM:
            summary.op(self._HISTOGRAM_TAG,
                       placeholder,
                       display_name=self._DISPLAY_NAME,
                       description=self._DESCRIPTION)
        elif run_name == self._RUN_WITH_SCALARS:
            tf.summary.scalar(self._SCALAR_TAG, tf.reduce_mean(placeholder))
        else:
            assert False, 'Invalid run name: %r' % run_name
        summ = tf.summary.merge_all()

        subdir = os.path.join(self.logdir, run_name)
        writer = tf.summary.FileWriter(subdir)
        writer.add_graph(sess.graph)
        for step in xrange(self._STEPS):
            feed_dict = {placeholder: [1 + step, 2 + step, 3 + step]}
            s = sess.run(summ, feed_dict=feed_dict)
            writer.add_summary(s, global_step=step)
        writer.close()
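
A note on the reset: this generator begins with tf.reset_default_graph() because, in TF 1.x graph mode, ops accumulate in one global graph, so without a reset tf.summary.merge_all() in a later run would also merge summaries defined by earlier runs. A minimal standalone sketch (assuming TensorFlow 1.x):

import tensorflow as tf

tf.reset_default_graph()
x = tf.placeholder(tf.float32, shape=[3])
tf.summary.scalar('mean', tf.reduce_mean(x))
num_ops = len(tf.get_default_graph().get_operations())

# Building more summaries without a reset keeps growing the same graph,
# and merge_all() would now merge both scalars.
tf.summary.scalar('mean_again', tf.reduce_mean(x))
assert len(tf.get_default_graph().get_operations()) > num_ops

tf.reset_default_graph()  # back to an empty graph
assert not tf.get_default_graph().get_operations()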
Example #2
    def generate_run(self, run_name, include_graph):
        """Create a run with a text summary, metadata, and optionally a graph."""
        tf.reset_default_graph()
        k1 = tf.constant(math.pi, name='k1')
        k2 = tf.constant(math.e, name='k2')
        result = (k1**k2) - k1
        expected = tf.constant(20.0, name='expected')
        error = tf.abs(result - expected, name='error')
        message_prefix_value = 'error ' * 1000
        true_length = len(message_prefix_value)
        assert true_length > self._MESSAGE_PREFIX_LENGTH_LOWER_BOUND, true_length
        message_prefix = tf.constant(message_prefix_value,
                                     name='message_prefix')
        error_message = tf.string_join(
            [message_prefix,
             tf.as_string(error, name='error_string')],
            name='error_message')
        summary_message = tf.summary.text('summary_message', error_message)

        sess = tf.Session()
        writer = tf.summary.FileWriter(os.path.join(self.logdir, run_name))
        if include_graph:
            writer.add_graph(sess.graph)
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        s = sess.run(summary_message,
                     options=options,
                     run_metadata=run_metadata)
        writer.add_summary(s)
        writer.add_run_metadata(run_metadata, self._METADATA_TAG)
        writer.close()
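
To verify what generate_run() wrote, the event file can be read back with tf.train.summary_iterator; the run metadata arrives as a tagged_run_metadata event and the text summary as an ordinary summary value. A hedged readback sketch (the paths here are illustrative, not from the test):

import glob

import tensorflow as tf

for events_path in glob.glob('/tmp/logdir/my_run/events.out.tfevents.*'):
    for event in tf.train.summary_iterator(events_path):
        if event.HasField('tagged_run_metadata'):
            print('run metadata tag:', event.tagged_run_metadata.tag)
        for value in event.summary.value:
            print('summary tag:', value.tag)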
Example #3
    def generate_testdata(self, include_text=True, logdir=None):
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.string)
        summary_tensor = tf.summary.text('message', placeholder)
        vector_summary = tf.summary.text('vector', placeholder)
        scalar_summary = tf.summary.scalar('twelve', tf.constant(12))

        run_names = ['fry', 'leela']
        for run_name in run_names:
            subdir = os.path.join(logdir or self.logdir, run_name)
            writer = tf.summary.FileWriter(subdir)
            writer.add_graph(sess.graph)

            step = 0
            for gem in GEMS:
                message = run_name + ' *loves* ' + gem
                feed_dict = {
                    placeholder: message,
                }
                if include_text:
                    summ = sess.run(summary_tensor, feed_dict=feed_dict)
                    writer.add_summary(summ, global_step=step)
                step += 1

            vector_message = ['one', 'two', 'three', 'four']
            if include_text:
                summ = sess.run(vector_summary,
                                feed_dict={placeholder: vector_message})
                writer.add_summary(summ)

            summ = sess.run(scalar_summary, feed_dict={placeholder: []})
            writer.add_summary(summ)

            writer.close()
Example #4
    def generate_testdata(self):
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.constant('I am deprecated.')

        # Previously, text summaries were created via plugin assets (JSON files
        # listing runs and tags). The plugin must continue to be able to load
        # summaries in that format, so we create a summary using the old plugin
        # asset-based method here.
        plugin_asset_summary = tf.summary.tensor_summary(
            'old_plugin_asset_summary', placeholder)
        assets_directory = os.path.join(self.logdir, 'fry', 'plugins',
                                        'tensorboard_text')
        # Make the directory of assets if it does not exist.
        if not os.path.isdir(assets_directory):
            try:
                os.makedirs(assets_directory)
            except OSError as err:
                self.fail('Could not make assets directory %r: %r' %
                          (assets_directory, err))
        json_path = os.path.join(assets_directory, 'tensors.json')
        with open(json_path, 'w+') as tensors_json_file:
            # Write the op name to a JSON file that the text plugin later uses to
            # determine the tag names of tensors to fetch.
            tensors_json_file.write(json.dumps([plugin_asset_summary.op.name]))

        run_name = 'fry'
        subdir = os.path.join(self.logdir, run_name)
        writer = tf.summary.FileWriter(subdir)
        writer.add_graph(sess.graph)

        summ = sess.run(plugin_asset_summary)
        writer.add_summary(summ)
        writer.close()
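
For context, the consumer side of that asset can be pictured as follows; this is a sketch of the plugin's assumed behavior, not its actual code: the text plugin loads tensors.json and treats each listed op name as a tag whose tensors it should fetch.

import json

# json_path and plugin_asset_summary refer to the names defined above.
with open(json_path) as tensors_json_file:
    tag_names = json.load(tensors_json_file)
assert tag_names == [plugin_asset_summary.op.name]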
Example #5
  def setUp(self):
    self.log_dir = tempfile.mkdtemp()

    # We use numpy.random to generate images. We seed to avoid non-determinism
    # in this test.
    numpy.random.seed(42)

    # Create old-style image summaries for run "foo".
    tf.reset_default_graph()
    sess = tf.Session()
    placeholder = tf.placeholder(tf.uint8)
    tf.summary.image(name="baz", tensor=placeholder)
    merged_summary_op = tf.summary.merge_all()
    foo_directory = os.path.join(self.log_dir, "foo")
    writer = tf.summary.FileWriter(foo_directory)
    writer.add_graph(sess.graph)
    for step in xrange(2):
      writer.add_summary(sess.run(merged_summary_op, feed_dict={
          placeholder: (numpy.random.rand(1, 16, 42, 3) * 255).astype(
              numpy.uint8)
      }), global_step=step)
    writer.close()

    # Create new-style image summaries for run bar.
    tf.reset_default_graph()
    sess = tf.Session()
    placeholder = tf.placeholder(tf.uint8)
    summary.op(name="quux", images=placeholder,
               description="how do you pronounce that, anyway?")
    merged_summary_op = tf.summary.merge_all()
    bar_directory = os.path.join(self.log_dir, "bar")
    writer = tf.summary.FileWriter(bar_directory)
    writer.add_graph(sess.graph)
    for step in xrange(2):
      writer.add_summary(sess.run(merged_summary_op, feed_dict={
          placeholder: (numpy.random.rand(1, 8, 6, 3) * 255).astype(
              numpy.uint8)
      }), global_step=step)
    writer.close()

    # Start a server with the plugin.
    multiplexer = event_multiplexer.EventMultiplexer({
        "foo": foo_directory,
        "bar": bar_directory,
    })
    context = base_plugin.TBContext(
        logdir=self.log_dir, multiplexer=multiplexer)
    plugin = images_plugin.ImagesPlugin(context)
    # Setting a reload interval of -1 disables reloading. We disable reloading
    # because we want tests to block until one reload finishes; this setUp
    # method reloads the multiplexer manually. TensorBoard would otherwise
    # reload in a non-blocking thread.
    wsgi_app = application.TensorBoardWSGIApp(
        self.log_dir, [plugin], multiplexer, reload_interval=-1, path_prefix='')
    self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
    multiplexer.Reload()
    self.routes = plugin.get_plugin_apps()
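With the werkzeug test client constructed above, a test method can then exercise the plugin's routes directly. A brief usage sketch; the route path is an assumption based on the images plugin's usual URL layout, not taken from this test:

response = self.server.get('/data/plugin/images/tags')
assert response.status_code == 200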
Example #6
    def setUp(self):
        super(SummaryTest, self).setUp()
        tf.reset_default_graph()

        self.image_width = 300
        self.image_height = 75
        self.image_count = 8
        np.random.seed(0)
        self.images = self._generate_images(channels=3)
        self.images_with_alpha = self._generate_images(channels=4)
Example #7
    def setUp(self):
        super(SummaryTest, self).setUp()
        tf.reset_default_graph()

        self.samples_per_second = 44100
        self.audio_count = 6
        stereo_shape = (self.audio_count, -1, 2)
        space = (np.linspace(0.0, 100.0, self.samples_per_second).astype(
            np.float32).reshape(stereo_shape))
        self.audio_length = space.shape[1]
        self.stereo = np.sin(space)
        self.mono = self.stereo.mean(axis=2, keepdims=True)
Example #8
  def _generate_test_data(self, run_name, experiment_name):
    """Generates the test data directory.

    The test data has a single run of the given name, containing:
      - a graph definition and metagraph definition
      - a scalar datum written to the database

    Arguments:
      run_name: The directory under self.logdir into which to write
          events.
      experiment_name: The name of the experiment under which the run's
          data is recorded in the database.
    """
    run_path = os.path.join(self.logdir, run_name)
    writer = tf.summary.FileWriter(run_path)

    # Add a simple graph event.
    graph_def = tf.GraphDef()
    node1 = graph_def.node.add()
    node1.name = 'a'
    node2 = graph_def.node.add()
    node2.name = 'b'
    node2.attr['very_large_attr'].s = b'a' * 2048  # 2 KB attribute

    meta_graph_def = tf.MetaGraphDef(graph_def=graph_def)

    if self._only_use_meta_graph:
      writer.add_meta_graph(meta_graph_def)
    else:
      writer.add_graph(graph_def)

    writer.flush()
    writer.close()

    # Write data for the run to the database.
    # TODO(nickfelt): Figure out why resetting the graph is necessary.
    tf.reset_default_graph()
    db_writer = tf.contrib.summary.create_db_writer(
        db_uri=self.db_path,
        experiment_name=experiment_name,
        run_name=run_name,
        user_name='user')
    with db_writer.as_default(), tf.contrib.summary.always_record_summaries():
      tf.contrib.summary.scalar('mytag', 1)

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.contrib.summary.summary_writer_initializer_op())
      sess.run(tf.contrib.summary.all_summary_ops())
Example #9
def run_all(logdir):
    tf.reset_default_graph()
    step_placeholder = tf.placeholder(tf.int32)

    with tf.name_scope('simple_example'):
        simple_example(step_placeholder)
    with tf.name_scope('markdown_table'):
        markdown_table(step_placeholder)
    with tf.name_scope('higher_order_tensors'):
        higher_order_tensors(step_placeholder)
    all_summaries = tf.summary.merge_all()

    with tf.Session() as sess:
        writer = tf.summary.FileWriter(logdir)
        writer.add_graph(sess.graph)
        for step in xrange(STEPS):
            s = sess.run(all_summaries, feed_dict={step_placeholder: step})
            writer.add_summary(s, global_step=step)
        writer.close()
Example #10
    def generate_run_to_db(self, experiment_name, run_name):
        tf.reset_default_graph()

        global_step = tf.placeholder(tf.int64)
        db_writer = tf.contrib.summary.create_db_writer(
            db_uri=self.db_path,
            experiment_name=experiment_name,
            run_name=run_name,
            user_name='user')

        with db_writer.as_default(), tf.contrib.summary.always_record_summaries():
            tf.contrib.summary.scalar(self._SCALAR_TAG, 42, step=global_step)
            flush_op = tf.contrib.summary.flush(db_writer._resource)

        with tf.Session() as sess:
            sess.run(tf.contrib.summary.summary_writer_initializer_op())
            for step in xrange(self._STEPS):
                feed_dict = {global_step: step}
                sess.run(tf.contrib.summary.all_summary_ops(),
                         feed_dict=feed_dict)
            sess.run(flush_op)
Example #11
    def setUp(self):
        self.log_dir = tempfile.mkdtemp()

        # We use numpy.random to generate audio. We seed to avoid non-determinism
        # in this test.
        numpy.random.seed(42)

        # Create old-style audio summaries for run "foo".
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.float32)
        tf.summary.audio(name="baz", tensor=placeholder, sample_rate=44100)
        merged_summary_op = tf.summary.merge_all()
        foo_directory = os.path.join(self.log_dir, "foo")
        writer = tf.summary.FileWriter(foo_directory)
        writer.add_graph(sess.graph)
        for step in xrange(2):
            # The floats (sample data) range from -1 to 1.
            s = sess.run(
                merged_summary_op,
                feed_dict={placeholder: numpy.random.rand(42, 22050) * 2 - 1})
            writer.add_summary(s, global_step=step)
        writer.close()

        # Create new-style audio summaries for run "bar".
        tf.reset_default_graph()
        sess = tf.Session()
        audio_placeholder = tf.placeholder(tf.float32)
        labels_placeholder = tf.placeholder(tf.string)
        summary.op("quux",
                   audio_placeholder,
                   sample_rate=44100,
                   labels=labels_placeholder,
                   description="how do you pronounce that, anyway?")
        merged_summary_op = tf.summary.merge_all()
        bar_directory = os.path.join(self.log_dir, "bar")
        writer = tf.summary.FileWriter(bar_directory)
        writer.add_graph(sess.graph)
        for step in xrange(2):
            # The floats (sample data) range from -1 to 1.
            s = sess.run(
                merged_summary_op,
                feed_dict={
                    audio_placeholder:
                        numpy.random.rand(42, 11025, 1) * 2 - 1,
                    labels_placeholder: [
                        tf.compat.as_bytes(
                            'step **%s**, sample %s' % (step, sample))
                        for sample in xrange(42)
                    ],
                })
            writer.add_summary(s, global_step=step)
        writer.close()

        # Start a server with the plugin.
        multiplexer = event_multiplexer.EventMultiplexer({
            "foo": foo_directory,
            "bar": bar_directory,
        })
        context = base_plugin.TBContext(logdir=self.log_dir,
                                        multiplexer=multiplexer)
        self.plugin = audio_plugin.AudioPlugin(context)
        # Setting a reload interval of -1 disables reloading. We disable
        # reloading because we want tests to block until one reload finishes;
        # this setUp method reloads the multiplexer manually. TensorBoard would
        # otherwise reload in a non-blocking thread.
        wsgi_app = application.TensorBoardWSGIApp(self.log_dir, [self.plugin],
                                                  multiplexer,
                                                  reload_interval=-1,
                                                  path_prefix='')
        self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
        multiplexer.Reload()
Example #12
def run(logdir, run_name, wave_name, wave_constructor):
    """Generate wave data of the given form.

  The provided function `wave_constructor` should accept a scalar tensor
  of type float32, representing the frequency (in Hz) at which to
  construct a wave, and return a tensor of shape [1, _samples(), `n`]
  representing audio data (for some number of channels `n`).

  Waves will be generated at frequencies ranging from A4 to A5.

  Arguments:
    logdir: the top-level directory into which to write summary data
    run_name: the name of this run; will be created as a subdirectory
      under logdir
    wave_name: the name of the wave being generated
    wave_constructor: see above
  """
    tf.reset_default_graph()
    tf.set_random_seed(0)

    # On each step `i`, we'll set this placeholder to `i`. This allows us
    # to know "what time it is" at each step.
    step_placeholder = tf.placeholder(tf.float32, shape=[])

    # We want to linearly interpolate a frequency between A4 (440 Hz) and
    # A5 (880 Hz).
    with tf.name_scope('compute_frequency'):
        f_min = 440.0
        f_max = 880.0
        t = step_placeholder / (FLAGS.steps - 1)
        frequency = f_min * (1.0 - t) + f_max * t

    # Let's log this frequency, just so that we can make sure that it's as
    # expected.
    tf.summary.scalar('frequency', frequency)

    # Now, we pass this to the wave constructor to get our waveform. Doing
    # so within a name scope means that any summaries that the wave
    # constructor produces will be namespaced.
    with tf.name_scope(wave_name):
        waveform = wave_constructor(frequency)

    # We also have the opportunity to annotate each audio clip with a
    # label. This is a good place to include the frequency, because it'll
    # be visible immediately next to the audio clip.
    with tf.name_scope('compute_labels'):
        samples = tf.shape(waveform)[0]
        wave_types = tf.tile(["*Wave type:* `%s`." % wave_name], [samples])
        frequencies = tf.string_join([
            "*Frequency:* ",
            tf.tile([tf.as_string(frequency, precision=2)], [samples]),
            " Hz.",
        ])
        samples = tf.string_join([
            "*Sample:* ",
            tf.as_string(tf.range(samples) + 1),
            " of ",
            tf.as_string(samples),
            ".",
        ])
        labels = tf.string_join([wave_types, frequencies, samples],
                                separator=" ")

    # We can place a description next to the summary in TensorBoard. This
    # is a good place to explain what the summary represents, methodology
    # for creating it, etc. Let's include the source code of the function
    # that generated the wave.
    source = '\n'.join('    %s' % line.rstrip()
                       for line in inspect.getsourcelines(wave_constructor)[0])
    description = ("A wave of type `%r`, generated via:\n\n%s" %
                   (wave_name, source))

    # Here's the crucial piece: we interpret this result as audio.
    summary.op('waveform',
               waveform,
               FLAGS.sample_rate,
               labels=labels,
               display_name=wave_name,
               description=description)

    # Now, we can collect up all the summaries and begin the run.
    summ = tf.summary.merge_all()

    sess = tf.Session()
    writer = tf.summary.FileWriter(os.path.join(logdir, run_name))
    writer.add_graph(sess.graph)
    sess.run(tf.global_variables_initializer())
    for step in xrange(FLAGS.steps):
        s = sess.run(summ, feed_dict={step_placeholder: float(step)})
        writer.add_summary(s, global_step=step)
    writer.close()
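
A minimal wave constructor satisfying the contract in the docstring might look like the sketch below: scalar float32 frequency in, a [1, _samples(), 1] float32 waveform out. It assumes _samples() and FLAGS.sample_rate are defined as in the surrounding demo; this is an illustration, not the demo's own constructor.

import math

import tensorflow as tf

def sine_wave(frequency):
    # One timestamp per sample, in seconds.
    ts = tf.range(_samples(), dtype=tf.float32) / float(FLAGS.sample_rate)
    wave = tf.sin(2.0 * math.pi * frequency * ts)
    return tf.reshape(wave, [1, -1, 1])  # [1, num_samples, 1 channel]

# Usage: run(logdir, 'sine', 'sine_wave', sine_wave)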
Example #13
def run_box_to_gaussian(logdir, verbose=False):
  """Run a box-blur-to-Gaussian-blur demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
  if verbose:
    tf.logging.info('--- Starting run: box_to_gaussian')

  tf.reset_default_graph()
  tf.set_random_seed(0)

  image = get_image(verbose=verbose)
  blur_radius = tf.placeholder(shape=(), dtype=tf.int32)
  with tf.name_scope('filter'):
    blur_side_length = blur_radius * 2 + 1
    pixel_filter = tf.ones((blur_side_length, blur_side_length))
    pixel_filter = (pixel_filter
                    / tf.cast(tf.size(pixel_filter), tf.float32))  # normalize

  iterations = 4
  images = [tf.cast(image, tf.float32) / 255.0]
  for _ in xrange(iterations):
    images.append(convolve(images[-1], pixel_filter))
  with tf.name_scope('convert_to_uint8'):
    images = tf.stack(
        [tf.cast(255 * tf.clip_by_value(image_, 0.0, 1.0), tf.uint8)
         for image_ in images])

  summ = image_summary.op(
      'box_to_gaussian', images, max_outputs=iterations,
      display_name='Gaussian blur as a limit process of box blurs',
      description=('Demonstration of forming a Gaussian blur by '
                   'composing box blurs, each of which can be expressed '
                   'as a 2D convolution.\n\n'
                   'A Gaussian blur is formed by convolving a Gaussian '
                   'kernel over an image. But a Gaussian kernel is '
                   'itself the limit of convolving a constant kernel '
                   'with itself many times. Thus, while applying '
                   'a box-filter convolution just once produces '
                   'results that are noticeably different from those '
                   'of a Gaussian blur, repeating the same convolution '
                   'just a few times causes the result to rapidly '
                   'converge to an actual Gaussian blur.\n\n'
                   'Here, the step value controls the blur radius, '
                   'and the image sample controls the number of times '
                   'that the convolution is applied (plus one). '
                   'So, when *sample*=1, the original image is shown; '
                   '*sample*=2 shows a box blur; and a hypothetical '
                   '*sample*=∞ would show a true Gaussian blur.\n\n'
                   'This is one ingredient in a recipe to compute very '
                   'fast Gaussian blurs. The other pieces require '
                   'special treatment for the box blurs themselves '
                   '(decomposition to dual one-dimensional box blurs, '
                   'each of which is computed with a sliding window); '
                   'we don’t perform those optimizations here.\n\n'
                   '[Here are some slides describing the full process.]'
                   '(%s)\n\n'
                   '%s'
                   % ('http://elynxsdk.free.fr/ext-docs/Blur/Fast_box_blur.pdf',
                      IMAGE_CREDIT)))

  with tf.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'box_to_gaussian'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        tf.logging.info('--- box_to_gaussian: step: %s' % step)
      feed_dict = {blur_radius: step}
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
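
The convolve() helper is not shown in this excerpt. A plausible stand-in, assuming an HWC image whose channel count is statically known, is a depthwise 2-D convolution that applies the single 2-D kernel to each channel independently with SAME padding; this is a sketch, not the demo's actual implementation:

import tensorflow as tf

def convolve(image, pixel_filter, name=None):
  with tf.name_scope(name, 'convolve'):
    channels = image.get_shape().as_list()[-1]
    # Shape the kernel as [height, width, in_channels, channel_multiplier=1].
    filter_4d = tf.tile(pixel_filter[:, :, tf.newaxis, tf.newaxis],
                        [1, 1, channels, 1])
    batched = image[tf.newaxis, ...]  # add a batch dimension
    result = tf.nn.depthwise_conv2d(
        batched, filter_4d, strides=[1, 1, 1, 1], padding='SAME')
    return result[0]  # drop the batch dimension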
Example #14
    def setUp(self):
        super(StreamingOpTest, self).setUp()
        tf.reset_default_graph()
        np.random.seed(1)
Example #15
    def setUp(self):
        super(PrCurveTest, self).setUp()
        tf.reset_default_graph()
        np.random.seed(42)
Example #16
def start_runs(logdir,
               steps,
               run_name,
               thresholds,
               mask_every_other_prediction=False):
    """Generate a PR curve with precision and recall evenly weighted.

  Arguments:
    logdir: The directory into which to store all the runs' data.
    steps: The number of steps to run for.
    run_name: The name of the run.
    thresholds: The number of thresholds to use for PR curves.
    mask_every_other_prediction: Whether to mask every other prediction by
      alternating weights between 0 and 1.
  """
    tf.reset_default_graph()
    tf.set_random_seed(42)

    # Create a normal distribution layer used to generate true color labels.
    distribution = tf.distributions.Normal(loc=0., scale=142.)

    # Sample the distribution to generate colors. Let's generate different numbers
    # of each color. The first dimension is the count of examples.

    # The calls to sample() are given fixed random seed values that are "magic"
    # in that they correspond to the default seeds for those ops when the PR
    # curve test (which depends on this code) was written. We've pinned these
    # instead of continuing to use the defaults since the defaults are based on
    # node IDs from the sequence of nodes added to the graph, which can silently
    # change when this code or any TF op implementations it uses are modified.

    # TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds.

    # Generate reds.
    number_of_reds = 100
    true_reds = tf.clip_by_value(
        tf.concat([
            255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)),
            tf.abs(distribution.sample([number_of_reds, 2], seed=34)),
        ], axis=1),
        0, 255)

    # Generate greens.
    number_of_greens = 200
    true_greens = tf.clip_by_value(
        tf.concat([
            tf.abs(distribution.sample([number_of_greens, 1], seed=61)),
            255 - tf.abs(distribution.sample([number_of_greens, 1], seed=82)),
            tf.abs(distribution.sample([number_of_greens, 1], seed=105)),
        ], axis=1),
        0, 255)

    # Generate blues.
    number_of_blues = 150
    true_blues = tf.clip_by_value(
        tf.concat([
            tf.abs(distribution.sample([number_of_blues, 2], seed=132)),
            255 - tf.abs(distribution.sample([number_of_blues, 1], seed=153)),
        ], axis=1),
        0, 255)

    # Assign each color a vector of 3 booleans based on its true label.
    labels = tf.concat([
        tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)),
        tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)),
        tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)),
    ], axis=0)

    # We introduce 3 normal distributions, used to predict whether a color
    # falls under a certain class based on its distance from the corners of
    # the color triangle. The distributions vary per color and narrow over
    # time.
    initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)]
    iteration = tf.placeholder(tf.int32, shape=[])
    red_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[0] - iteration,
                      dtype=tf.float32))
    green_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[1] - iteration,
                      dtype=tf.float32))
    blue_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[2] - iteration,
                      dtype=tf.float32))

    # Make predictions (assign each color 3 probabilities based on its
    # distance to each of the 3 corners). We take double the area in the
    # right tail of the normal distribution, so a distance of 0 maps to
    # probability 1.
    examples = tf.concat([true_reds, true_greens, true_blues], axis=0)
    probabilities_colors_are_red = (1 - red_predictor.cdf(
        tf.norm(examples - tf.constant([255., 0, 0]), axis=1))) * 2
    probabilities_colors_are_green = (1 - green_predictor.cdf(
        tf.norm(examples - tf.constant([0, 255., 0]), axis=1))) * 2
    probabilities_colors_are_blue = (1 - blue_predictor.cdf(
        tf.norm(examples - tf.constant([0, 0, 255.]), axis=1))) * 2

    predictions = (probabilities_colors_are_red,
                   probabilities_colors_are_green,
                   probabilities_colors_are_blue)

    # This is the crucial piece. We write data required for generating PR curves.
    # We create 1 summary per class because we create 1 PR curve per class.
    for i, color in enumerate(('red', 'green', 'blue')):
        description = (
            'The probabilities used to create this PR curve are '
            'generated from a normal distribution. Its standard '
            'deviation is initially %0.0f and decreases over time.' %
            initial_standard_deviations[i])

        weights = None
        if mask_every_other_prediction:
            # Assign a weight of 0 to every even-indexed prediction. Odd-indexed
            # predictions are assigned a default weight of 1.
            consecutive_indices = tf.reshape(tf.range(tf.size(predictions[i])),
                                             tf.shape(predictions[i]))
            weights = tf.cast(consecutive_indices % 2, dtype=tf.float32)

        summary.op(name=color,
                   labels=labels[:, i],
                   predictions=predictions[i],
                   num_thresholds=thresholds,
                   weights=weights,
                   display_name='classifying %s' % color,
                   description=description)
    merged_summary_op = tf.summary.merge_all()
    events_directory = os.path.join(logdir, run_name)
    sess = tf.Session()
    writer = tf.summary.FileWriter(events_directory, sess.graph)

    for step in xrange(steps):
        feed_dict = {
            iteration: step,
        }
        merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict)
        writer.add_summary(merged_summary, step)

    writer.close()
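
The "double the area in the right tail" trick above is what turns a distance into a probability: for a zero-mean normal, P(|X| > d) = 2 * (1 - cdf(d)), so a color sitting exactly on a corner (d = 0) gets probability 1, and probabilities decay toward 0 with distance. A standalone check (TF 1.x; printed values are approximate):

import tensorflow as tf

predictor = tf.distributions.Normal(loc=0., scale=100.)
distances = tf.constant([0., 100., 300.])  # 0, 1, and 3 standard deviations
probabilities = (1 - predictor.cdf(distances)) * 2
with tf.Session() as sess:
    print(sess.run(probabilities))  # approximately [1.0, 0.317, 0.0027]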
Example #17
  def setUp(self):
    super(SummaryTest, self).setUp()
    tf.reset_default_graph()

    np.random.seed(0)
    self.gaussian = np.random.normal(size=[500])
Example #18
def run_sobel(logdir, verbose=False):
  """Run a Sobel edge detection demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
  if verbose:
    tf.logging.info('--- Starting run: sobel')

  tf.reset_default_graph()
  tf.set_random_seed(0)

  image = get_image(verbose=verbose)
  kernel_radius = tf.placeholder(shape=(), dtype=tf.int32)

  with tf.name_scope('horizontal_kernel'):
    kernel_side_length = kernel_radius * 2 + 1
    # Drop off influence for pixels further away from the center.
    weighting_kernel = (
        1.0 - tf.abs(tf.linspace(-1.0, 1.0, num=kernel_side_length)))
    differentiation_kernel = tf.linspace(-1.0, 1.0, num=kernel_side_length)
    horizontal_kernel = tf.matmul(tf.expand_dims(weighting_kernel, 1),
                                  tf.expand_dims(differentiation_kernel, 0))

  with tf.name_scope('vertical_kernel'):
    vertical_kernel = tf.transpose(horizontal_kernel)

  float_image = tf.cast(image, tf.float32)
  dx = convolve(float_image, horizontal_kernel, name='convolve_dx')
  dy = convolve(float_image, vertical_kernel, name='convolve_dy')
  gradient_magnitude = tf.norm([dx, dy], axis=0, name='gradient_magnitude')
  with tf.name_scope('normalized_gradient'):
    normalized_gradient = gradient_magnitude / tf.reduce_max(gradient_magnitude)
  with tf.name_scope('output_image'):
    output_image = tf.cast(255 * normalized_gradient, tf.uint8)

  summ = image_summary.op(
      'sobel', tf.stack([output_image]),
      display_name='Sobel edge detection',
      description=(u'Demonstration of [Sobel edge detection]. The step '
                   'parameter adjusts the radius of the kernel. '
                   'The kernel can be of arbitrary size, and considers '
                   u'nearby pixels with \u2113\u2082-linear falloff.\n\n'
                   # (that says ``$\ell_2$-linear falloff'')
                   'Edge detection is done on a per-channel basis, so '
                   'you can observe which edges are “mostly red '
                   'edges,” for instance.\n\n'
                   'For practical edge detection, a small kernel '
                   '(usually not more than *r*=2) is best.\n\n'
                   '[Sobel edge detection]: %s\n\n'
                   "%s"
                   % ('https://en.wikipedia.org/wiki/Sobel_operator',
                      IMAGE_CREDIT)))

  with tf.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'sobel'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        tf.logging.info("--- sobel: step: %s" % step)
      feed_dict = {kernel_radius: step}
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
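
As a worked example of the kernel construction above, here is the radius-2 case in plain NumPy: the linear-falloff weighting vector's outer product with the differentiation vector yields the 2-D horizontal kernel, and its transpose the vertical one.

import numpy as np

radius = 2
side = radius * 2 + 1
weighting = 1.0 - np.abs(np.linspace(-1.0, 1.0, num=side))  # [0., .5, 1., .5, 0.]
differentiation = np.linspace(-1.0, 1.0, num=side)          # [-1., -.5, 0., .5, 1.]
horizontal_kernel = np.outer(weighting, differentiation)    # shape (5, 5)
vertical_kernel = horizontal_kernel.T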
Example #19
def run(logdir, session_id, hparams, group_name):
    """Runs a temperature simulation.

  This will simulate an object at temperature `initial_temperature`
  sitting at rest in a large room at temperature `ambient_temperature`.
  The object has some intrinsic `heat_coefficient`, which indicates
  how much thermal conductivity it has: for instance, metals have high
  thermal conductivity, while the thermal conductivity of water is low.

  Over time, the object's temperature will adjust to match the
  temperature of its environment. We'll track the object's temperature,
  how far it is from the room's temperature, and how much it changes at
  each time step.

  Arguments:
    logdir: the top-level directory into which to write summary data
    session_id: an id for the session.
    hparams: A dictionary mapping an hyperparameter name to its value.
    group_name: an id for the session group this session belongs to.
  """
    tf.reset_default_graph()
    tf.set_random_seed(0)

    initial_temperature = hparams['initial_temperature']
    ambient_temperature = hparams['ambient_temperature']
    heat_coefficient = hparams['heat_coefficient']
    session_dir = os.path.join(logdir, session_id)
    writer = tf.summary.FileWriter(session_dir)
    writer.add_summary(
        summary.session_start_pb(hparams=hparams, group_name=group_name))
    writer.flush()
    with tf.name_scope('temperature'):
        # Create a mutable variable to hold the object's temperature, and
        # create a scalar summary to track its value over time. The name of
        # the summary will appear as "temperature/current" due to the
        # name-scope above.
        temperature = tf.Variable(tf.constant(initial_temperature),
                                  name='temperature')
        scalar_summary.op('current',
                          temperature,
                          display_name='Temperature',
                          description='The temperature of the object under '
                          'simulation, in Kelvins.')

        # Compute how much the object's temperature differs from that of its
        # environment, and track this, too: likewise, as
        # "temperature/difference_to_ambient".
        ambient_difference = temperature - ambient_temperature
        scalar_summary.op(
            'difference_to_ambient',
            ambient_difference,
            display_name='Difference to ambient temperature',
            description=('The difference between the ambient '
                         'temperature and the temperature of the '
                         'object under simulation, in Kelvins.'))

    # Newton suggested that the rate of change of the temperature of an
    # object is directly proportional to this `ambient_difference` above,
    # where the proportionality constant is what we called the heat
    # coefficient. But in real life, not everything is quite so clean, so
    # we'll add in some noise. (The value of 50 is arbitrary, chosen to
    # make the data look somewhat interesting. :-) )
    noise = 50 * tf.random_normal([])
    delta = -heat_coefficient * (ambient_difference + noise)
    scalar_summary.op(
        'delta',
        delta,
        description='The change in temperature from the previous '
        'step, in Kelvins.')

    # Collect all the scalars that we want to keep track of.
    summ = tf.summary.merge_all()

    # Now, augment the current temperature by this delta that we computed,
    # blocking the assignment on summary collection to avoid race conditions
    # and ensure that the summary always reports the pre-update value.
    with tf.control_dependencies([summ]):
        update_step = temperature.assign_add(delta)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for step in xrange(STEPS):
        # By asking TensorFlow to compute the update step, we force it to
        # change the value of the temperature variable. We don't actually
        # care about this value, so we discard it; instead, we grab the
        # summary data computed along the way.
        (s, _) = sess.run([summ, update_step])
        writer.add_summary(s, global_step=step)
    writer.add_summary(summary.session_end_pb(api_pb2.STATUS_SUCCESS))
    writer.close()
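
Ignoring the injected noise, the update rule above is the discrete form of Newton's law of cooling, T(t+1) = T(t) - k * (T(t) - T_ambient), whose closed form is T(t) = T_ambient + (T_0 - T_ambient) * (1 - k)**t. A quick NumPy sanity check with hypothetical parameter values:

import numpy as np

t0, t_ambient, k, steps = 300.0, 70.0, 0.1, 50
temperature = t0
for _ in range(steps):
    temperature -= k * (temperature - t_ambient)
closed_form = t_ambient + (t0 - t_ambient) * (1.0 - k) ** steps
assert np.isclose(temperature, closed_form)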