def higher_order_tensors(step):
  # We're not limited to passing scalar tensors to the summary
  # operation. If we pass a rank-1 or rank-2 tensor, it'll be visualized
  # as a table in TensorBoard. (For higher-ranked tensors, you'll see
  # just a 2D slice of the data.)
  #
  # To demonstrate this, let's create a multiplication table.

  # First, we'll create the table body, a `step`-by-`step` array of
  # strings.
  numbers = tf.range(step)
  numbers_row = tf.expand_dims(numbers, 0)  # shape: [1, step]
  numbers_column = tf.expand_dims(numbers, 1)  # shape: [step, 1]
  products = tf.matmul(numbers_column, numbers_row)  # shape: [step, step]
  table_body = tf.as_string(products)

  # Next, we'll create a header row and column, and a little
  # multiplication sign to put in the corner.
  bold_numbers = tf.string_join(['**', tf.as_string(numbers), '**'])
  bold_row = tf.expand_dims(bold_numbers, 0)
  bold_column = tf.expand_dims(bold_numbers, 1)
  corner_cell = tf.constant(u'\u00d7'.encode('utf-8'))  # MULTIPLICATION SIGN

  # Now, we have to put the pieces together. Using `axis=0` stacks
  # vertically; using `axis=1` juxtaposes horizontally.
  table_body_and_top_row = tf.concat([bold_row, table_body], axis=0)
  table_left_column = tf.concat([[[corner_cell]], bold_column], axis=0)
  table_full = tf.concat([table_left_column, table_body_and_top_row], axis=1)

  # The result, `table_full`, is a rank-2 string tensor of shape
  # `[step + 1, step + 1]`. We can pass it directly to the summary, and
  # we'll get a nicely formatted table in TensorBoard.
  tf.summary.text('multiplication_table', table_full)
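
# A minimal driver sketch for the function above (not part of the original
# demo; the names `_demo_table` and `logdir` are illustrative). It assumes
# `import tensorflow as tf` and feeds increasing steps through a placeholder
# so the table grows over time.
def _demo_table(logdir, steps=10):
  step_placeholder = tf.placeholder(tf.int32, shape=[])
  higher_order_tensors(step_placeholder)  # registers the text summary
  summary_op = tf.summary.merge_all()
  with tf.Session() as sess:
    writer = tf.summary.FileWriter(logdir)
    for step in range(2, steps):
      writer.add_summary(
          sess.run(summary_op, feed_dict={step_placeholder: step}),
          global_step=step)
    writer.close()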
def bisine_wahwah_wave(frequency):
  """Emit two sine waves with balance oscillating left and right."""
  #
  # This is clearly intended to build on the bisine wave defined above,
  # so we can start by generating that.
  waves_a = bisine_wave(frequency)
  #
  # Then, by reversing axis 2, we swap the stereo channels. By mixing
  # this with `waves_a`, we'll be able to create the desired effect.
  waves_b = tf.reverse(waves_a, axis=[2])
  #
  # Let's have the balance oscillate from left to right four times.
  iterations = 4
  #
  # Now, we compute the balance for each sample: `ts` has values
  # in [0, 1] that indicate how much we should use `waves_a`.
  xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
  thetas = xs / _samples() * iterations
  ts = (tf.sin(math.pi * 2 * thetas) + 1) / 2
  #
  # Finally, we can mix the two together, and we're done.
  wave = ts * waves_a + (1.0 - ts) * waves_b
  #
  # Alternatively, we can make the effect more pronounced by exaggerating
  # the sample data. Let's emit both variations.
  exaggerated_wave = wave ** 3.0
  return tf.concat([wave, exaggerated_wave], axis=0)
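
# A hedged usage sketch (not part of the original file): it shows how the two
# emitted variations might be recorded with a TF 1.x audio summary. The
# constant 44100 is an assumed sample rate, and `logdir` is illustrative.
def _demo_wahwah(logdir):
  waveform = bisine_wahwah_wave(440.0)  # shape: [2, _samples(), 2]
  summary_op = tf.summary.audio('bisine_wahwah', waveform, 44100,
                                max_outputs=2)
  with tf.Session() as sess:
    writer = tf.summary.FileWriter(logdir)
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()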
def op(name, images, max_outputs=3, display_name=None, description=None,
       collections=None):
  """Create an image summary op for use in a TensorFlow graph.

  Arguments:
    name: A unique name for the generated summary node.
    images: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
      where `k` is the number of images, `h` and `w` are the height and
      width of the images, and `c` is the number of channels, which
      should be 1, 3, or 4. Any of the dimensions may be statically
      unknown (i.e., `None`).
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many images will be emitted at each step. When more than
      `max_outputs` many images are provided, the first `max_outputs`
      many images will be used and the rest silently discarded.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collections keys. The new summary
      op is added to these collections. Defaults to
      `[tf.GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.
  """
  if display_name is None:
    display_name = name
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name, description=description)
  with tf.name_scope(name), \
       tf.control_dependencies([tf.assert_rank(images, 4),
                                tf.assert_type(images, tf.uint8),
                                tf.assert_non_negative(max_outputs)]):
    limited_images = images[:max_outputs]
    encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
                               dtype=tf.string,
                               name='encode_each_image')
    image_shape = tf.shape(images)
    dimensions = tf.stack([tf.as_string(image_shape[2], name='width'),
                           tf.as_string(image_shape[1], name='height')],
                          name='dimensions')
    tensor = tf.concat([dimensions, encoded_images], axis=0)
    return tf.summary.tensor_summary(name='image_summary',
                                     tensor=tensor,
                                     collections=collections,
                                     summary_metadata=summary_metadata)
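
# A minimal sketch of how `op` might be called (an assumption, not part of
# this module): random uint8 RGB images are routed through the summary op and
# written once. `logdir` and `_demo_image_summary` are illustrative names.
def _demo_image_summary(logdir):
  images = tf.cast(
      tf.random_uniform([4, 32, 32, 3], maxval=256, dtype=tf.int32),
      tf.uint8)
  summary_op = op('random_images', images, max_outputs=2,
                  description='Four random RGB images.')
  with tf.Session() as sess:
    writer = tf.summary.FileWriter(logdir)
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()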
def bisine_wave(frequency):
  """Emit two sine waves, in stereo at different octaves."""
  #
  # We can first use our existing sine generator to generate two
  # different waves.
  f_hi = frequency
  f_lo = frequency / 2.0
  with tf.name_scope('hi'):
    sine_hi = sine_wave(f_hi)
  with tf.name_scope('lo'):
    sine_lo = sine_wave(f_lo)
  #
  # Now, we have two tensors of shape [1, _samples(), 1]. By concatenating
  # them along axis 2, we get a tensor of shape [1, _samples(), 2]---a
  # stereo waveform.
  return tf.concat([sine_lo, sine_hi], axis=2)
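
# A quick sanity-check sketch (an assumption, not in the original demo):
# evaluating the wave should yield shape [1, _samples(), 2], with the lower
# octave in the left channel and the higher octave in the right.
def _check_bisine_shape():
  wave = bisine_wave(440.0)
  with tf.Session() as sess:
    stereo = sess.run(wave)
  assert stereo.shape == (1, _samples(), 2)
  return stereo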
def run_all(logdir, verbose=False):
  """Generate a bunch of histogram data, and write it to logdir."""
  del verbose

  tf.set_random_seed(0)

  k = tf.placeholder(tf.float32)

  # Make a normal distribution, with a shifting mean
  mean_moving_normal = tf.random_normal(shape=[1000], mean=(5*k), stddev=1)
  # Record that distribution into a histogram summary
  histogram_summary.op("normal/moving_mean",
                       mean_moving_normal,
                       description="A normal distribution whose mean changes "
                                   "over time.")

  # Make a normal distribution with shrinking variance
  shrinking_normal = tf.random_normal(shape=[1000], mean=0, stddev=1-(k))
  # Record that distribution too
  histogram_summary.op("normal/shrinking_variance", shrinking_normal,
                       description="A normal distribution whose variance "
                                   "shrinks over time.")

  # Let's combine both of those distributions into one dataset
  normal_combined = tf.concat([mean_moving_normal, shrinking_normal], 0)
  # We add another histogram summary to record the combined distribution
  histogram_summary.op("normal/bimodal", normal_combined,
                       description="A combination of two normal distributions, "
                                   "one with a moving mean and one with "
                                   "shrinking variance. The result is a "
                                   "distribution that starts as unimodal and "
                                   "becomes more and more bimodal over time.")

  # Add a gamma distribution
  gamma = tf.random_gamma(shape=[1000], alpha=k)
  histogram_summary.op("gamma", gamma,
                       description="A gamma distribution whose shape "
                                   "parameter, α, changes over time.")

  # And a poisson distribution
  poisson = tf.random_poisson(shape=[1000], lam=k)
  histogram_summary.op("poisson", poisson,
                       description="A Poisson distribution, which only "
                                   "takes on integer values.")

  # And a uniform distribution
  uniform = tf.random_uniform(shape=[1000], maxval=k*10)
  histogram_summary.op("uniform", uniform,
                       description="A simple uniform distribution.")

  # Finally, combine everything together!
  all_distributions = [mean_moving_normal, shrinking_normal,
                       gamma, poisson, uniform]
  all_combined = tf.concat(all_distributions, 0)
  histogram_summary.op("all_combined", all_combined,
                       description="An amalgamation of five distributions: a "
                                   "uniform distribution, a gamma "
                                   "distribution, a Poisson distribution, and "
                                   "two normal distributions.")

  summaries = tf.summary.merge_all()

  # Set up a session and summary writer
  sess = tf.Session()
  writer = tf.summary.FileWriter(logdir)

  # Set up a loop and write the summaries to disk
  N = 400
  for step in xrange(N):
    k_val = step / float(N)
    summ = sess.run(summaries, feed_dict={k: k_val})
    writer.add_summary(summ, global_step=step)
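
# An illustrative entry point (an assumption about how this demo is run): it
# relies on the file-level imports the function above already uses, notably
# `import tensorflow as tf`, `from six.moves import xrange`, and the
# TensorBoard `histogram_summary` module. The path is illustrative.
def _main():
  logdir = '/tmp/histograms_demo'
  run_all(logdir)
  print('Histogram summaries written to %s' % logdir)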
def start_runs(logdir, steps, run_name, thresholds,
               mask_every_other_prediction=False):
  """Generate a PR curve with precision and recall evenly weighted.

  Arguments:
    logdir: The directory into which to store all the runs' data.
    steps: The number of steps to run for.
    run_name: The name of the run.
    thresholds: The number of thresholds to use for PR curves.
    mask_every_other_prediction: Whether to mask every other prediction by
      alternating weights between 0 and 1.
  """
  tf.reset_default_graph()
  tf.set_random_seed(42)

  # Create a normal distribution layer used to generate true color labels.
  distribution = tf.distributions.Normal(loc=0., scale=142.)

  # Sample the distribution to generate colors. Let's generate different
  # numbers of each color. The first dimension is the count of examples.

  # The calls to sample() are given fixed random seed values that are "magic"
  # in that they correspond to the default seeds for those ops when the PR
  # curve test (which depends on this code) was written. We've pinned these
  # instead of continuing to use the defaults since the defaults are based on
  # node IDs from the sequence of nodes added to the graph, which can silently
  # change when this code or any TF op implementations it uses are modified.
  # TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds.

  # Generate reds.
  number_of_reds = 100
  true_reds = tf.clip_by_value(
      tf.concat([
          255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)),
          tf.abs(distribution.sample([number_of_reds, 2], seed=34))
      ], axis=1),
      0, 255)

  # Generate greens.
  number_of_greens = 200
  true_greens = tf.clip_by_value(
      tf.concat([
          tf.abs(distribution.sample([number_of_greens, 1], seed=61)),
          255 - tf.abs(distribution.sample([number_of_greens, 1], seed=82)),
          tf.abs(distribution.sample([number_of_greens, 1], seed=105))
      ], axis=1),
      0, 255)

  # Generate blues.
  number_of_blues = 150
  true_blues = tf.clip_by_value(
      tf.concat([
          tf.abs(distribution.sample([number_of_blues, 2], seed=132)),
          255 - tf.abs(distribution.sample([number_of_blues, 1], seed=153))
      ], axis=1),
      0, 255)

  # Assign each color a vector of 3 booleans based on its true label.
  labels = tf.concat([
      tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)),
      tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)),
      tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)),
  ], axis=0)

  # We introduce 3 normal distributions. They are used to predict whether a
  # color falls under a certain class (based on distances from corners of the
  # color triangle). The distributions vary per color. We have the
  # distributions narrow over time.
  initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)]
  iteration = tf.placeholder(tf.int32, shape=[])
  red_predictor = tf.distributions.Normal(
      loc=0.,
      scale=tf.cast(initial_standard_deviations[0] - iteration,
                    dtype=tf.float32))
  green_predictor = tf.distributions.Normal(
      loc=0.,
      scale=tf.cast(initial_standard_deviations[1] - iteration,
                    dtype=tf.float32))
  blue_predictor = tf.distributions.Normal(
      loc=0.,
      scale=tf.cast(initial_standard_deviations[2] - iteration,
                    dtype=tf.float32))

  # Make predictions (assign 3 probabilities to each color based on each
  # color's distance to each of the 3 corners). We seek double the area in
  # the right tail of the normal distribution.
  examples = tf.concat([true_reds, true_greens, true_blues], axis=0)
  probabilities_colors_are_red = (1 - red_predictor.cdf(
      tf.norm(examples - tf.constant([255., 0, 0]), axis=1))) * 2
  probabilities_colors_are_green = (1 - green_predictor.cdf(
      tf.norm(examples - tf.constant([0, 255., 0]), axis=1))) * 2
  probabilities_colors_are_blue = (1 - blue_predictor.cdf(
      tf.norm(examples - tf.constant([0, 0, 255.]), axis=1))) * 2

  predictions = (
      probabilities_colors_are_red,
      probabilities_colors_are_green,
      probabilities_colors_are_blue
  )

  # This is the crucial piece. We write data required for generating PR
  # curves. We create 1 summary per class because we create 1 PR curve per
  # class.
  for i, color in enumerate(('red', 'green', 'blue')):
    description = ('The probabilities used to create this PR curve are '
                   'generated from a normal distribution. Its standard '
                   'deviation is initially %0.0f and decreases over time.' %
                   initial_standard_deviations[i])
    weights = None
    if mask_every_other_prediction:
      # Assign a weight of 0 to every even-indexed prediction. Odd-indexed
      # predictions are assigned a default weight of 1.
      consecutive_indices = tf.reshape(
          tf.range(tf.size(predictions[i])), tf.shape(predictions[i]))
      weights = tf.cast(consecutive_indices % 2, dtype=tf.float32)
    summary.op(
        name=color,
        labels=labels[:, i],
        predictions=predictions[i],
        num_thresholds=thresholds,
        weights=weights,
        display_name='classifying %s' % color,
        description=description)
  merged_summary_op = tf.summary.merge_all()
  events_directory = os.path.join(logdir, run_name)
  sess = tf.Session()
  writer = tf.summary.FileWriter(events_directory, sess.graph)

  for step in xrange(steps):
    feed_dict = {
        iteration: step,
    }
    merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict)
    writer.add_summary(merged_summary, step)

  writer.close()
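
# A driver sketch in the spirit of the demo (hedged: the run names, default
# argument values, and `_demo_pr_curves` are illustrative, and FLAGS.steps is
# assumed to equal `steps` so the predictor scales stay positive). It writes
# one unmasked run and one run with every other prediction masked.
def _demo_pr_curves(logdir, steps=50, thresholds=201):
  for run_name, mask in (('colors', False),
                         ('mask_every_other_prediction', True)):
    start_runs(logdir=logdir,
               steps=steps,
               run_name=run_name,
               thresholds=thresholds,
               mask_every_other_prediction=mask)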